max_stars_repo_path : string (lengths 3 to 269)
max_stars_repo_name : string (lengths 4 to 119)
max_stars_count     : int64 (0 to 191k)
id                  : string (lengths 1 to 7)
content             : string (lengths 6 to 1.05M)
score               : float64 (0.23 to 5.13)
int_score           : int64 (0 to 5)
electrondiffraction/__init__.py
drix00/ElectronDiffraction
1
2500
# -*- coding: utf-8 -*-

__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = '0.1.0'
1.054688
1
storelet.py
markembling/storelet
1
2501
import os
import logging
from tempfile import mkstemp, mkdtemp
from shutil import rmtree
from zipfile import ZipFile, ZIP_DEFLATED
from datetime import datetime

from boto.s3.connection import S3Connection
from boto.s3.key import Key

__version__ = "0.1.8"
__author__ = "<NAME>"
__email__ = "<EMAIL>"

logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())

LOGGING_DEFAULTS = {"level": logging.INFO,
                    "format": "%(asctime)s [%(levelname)s]: %(message)s"}


def setup_logging(**kwargs):
    """Convenience function for setting up some sane logging defaults"""
    opts = dict(LOGGING_DEFAULTS.items() + kwargs.items())
    logging.basicConfig(**opts)


class ZipBackup(object):
    """
    A compressed ZIP file backup

    Note: large inclusion operations can sometimes take time as files are
    compressed on the fly. This prevents all the files being copied to a
    temporary location (and using unnecessary extra space) and storing up
    the need for a potentially large compression at the end.
    """

    def __init__(self, name):
        self.name = name
        _, self._path = mkstemp()
        logger.debug("Created temporary file %s" % self._path)

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()

    def close(self):
        os.remove(self._path)
        logger.debug("Removed temporary file %s" % self._path)

    def include_directory(self, path, preserve_paths=False, name=None):
        """Add the contents of a directory to the backup"""
        path = os.path.abspath(path)
        logger.debug("Adding directory %s" % path)
        with ZipFile(self._path, 'a', ZIP_DEFLATED, allowZip64=True) as zipfile:
            for base, dirs, files in os.walk(path):
                logger.debug("Walking directory %s" % path)
                for file in files:
                    filename = os.path.join(base, file)
                    try:
                        zipfile.write(filename,
                                      self._get_filename_for_archive(
                                          path, filename, preserve_paths, name))
                        logger.info("Added file %s" % filename)
                    except:
                        logger.warn("Could not add file %s" % file, exc_info=True)
        logger.debug("Finished directory %s" % path)

    def save_to_s3(self, bucket, access_key, secret_key, **kwargs):
        """Save the backup to Amazon S3"""
        logger.info("Saving to S3 in '%s' bucket" % bucket)
        conn = S3Connection(access_key, secret_key, **kwargs)
        bucket = conn.get_bucket(bucket)
        key = Key(bucket)
        key.key = '%<KEY>' % \
            (self.name, datetime.now().strftime("%Y%m%d%H%M%S"))
        key.set_contents_from_filename(self._path)
        logger.info("Saving to S3 done %s" % key.key)

    def include_new_dir(self, name):
        """Add a new empty directory to the backup"""
        return BackupIncludedDirectory(name, self)

    def _get_filename_for_archive(self, directory, filename,
                                  preserve_paths, name):
        if not preserve_paths:
            filename = filename.replace(directory, "")
        if name is not None:
            filename = name + os.sep + filename
        return filename


class BackupIncludedDirectory(object):
    """A new directory which is subsequently added to the backup"""

    def __init__(self, name, owner):
        self.name = name
        self.path = mkdtemp()
        self._owner = owner
        logger.debug("Created temporary directory %s" % self.path)

    def __str__(self):
        return self.path

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self._owner.include_directory(self.path, preserve_paths=False,
                                      name=self.name)
        rmtree(self.path)
        logger.debug("Removed temporary directory %s" % self.path)
2.5
2
apps/delivery/migrations/0001_initial.py
jimforit/lagou
2
2502
# Generated by Django 2.0.2 on 2019-03-08 13:03

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Delivery',
            fields=[
                ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
                ('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
                ('is_delete', models.BooleanField(default=False, verbose_name='删除标记')),
                ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='投递ID')),
                ('delivery_status', models.CharField(choices=[('DD', '待定'), ('YQ', '邀请面试'), ('WJ', '婉拒')], default='DD', max_length=2, verbose_name='投递状态')),
            ],
            options={
                'verbose_name': '面试',
                'verbose_name_plural': '面试',
            },
        ),
    ]
1.890625
2
elementary/date-and-time-convertor.py
vargad/exercises
1
2503
#!/usr/bin/env python3


def date_time(time):
    months = ["January", "February", "March", "April", "May", "June",
              "July", "August", "September", "October", "November", "December"]
    hour, minute = int(time[11:13]), int(time[14:16])
    return f"{int(time[0:2])} {months[int(time[3:5])-1]} {time[6:10]} year {hour} hour{'s' if hour!=1 else ''} {minute} minute{'s' if minute!=1 else ''}"


if __name__ == '__main__':
    print(date_time("01.01.2018 00:00"))
    assert date_time("01.01.2018 00:00") == "1 January 2018 year 0 hours 0 minutes"
    assert date_time("04.08.1984 08:15") == "4 August 1984 year 8 hours 15 minutes"
    assert date_time("17.12.1990 07:42") == "17 December 1990 year 7 hours 42 minutes"
4.15625
4
lbry/wallet/server/peer.py
snapperVibes/lbry-sdk
2
2504
<reponame>snapperVibes/lbry-sdk # Copyright (c) 2017, <NAME> # # All rights reserved. # # The MIT License (MIT) # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """Representation of a peer server.""" from ipaddress import ip_address from lbry.wallet.server import util from lbry.wallet.server.util import cachedproperty from typing import Dict class Peer: # Protocol version ATTRS = ('host', 'features', # metadata 'source', 'ip_addr', 'last_good', 'last_try', 'try_count') FEATURES = ('pruning', 'server_version', 'protocol_min', 'protocol_max', 'ssl_port', 'tcp_port') # This should be set by the application DEFAULT_PORTS: Dict[str, int] = {} def __init__(self, host, features, source='unknown', ip_addr=None, last_good=0, last_try=0, try_count=0): """Create a peer given a host name (or IP address as a string), a dictionary of features, and a record of the source.""" assert isinstance(host, str) assert isinstance(features, dict) assert host in features.get('hosts', {}) self.host = host self.features = features.copy() # Canonicalize / clean-up for feature in self.FEATURES: self.features[feature] = getattr(self, feature) # Metadata self.source = source self.ip_addr = ip_addr # last_good represents the last connection that was # successful *and* successfully verified, at which point # try_count is set to 0. Failure to connect or failure to # verify increment the try_count. self.last_good = last_good self.last_try = last_try self.try_count = try_count # Transient, non-persisted metadata self.bad = False self.other_port_pairs = set() self.status = 2 @classmethod def peers_from_features(cls, features, source): peers = [] if isinstance(features, dict): hosts = features.get('hosts') if isinstance(hosts, dict): peers = [Peer(host, features, source=source) for host in hosts if isinstance(host, str)] return peers @classmethod def deserialize(cls, item): """Deserialize from a dictionary.""" return cls(**item) def matches(self, peers): """Return peers whose host matches our hostname or IP address. Additionally include all peers whose IP address matches our hostname if that is an IP address. 
""" candidates = (self.host.lower(), self.ip_addr) return [peer for peer in peers if peer.host.lower() in candidates or peer.ip_addr == self.host] def __str__(self): return self.host def update_features(self, features): """Update features in-place.""" try: tmp = Peer(self.host, features) except Exception: pass else: self.update_features_from_peer(tmp) def update_features_from_peer(self, peer): if peer != self: self.features = peer.features for feature in self.FEATURES: setattr(self, feature, getattr(peer, feature)) def connection_port_pairs(self): """Return a list of (kind, port) pairs to try when making a connection.""" # Use a list not a set - it's important to try the registered # ports first. pairs = [('SSL', self.ssl_port), ('TCP', self.tcp_port)] while self.other_port_pairs: pairs.append(self.other_port_pairs.pop()) return [pair for pair in pairs if pair[1]] def mark_bad(self): """Mark as bad to avoid reconnects but also to remember for a while.""" self.bad = True def check_ports(self, other): """Remember differing ports in case server operator changed them or removed one.""" if other.ssl_port != self.ssl_port: self.other_port_pairs.add(('SSL', other.ssl_port)) if other.tcp_port != self.tcp_port: self.other_port_pairs.add(('TCP', other.tcp_port)) return bool(self.other_port_pairs) @cachedproperty def is_tor(self): return self.host.endswith('.onion') @cachedproperty def is_valid(self): ip = self.ip_address if ip: return ((ip.is_global or ip.is_private) and not (ip.is_multicast or ip.is_unspecified)) return util.is_valid_hostname(self.host) @cachedproperty def is_public(self): ip = self.ip_address if ip: return self.is_valid and not ip.is_private else: return self.is_valid and self.host != 'localhost' @cachedproperty def ip_address(self): """The host as a python ip_address object, or None.""" try: return ip_address(self.host) except ValueError: return None def bucket(self): if self.is_tor: return 'onion' if not self.ip_addr: return '' return tuple(self.ip_addr.split('.')[:2]) def serialize(self): """Serialize to a dictionary.""" return {attr: getattr(self, attr) for attr in self.ATTRS} def _port(self, key): hosts = self.features.get('hosts') if isinstance(hosts, dict): host = hosts.get(self.host) port = self._integer(key, host) if port and 0 < port < 65536: return port return None def _integer(self, key, d=None): d = d or self.features result = d.get(key) if isinstance(d, dict) else None if isinstance(result, str): try: result = int(result) except ValueError: pass return result if isinstance(result, int) else None def _string(self, key): result = self.features.get(key) return result if isinstance(result, str) else None @cachedproperty def genesis_hash(self): """Returns None if no SSL port, otherwise the port as an integer.""" return self._string('genesis_hash') @cachedproperty def ssl_port(self): """Returns None if no SSL port, otherwise the port as an integer.""" return self._port('ssl_port') @cachedproperty def tcp_port(self): """Returns None if no TCP port, otherwise the port as an integer.""" return self._port('tcp_port') @cachedproperty def server_version(self): """Returns the server version as a string if known, otherwise None.""" return self._string('server_version') @cachedproperty def pruning(self): """Returns the pruning level as an integer. 
None indicates no pruning.""" pruning = self._integer('pruning') if pruning and pruning > 0: return pruning return None def _protocol_version_string(self, key): version_str = self.features.get(key) ptuple = util.protocol_tuple(version_str) return util.version_string(ptuple) @cachedproperty def protocol_min(self): """Minimum protocol version as a string, e.g., 1.0""" return self._protocol_version_string('protocol_min') @cachedproperty def protocol_max(self): """Maximum protocol version as a string, e.g., 1.1""" return self._protocol_version_string('protocol_max') def to_tuple(self): """The tuple ((ip, host, details) expected in response to a peers subscription.""" details = self.real_name().split()[1:] return (self.ip_addr or self.host, self.host, details) def real_name(self): """Real name of this peer as used on IRC.""" def port_text(letter, port): if port == self.DEFAULT_PORTS.get(letter): return letter else: return letter + str(port) parts = [self.host, 'v' + self.protocol_max] if self.pruning: parts.append(f'p{self.pruning:d}') for letter, port in (('s', self.ssl_port), ('t', self.tcp_port)): if port: parts.append(port_text(letter, port)) return ' '.join(parts) @classmethod def from_real_name(cls, real_name, source): """Real name is a real name as on IRC, such as "erbium1.sytes.net v1.0 s t" Returns an instance of this Peer class. """ host = 'nohost' features = {} ports = {} for n, part in enumerate(real_name.split()): if n == 0: host = part continue if part[0] in ('s', 't'): if len(part) == 1: port = cls.DEFAULT_PORTS[part[0]] else: port = part[1:] if part[0] == 's': ports['ssl_port'] = port else: ports['tcp_port'] = port elif part[0] == 'v': features['protocol_max'] = features['protocol_min'] = part[1:] elif part[0] == 'p': features['pruning'] = part[1:] features.update(ports) features['hosts'] = {host: ports} return cls(host, features, source)
1.695313
2
tests/components/deconz/test_diagnostics.py
aomann/core
0
2505
"""Test deCONZ diagnostics.""" from unittest.mock import patch from pydeconz.websocket import STATE_RUNNING from homeassistant.const import Platform from .test_gateway import DECONZ_CONFIG, setup_deconz_integration from tests.components.diagnostics import get_diagnostics_for_config_entry async def test_entry_diagnostics( hass, hass_client, aioclient_mock, mock_deconz_websocket ): """Test config entry diagnostics.""" config_entry = await setup_deconz_integration(hass, aioclient_mock) await mock_deconz_websocket(state=STATE_RUNNING) await hass.async_block_till_done() with patch( "homeassistant.helpers.system_info.async_get_system_info", return_value={"get_system_info": "fake data"}, ): assert await get_diagnostics_for_config_entry( hass, hass_client, config_entry ) == { "home_assistant": {"get_system_info": "fake data"}, "config_entry": dict(config_entry.data), "deconz_config": DECONZ_CONFIG, "websocket_state": STATE_RUNNING, "deconz_ids": {}, "entities": { str(Platform.ALARM_CONTROL_PANEL): [], str(Platform.BINARY_SENSOR): [], str(Platform.CLIMATE): [], str(Platform.COVER): [], str(Platform.FAN): [], str(Platform.LIGHT): [], str(Platform.LOCK): [], str(Platform.NUMBER): [], str(Platform.SENSOR): [], str(Platform.SIREN): [], str(Platform.SWITCH): [], }, "events": {}, "alarm_systems": {}, "groups": {}, "lights": {}, "scenes": {}, "sensors": {}, }
2.015625
2
jax_md/partition.py
l1zp/jax-md
0
2506
<filename>jax_md/partition.py # Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Code to transform functions on individual tuples of particles to sets.""" from absl import logging from functools import reduce, partial from collections import namedtuple from enum import Enum from typing import Any, Callable, Optional, Dict, Tuple, Generator, Union import math from operator import mul import numpy as onp from jax import lax from jax import ops from jax import jit, vmap, eval_shape from jax.abstract_arrays import ShapedArray from jax.interpreters import partial_eval as pe import jax.numpy as jnp from jax_md import quantity, space, dataclasses, util import jraph # Types Array = util.Array f32 = util.f32 f64 = util.f64 i32 = util.i32 i64 = util.i64 Box = space.Box DisplacementOrMetricFn = space.DisplacementOrMetricFn MetricFn = space.MetricFn # Cell List @dataclasses.dataclass class CellList: """Stores the spatial partition of a system into a cell list. See cell_list(...) for details on the construction / specification. Cell list buffers all have a common shape, S, where * `S = [cell_count_x, cell_count_y, cell_capacity]` * `S = [cell_count_x, cell_count_y, cell_count_z, cell_capacity]` in two- and three-dimensions respectively. It is assumed that each cell has the same capacity. Attributes: position_buffer: An ndarray of floating point positions with shape S + [spatial_dimension]. id_buffer: An ndarray of int32 particle ids of shape S. Note that empty slots are specified by id = N where N is the number of particles in the system. kwarg_buffers: A dictionary of ndarrays of shape S + [...]. This contains side data placed into the cell list. """ position_buffer: Array id_buffer: Array kwarg_buffers: Dict[str, Array] def _cell_dimensions(spatial_dimension: int, box_size: Box, minimum_cell_size: float) -> Tuple[Box, Array, Array, int]: """Compute the number of cells-per-side and total number of cells in a box.""" if isinstance(box_size, int) or isinstance(box_size, float): box_size = float(box_size) # NOTE(schsam): Should we auto-cast based on box_size? I can't imagine a case # in which the box_size would not be accurately represented by an f32. 
if (isinstance(box_size, onp.ndarray) and (box_size.dtype == jnp.int32 or box_size.dtype == jnp.int64)): box_size = float(box_size) cells_per_side = onp.floor(box_size / minimum_cell_size) cell_size = box_size / cells_per_side cells_per_side = onp.array(cells_per_side, dtype=jnp.int64) if isinstance(box_size, onp.ndarray): if box_size.ndim == 1 or box_size.ndim == 2: assert box_size.size == spatial_dimension flat_cells_per_side = onp.reshape(cells_per_side, (-1,)) for cells in flat_cells_per_side: if cells < 3: raise ValueError( ('Box must be at least 3x the size of the grid spacing in each ' 'dimension.')) cell_count = reduce(mul, flat_cells_per_side, 1) elif box_size.ndim == 0: cell_count = cells_per_side ** spatial_dimension else: raise ValueError('Box must either be a scalar or a vector.') else: cell_count = cells_per_side ** spatial_dimension return box_size, cell_size, cells_per_side, int(cell_count) def count_cell_filling(R: Array, box_size: Box, minimum_cell_size: float) -> Array: """Counts the number of particles per-cell in a spatial partition.""" dim = int(R.shape[1]) box_size, cell_size, cells_per_side, cell_count = \ _cell_dimensions(dim, box_size, minimum_cell_size) hash_multipliers = _compute_hash_constants(dim, cells_per_side) particle_index = jnp.array(R / cell_size, dtype=jnp.int64) particle_hash = jnp.sum(particle_index * hash_multipliers, axis=1) filling = ops.segment_sum(jnp.ones_like(particle_hash), particle_hash, cell_count) return filling def _is_variable_compatible_with_positions(R: Array) -> bool: if (util.is_array(R) and len(R.shape) == 2 and jnp.issubdtype(R.dtype, jnp.floating)): return True return False def _compute_hash_constants(spatial_dimension: int, cells_per_side: Array) -> Array: if cells_per_side.size == 1: return jnp.array([[cells_per_side ** d for d in range(spatial_dimension)]], dtype=jnp.int64) elif cells_per_side.size == spatial_dimension: one = jnp.array([[1]], dtype=jnp.int32) cells_per_side = jnp.concatenate((one, cells_per_side[:, :-1]), axis=1) return jnp.array(jnp.cumprod(cells_per_side), dtype=jnp.int64) else: raise ValueError() def _neighboring_cells(dimension: int) -> Generator[onp.ndarray, None, None]: for dindex in onp.ndindex(*([3] * dimension)): yield onp.array(dindex, dtype=jnp.int64) - 1 def _estimate_cell_capacity(R: Array, box_size: Box, cell_size: float, buffer_size_multiplier: float) -> int: # TODO(schsam): We might want to do something more sophisticated here or at # least expose this constant. 
spatial_dim = R.shape[-1] cell_capacity = onp.max(count_cell_filling(R, box_size, cell_size)) return int(cell_capacity * buffer_size_multiplier) def _unflatten_cell_buffer(arr: Array, cells_per_side: Array, dim: int) -> Array: if (isinstance(cells_per_side, int) or isinstance(cells_per_side, float) or (util.is_array(cells_per_side) and not cells_per_side.shape)): cells_per_side = (int(cells_per_side),) * dim elif util.is_array(cells_per_side) and len(cells_per_side.shape) == 1: cells_per_side = tuple([int(x) for x in cells_per_side[::-1]]) elif util.is_array(cells_per_side) and len(cells_per_side.shape) == 2: cells_per_side = tuple([int(x) for x in cells_per_side[0][::-1]]) else: raise ValueError() # TODO return jnp.reshape(arr, cells_per_side + (-1,) + arr.shape[1:]) def _shift_array(arr: onp.ndarray, dindex: Array) -> Array: if len(dindex) == 2: dx, dy = dindex dz = 0 elif len(dindex) == 3: dx, dy, dz = dindex if dx < 0: arr = jnp.concatenate((arr[1:], arr[:1])) elif dx > 0: arr = jnp.concatenate((arr[-1:], arr[:-1])) if dy < 0: arr = jnp.concatenate((arr[:, 1:], arr[:, :1]), axis=1) elif dy > 0: arr = jnp.concatenate((arr[:, -1:], arr[:, :-1]), axis=1) if dz < 0: arr = jnp.concatenate((arr[:, :, 1:], arr[:, :, :1]), axis=2) elif dz > 0: arr = jnp.concatenate((arr[:, :, -1:], arr[:, :, :-1]), axis=2) return arr def _vectorize(f: Callable, dim: int) -> Callable: if dim == 2: return vmap(vmap(f, 0, 0), 0, 0) elif dim == 3: return vmap(vmap(vmap(f, 0, 0), 0, 0), 0, 0) raise ValueError('Cell list only supports 2d or 3d.') def cell_list(box_size: Box, minimum_cell_size: float, cell_capacity_or_example_R: Union[int, Array], buffer_size_multiplier: float=1.1 ) -> Callable[[Array], CellList]: r"""Returns a function that partitions point data spatially. Given a set of points {x_i \in R^d} with associated data {k_i \in R^m} it is often useful to partition the points / data spatially. A simple partitioning that can be implemented efficiently within XLA is a dense partition into a uniform grid called a cell list. Since XLA requires that shapes be statically specified, we allocate fixed sized buffers for each cell. The size of this buffer can either be specified manually or it can be estimated automatically from a set of positions. Note, if the distribution of points changes significantly it is likely the buffer the buffer sizes will have to be adjusted. This partitioning will likely form the groundwork for parallelizing simulations over different accelerators. Args: box_size: A float or an ndarray of shape [spatial_dimension] specifying the size of the system. Note, this code is written for the case where the boundaries are periodic. If this is not the case, then the current code will be slightly less efficient. minimum_cell_size: A float specifying the minimum side length of each cell. Cells are enlarged so that they exactly fill the box. cell_capacity_or_example_R: Either an integer specifying the size number of particles that can be stored in each cell or an ndarray of positions of shape [particle_count, spatial_dimension] that is used to estimate the cell_capacity. buffer_size_multiplier: A floating point multiplier that multiplies the estimated cell capacity to allow for fluctuations in the maximum cell occupancy. Returns: A function `cell_list_fn(R, **kwargs)` that partitions positions, `R`, and side data specified by kwargs into a cell list. Returns a CellList containing the partition. 
""" if util.is_array(box_size): box_size = onp.array(box_size) if len(box_size.shape) == 1: box_size = jnp.reshape(box_size, (1, -1)) if util.is_array(minimum_cell_size): minimum_cell_size = onp.array(minimum_cell_size) cell_capacity = cell_capacity_or_example_R if _is_variable_compatible_with_positions(cell_capacity): cell_capacity = _estimate_cell_capacity( cell_capacity, box_size, minimum_cell_size, buffer_size_multiplier) elif not isinstance(cell_capacity, int): msg = ( 'cell_capacity_or_example_positions must either be an integer ' 'specifying the cell capacity or a set of positions that will be used ' 'to estimate a cell capacity. Found {}.'.format(type(cell_capacity)) ) raise ValueError(msg) def build_cells(R: Array, extra_capacity: int=0, **kwargs) -> CellList: N = R.shape[0] dim = R.shape[1] _cell_capacity = cell_capacity + extra_capacity if dim != 2 and dim != 3: # NOTE(schsam): Do we want to check this in compute_fn as well? raise ValueError( 'Cell list spatial dimension must be 2 or 3. Found {}'.format(dim)) neighborhood_tile_count = 3 ** dim _, cell_size, cells_per_side, cell_count = \ _cell_dimensions(dim, box_size, minimum_cell_size) hash_multipliers = _compute_hash_constants(dim, cells_per_side) # Create cell list data. particle_id = lax.iota(jnp.int64, N) # NOTE(schsam): We use the convention that particles that are successfully, # copied have their true id whereas particles empty slots have id = N. # Then when we copy data back from the grid, copy it to an array of shape # [N + 1, output_dimension] and then truncate it to an array of shape # [N, output_dimension] which ignores the empty slots. mask_id = jnp.ones((N,), jnp.int64) * N cell_R = jnp.zeros((cell_count * _cell_capacity, dim), dtype=R.dtype) cell_id = N * jnp.ones((cell_count * _cell_capacity, 1), dtype=i32) # It might be worth adding an occupied mask. However, that will involve # more compute since often we will do a mask for species that will include # an occupancy test. It seems easier to design around this empty_data_value # for now and revisit the issue if it comes up later. empty_kwarg_value = 10 ** 5 cell_kwargs = {} for k, v in kwargs.items(): if not util.is_array(v): raise ValueError(( 'Data must be specified as an ndarry. Found "{}" with ' 'type {}'.format(k, type(v)))) if v.shape[0] != R.shape[0]: raise ValueError( ('Data must be specified per-particle (an ndarray with shape ' '(R.shape[0], ...)). Found "{}" with shape {}'.format(k, v.shape))) kwarg_shape = v.shape[1:] if v.ndim > 1 else (1,) cell_kwargs[k] = empty_kwarg_value * jnp.ones( (cell_count * _cell_capacity,) + kwarg_shape, v.dtype) indices = jnp.array(R / cell_size, dtype=i32) hashes = jnp.sum(indices * hash_multipliers, axis=1) # Copy the particle data into the grid. Here we use a trick to allow us to # copy into all cells simultaneously using a single lax.scatter call. To do # this we first sort particles by their cell hash. We then assign each # particle to have a cell id = hash * cell_capacity + grid_id where grid_id # is a flat list that repeats 0, .., cell_capacity. So long as there are # fewer than cell_capacity particles per cell, each particle is guarenteed # to get a cell id that is unique. 
sort_map = jnp.argsort(hashes) sorted_R = R[sort_map] sorted_hash = hashes[sort_map] sorted_id = particle_id[sort_map] sorted_kwargs = {} for k, v in kwargs.items(): sorted_kwargs[k] = v[sort_map] sorted_cell_id = jnp.mod(lax.iota(jnp.int64, N), _cell_capacity) sorted_cell_id = sorted_hash * _cell_capacity + sorted_cell_id cell_R = ops.index_update(cell_R, sorted_cell_id, sorted_R) sorted_id = jnp.reshape(sorted_id, (N, 1)) cell_id = ops.index_update( cell_id, sorted_cell_id, sorted_id) cell_R = _unflatten_cell_buffer(cell_R, cells_per_side, dim) cell_id = _unflatten_cell_buffer(cell_id, cells_per_side, dim) for k, v in sorted_kwargs.items(): if v.ndim == 1: v = jnp.reshape(v, v.shape + (1,)) cell_kwargs[k] = ops.index_update(cell_kwargs[k], sorted_cell_id, v) cell_kwargs[k] = _unflatten_cell_buffer( cell_kwargs[k], cells_per_side, dim) return CellList(cell_R, cell_id, cell_kwargs) # pytype: disable=wrong-arg-count return build_cells def _displacement_or_metric_to_metric_sq( displacement_or_metric: DisplacementOrMetricFn) -> MetricFn: """Checks whether or not a displacement or metric was provided.""" for dim in range(1, 4): try: R = ShapedArray((dim,), f32) dR_or_dr = eval_shape(displacement_or_metric, R, R, t=0) if len(dR_or_dr.shape) == 0: return lambda Ra, Rb, **kwargs: \ displacement_or_metric(Ra, Rb, **kwargs) ** 2 else: return lambda Ra, Rb, **kwargs: space.square_distance( displacement_or_metric(Ra, Rb, **kwargs)) except TypeError: continue except ValueError: continue raise ValueError( 'Canonicalize displacement not implemented for spatial dimension larger' 'than 4.') class NeighborListFormat(Enum): """An enum listing the different neighbor list formats. Attributes: Dense: A dense neighbor list where the ids are a square matrix of shape `(N, max_neighbors_per_atom)`. Here the capacity of the neighbor list must scale with the highest connectivity neighbor. Sparse: A sparse neighbor list where the ids are a rectangular matrix of shape `(2, max_neighbors)` specifying the start / end particle of each neighbor pair. OrderedSparse: A sparse neighbor list whose format is the same as `Sparse` where only bonds with i < j are included. """ Dense = 0 Sparse = 1 OrderedSparse = 2 def is_sparse(format: NeighborListFormat) -> bool: return (format is NeighborListFormat.Sparse or format is NeighborListFormat.OrderedSparse) def is_format_valid(format: NeighborListFormat): if not format in list(NeighborListFormat): raise ValueError(( 'Neighbor list format must be a member of NeighorListFormat' f' found {format}.')) @dataclasses.dataclass class NeighborList(object): """A struct containing the state of a Neighbor List. Attributes: idx: For an N particle system this is an `[N, max_occupancy]` array of integers such that `idx[i, j]` is the jth neighbor of particle i. reference_position: The positions of particles when the neighbor list was constructed. This is used to decide whether the neighbor list ought to be updated. did_buffer_overflow: A boolean that starts out False. If there are ever more neighbors than max_neighbors this is set to true to indicate that there was a buffer overflow. If this happens, it means that the results of the simulation will be incorrect and the simulation needs to be rerun using a larger buffer. max_occupancy: A static integer specifying the maximum size of the neighbor list. Changing this will invoke a recompilation. format: A NeighborListFormat enum specifying the format of the neighbor list. 
cell_list_fn: A static python callable that is used to construct a cell list used in an intermediate step of the neighbor list calculation. update_fn: A static python function used to update the neighbor list. """ idx: Array reference_position: Array did_buffer_overflow: Array max_occupancy: int = dataclasses.static_field() format: NeighborListFormat = dataclasses.static_field() cell_list_fn: Callable[[Array], CellList] = dataclasses.static_field() update_fn: Callable[[Array, 'NeighborList'], 'NeighborList'] = dataclasses.static_field() def update(self, R, **kwargs): return self.update_fn(R, self, **kwargs) @dataclasses.dataclass class NeighborListFns: """A struct containing functions to allocate and update neighbor lists. Attributes: allocate: A function to allocate a new neighbor list. This function cannot be compiled, since it uses the values of positions to infer the shapes. update: A function to update a neighbor list given a new set of positions and a new neighbor list. """ allocate: Callable[..., NeighborList] = dataclasses.static_field() update: Callable[[Array, NeighborList], NeighborList] = dataclasses.static_field() def __call__(self, R: Array, neighbor_list: Optional[NeighborList]=None, extra_capacity: int=0, **kwargs) -> NeighborList: """A function for backward compatibility with previous neighbor lists. Attributes: R: An `(N, dim)` array of particle positions. neighbor_list: An optional neighor list object. If it is provided then the function updates the neighbor list, otherwise it allocates a new neighbor list. extra_capacity: Extra capacity to add if allocating the neighbor list. """ logging.warning('Using a depricated code path to create / update neighbor ' 'lists. It will be removed in a later version of JAX MD. ' 'Using `neighbor_fn.allocate` and `neighbor_fn.update` ' 'is preferred.') if neighbor_list is None: return self.allocate(R, extra_capacity, **kwargs) return self.update(R, neighbor_list, **kwargs) def __iter__(self): return iter((self.allocate, self.update)) NeighborFn = Callable[[Array, Optional[NeighborList], Optional[int]], NeighborList] def neighbor_list(displacement_or_metric: DisplacementOrMetricFn, box_size: Box, r_cutoff: float, dr_threshold: float, capacity_multiplier: float=1.25, disable_cell_list: bool=False, mask_self: bool=True, fractional_coordinates: bool=False, format: NeighborListFormat=NeighborListFormat.Dense, **static_kwargs) -> NeighborFn: """Returns a function that builds a list neighbors for collections of points. Neighbor lists must balance the need to be jit compatable with the fact that under a jit the maximum number of neighbors cannot change (owing to static shape requirements). To deal with this, our `neighbor_list` returns a `NeighborListFns` object that contains two functions: 1) `neighbor_fn.allocate` create a new neighbor list and 2) `neighbor_fn.update` updates an existing neighbor list. Neighbor lists themselves additionally have a convenience `update` member function. Note that allocation of a new neighbor list cannot be jit compiled since it uses the positions to infer the maximum number of neighbors (along with additional space specified by the `capacity_multiplier`). Updating the neighbor list can be jit compiled; if the neighbor list capacity is not sufficient to store all the neighbors, the `did_buffer_overflow` bit will be set to `True` and a new neighbor list will need to be reallocated. 
Here is a typical example of a simulation loop with neighbor lists: >>> init_fn, apply_fn = simulate.nve(energy_fn, shift, 1e-3) >>> exact_init_fn, exact_apply_fn = simulate.nve(exact_energy_fn, shift, 1e-3) >>> >>> nbrs = neighbor_fn.allocate(R) >>> state = init_fn(random.PRNGKey(0), R, neighbor_idx=nbrs.idx) >>> >>> def body_fn(i, state): >>> state, nbrs = state >>> nbrs = nbrs.update(state.position) >>> state = apply_fn(state, neighbor_idx=nbrs.idx) >>> return state, nbrs >>> >>> step = 0 >>> for _ in range(20): >>> new_state, nbrs = lax.fori_loop(0, 100, body_fn, (state, nbrs)) >>> if nbrs.did_buffer_overflow: >>> nbrs = neighbor_fn.allocate(state.position) >>> else: >>> state = new_state >>> step += 1 Args: displacement: A function `d(R_a, R_b)` that computes the displacement between pairs of points. box_size: Either a float specifying the size of the box or an array of shape [spatial_dim] specifying the box size in each spatial dimension. r_cutoff: A scalar specifying the neighborhood radius. dr_threshold: A scalar specifying the maximum distance particles can move before rebuilding the neighbor list. capacity_multiplier: A floating point scalar specifying the fractional increase in maximum neighborhood occupancy we allocate compared with the maximum in the example positions. disable_cell_list: An optional boolean. If set to True then the neighbor list is constructed using only distances. This can be useful for debugging but should generally be left as False. mask_self: An optional boolean. Determines whether points can consider themselves to be their own neighbors. fractional_coordinates: An optional boolean. Specifies whether positions will be supplied in fractional coordinates in the unit cube, [0, 1]^d. If this is set to True then the box_size will be set to 1.0 and the cell size used in the cell list will be set to cutoff / box_size. format: The format of the neighbor list; see the NeighborListFormat enum for details about the different choices for formats. Defaults to `Dense`. **static_kwargs: kwargs that get threaded through the calculation of example positions. Returns: A pair. The first element is a NeighborList containing the current neighbor list. The second element contains a function `neighbor_list_fn(R, neighbor_list=None)` that will update the neighbor list. If neighbor_list is None then the function will construct a new neighbor list whose capacity is inferred from R. If neighbor_list is given then it will update the neighbor list (with fixed capacity) if any particle has moved more than dr_threshold / 2. Note that only `neighbor_list_fn(R, neighbor_list)` can be `jit` since it keeps array shapes fixed. """ is_format_valid(format) box_size = lax.stop_gradient(box_size) r_cutoff = lax.stop_gradient(r_cutoff) dr_threshold = lax.stop_gradient(dr_threshold) box_size = f32(box_size) cutoff = r_cutoff + dr_threshold cutoff_sq = cutoff ** 2 threshold_sq = (dr_threshold / f32(2)) ** 2 metric_sq = _displacement_or_metric_to_metric_sq(displacement_or_metric) cell_size = cutoff if fractional_coordinates: cell_size = cutoff / box_size box_size = f32(1) use_cell_list = jnp.all(cell_size < box_size / 3.) 
and not disable_cell_list @jit def candidate_fn(R, **kwargs): return jnp.broadcast_to(jnp.reshape(jnp.arange(R.shape[0]), (1, R.shape[0])), (R.shape[0], R.shape[0])) @jit def cell_list_candidate_fn(cl, R, **kwargs): N, dim = R.shape R = cl.position_buffer idx = cl.id_buffer cell_idx = [idx] for dindex in _neighboring_cells(dim): if onp.all(dindex == 0): continue cell_idx += [_shift_array(idx, dindex)] cell_idx = jnp.concatenate(cell_idx, axis=-2) cell_idx = cell_idx[..., jnp.newaxis, :, :] cell_idx = jnp.broadcast_to(cell_idx, idx.shape[:-1] + cell_idx.shape[-2:]) def copy_values_from_cell(value, cell_value, cell_id): scatter_indices = jnp.reshape(cell_id, (-1,)) cell_value = jnp.reshape(cell_value, (-1,) + cell_value.shape[-2:]) return ops.index_update(value, scatter_indices, cell_value) # NOTE(schsam): Currently, this makes a verlet list that is larger than # needed since the idx buffer inherets its size from the cell-list. In # three-dimensions this seems to translate into an occupancy of ~70%. We # can make this more efficient by shrinking the verlet list at the cost of # another sort. However, this seems possibly less efficient than just # computing everything. neighbor_idx = jnp.zeros((N + 1,) + cell_idx.shape[-2:], jnp.int32) neighbor_idx = copy_values_from_cell(neighbor_idx, cell_idx, idx) return neighbor_idx[:-1, :, 0] @jit def mask_self_fn(idx): self_mask = idx == jnp.reshape(jnp.arange(idx.shape[0]), (idx.shape[0], 1)) return jnp.where(self_mask, idx.shape[0], idx) @jit def prune_neighbor_list_dense(R, idx, **kwargs): d = partial(metric_sq, **kwargs) d = space.map_neighbor(d) N = R.shape[0] neigh_R = R[idx] dR = d(R, neigh_R) mask = (dR < cutoff_sq) & (idx < N) out_idx = N * jnp.ones(idx.shape, jnp.int32) cumsum = jnp.cumsum(mask, axis=1) index = jnp.where(mask, cumsum - 1, idx.shape[1] - 1) p_index = jnp.arange(idx.shape[0])[:, None] out_idx = out_idx.at[p_index, index].set(idx) max_occupancy = jnp.max(cumsum[:, -1]) return out_idx[:, :-1], max_occupancy @jit def prune_neighbor_list_sparse(R, idx, **kwargs): d = partial(metric_sq, **kwargs) d = space.map_bond(d) N = R.shape[0] sender_idx = jnp.broadcast_to(jnp.arange(N)[:, None], idx.shape) sender_idx = jnp.reshape(sender_idx, (-1,)) receiver_idx = jnp.reshape(idx, (-1,)) dR = d(R[sender_idx], R[receiver_idx]) mask = (dR < cutoff_sq) & (receiver_idx < N) if format is NeighborListFormat.OrderedSparse: mask = mask & (receiver_idx < sender_idx) out_idx = N * jnp.ones(receiver_idx.shape, jnp.int32) cumsum = jnp.cumsum(mask) index = jnp.where(mask, cumsum - 1, len(receiver_idx) - 1) receiver_idx = out_idx.at[index].set(receiver_idx) sender_idx = out_idx.at[index].set(sender_idx) max_occupancy = cumsum[-1] return jnp.stack((receiver_idx[:-1], sender_idx[:-1])), max_occupancy def neighbor_list_fn(R: Array, neighbor_list: Optional[NeighborList]=None, extra_capacity: int=0, **kwargs) -> NeighborList: nbrs = neighbor_list def neighbor_fn(R_and_overflow, max_occupancy=None): R, overflow = R_and_overflow N = R.shape[0] if cell_list_fn is not None: cl = cell_list_fn(R) idx = cell_list_candidate_fn(cl, R, **kwargs) else: idx = candidate_fn(R, **kwargs) if mask_self: idx = mask_self_fn(idx) if is_sparse(format): idx, occupancy = prune_neighbor_list_sparse(R, idx, **kwargs) else: idx, occupancy = prune_neighbor_list_dense(R, idx, **kwargs) if max_occupancy is None: _extra_capacity = (extra_capacity if not is_sparse(format) else N * extra_capacity) max_occupancy = int(occupancy * capacity_multiplier + _extra_capacity) if max_occupancy > 
R.shape[0] and not is_sparse(format): max_occupancy = R.shape[0] padding = max_occupancy - occupancy if max_occupancy > occupancy: idx = jnp.concatenate( [idx, N * jnp.ones((idx.shape[0], padding), dtype=idx.dtype)], axis=1) idx = idx[:, :max_occupancy] update_fn = (neighbor_list_fn if neighbor_list is None else neighbor_list.update_fn) return NeighborList( idx, R, jnp.logical_or(overflow, (max_occupancy < occupancy)), max_occupancy, format, cell_list_fn, update_fn) # pytype: disable=wrong-arg-count if nbrs is None: cell_list_fn = (cell_list(box_size, cell_size, R, capacity_multiplier) if use_cell_list else None) return neighbor_fn((R, False)) else: cell_list_fn = nbrs.cell_list_fn neighbor_fn = partial(neighbor_fn, max_occupancy=nbrs.max_occupancy) d = partial(metric_sq, **kwargs) d = vmap(d) return lax.cond( jnp.any(d(R, nbrs.reference_position) > threshold_sq), (R, nbrs.did_buffer_overflow), neighbor_fn, nbrs, lambda x: x) return NeighborListFns(lambda R, extra_capacity=0, **kwargs: neighbor_list_fn(R, extra_capacity=extra_capacity, **kwargs), lambda R, nbrs, **kwargs: # pytype: disable=wrong-arg-count neighbor_list_fn(R, nbrs, **kwargs)) def neighbor_list_mask(neighbor: NeighborList, mask_self: bool=False) -> Array: """Compute a mask for neighbor list.""" if is_sparse(neighbor.format): mask = neighbor.idx[0] < len(neighbor.reference_position) if mask_self: mask = mask & (neighbor.idx[0] != neighbor.idx[1]) return mask mask = neighbor.idx < len(neighbor.idx) if mask_self: N = len(neighbor.reference_position) self_mask = neighbor.idx != jnp.reshape(jnp.arange(N), (N, 1)) mask = mask & self_mask return mask def to_jraph(neighbor: NeighborList, mask: Array=None) -> jraph.GraphsTuple: """Convert a sparse neighbor list to a `jraph.GraphsTuple`. As in jraph, padding here is accomplished by adding a ficticious graph with a single node. Args: neighbor: A neighbor list that we will convert to the jraph format. Must be sparse. mask: An optional mask on the edges. Returns: A `jraph.GraphsTuple` that contains the topology of the neighbor list. """ if not is_sparse(neighbor.format): raise ValueError('Cannot convert a dense neighbor list to jraph format. ' 'Please use either NeighborListFormat.Sparse or ' 'NeighborListFormat.OrderedSparse.') receivers, senders = neighbor.idx N = len(neighbor.reference_position) _mask = neighbor_list_mask(neighbor) if mask is not None: _mask = _mask & mask cumsum = jnp.cumsum(_mask) index = jnp.where(_mask, cumsum - 1, len(receivers)) ordered = N * jnp.ones((len(receivers) + 1,), jnp.int32) receivers = ordered.at[index].set(receivers)[:-1] senders = ordered.at[index].set(senders)[:-1] mask = receivers < N return jraph.GraphsTuple( nodes=None, edges=None, receivers=receivers, senders=senders, globals=None, n_node=jnp.array([N, 1]), n_edge=jnp.array([jnp.sum(_mask), jnp.sum(~_mask)]), ) def to_dense(neighbor: NeighborList) -> Array: """Converts a sparse neighbor list to dense ids. 
Cannot be JIT.""" if neighbor.format is not Sparse: raise ValueError('Can only convert sparse neighbor lists to dense ones.') receivers, senders = neighbor.idx mask = neighbor_list_mask(neighbor) receivers = receivers[mask] senders = senders[mask] N = len(neighbor.reference_position) count = ops.segment_sum(jnp.ones(len(receivers), jnp.int32), receivers, N) max_count = jnp.max(count) offset = jnp.tile(jnp.arange(max_count), N)[:len(senders)] hashes = senders * max_count + offset dense_idx = N * jnp.ones((N * max_count,), jnp.int32) dense_idx = dense_idx.at[hashes].set(receivers).reshape((N, max_count)) return dense_idx Dense = NeighborListFormat.Dense Sparse = NeighborListFormat.Sparse OrderedSparse = NeighborListFormat.OrderedSparse
2.359375
2
rhucrl_experiments/evaluate/launch_evaluate_mass.py
sebascuri/rhucrl
1
2507
<filename>rhucrl_experiments/evaluate/launch_evaluate_mass.py<gh_stars>1-10
"""Run from rhucrl_experiments.evaluate folder."""
import socket

from lsf_runner import init_runner, make_commands

from rhucrl_experiments.evaluate.utilities import ENVIRONMENTS

RARL_DIR = "../../runs/RARLAgent"
ZERO_SUM_DIR = "../../runs/ZeroSumAgent"
SCRIPT = "evaluate_mass_change.py"
EXPERIMENTS = {
    "supermodularity": {"algorithm": "RARL_MF", "base-dir": RARL_DIR},
    "shallow": {"algorithm": "RHUCRL", "base-dir": ZERO_SUM_DIR},
    "greedy": {"algorithm": "RHUCRL", "base-dir": ZERO_SUM_DIR},
    "lazy": {"algorithm": "HUCRL", "base-dir": RARL_DIR},
}.get(socket.gethostname(), {"algorithm": "RARL", "base-dir": RARL_DIR})

runner = init_runner("EvaluateMassChange.", num_threads=4)
for seed in [0, 1, 2, 3, 4]:
    base_args = {"num-runs": 10, "seed": seed}
    base_args.update(**EXPERIMENTS)
    commands = make_commands(
        SCRIPT, base_args=base_args, common_hyper_args={"environment": ENVIRONMENTS}
    )
    runner.run_batch(commands)
1.84375
2
src/sentry/api/endpoints/project_tags.py
seukjung/sentry-custom
1
2508
<filename>src/sentry/api/endpoints/project_tags.py
from __future__ import absolute_import

import six

from rest_framework.response import Response

from sentry.api.bases.project import ProjectEndpoint
from sentry.models import TagKey, TagKeyStatus


class ProjectTagsEndpoint(ProjectEndpoint):
    def get(self, request, project):
        tag_keys = TagKey.objects.filter(
            project=project,
            status=TagKeyStatus.VISIBLE,
        )

        data = []
        for tag_key in tag_keys:
            data.append({
                'id': six.text_type(tag_key.id),
                'key': TagKey.get_standardized_key(tag_key.key),
                'name': tag_key.get_label(),
                'uniqueValues': tag_key.values_seen,
            })

        return Response(data)
2.0625
2
examples/02 - callbacks/callbacks.py
TensorTom/async-Eel
9
2509
from __future__ import print_function  # For Py2/3 compatibility

import async_eel
import random
import asyncio

loop = asyncio.get_event_loop()


@async_eel.expose
async def py_random():
    return random.random()


async def print_num(n):
    """callback of js_random"""
    print('Got this from Javascript:', n)


async def main():
    try:
        async_eel.init('web')
        await async_eel.start('callbacks.html', size=(400, 300))

        # Call Javascript function, and pass explicit callback function
        await async_eel.js_random()(print_num)

        # Do the same with an inline callback
        await async_eel.js_random()(lambda n: print('2Got this from Javascript:', n))
    except Exception:
        import traceback
        traceback.print_exc()


if __name__ == '__main__':
    asyncio.run_coroutine_threadsafe(main(), loop)
    loop.run_forever()
3.125
3
datacube/index/_api.py
AMA-AC/datacube-core
2
2510
<reponame>AMA-AC/datacube-core
# coding=utf-8
"""
Access methods for indexing datasets & products.
"""

import logging

from datacube.config import LocalConfig
from datacube.drivers import index_driver_by_name, index_drivers
from .index import Index

_LOG = logging.getLogger(__name__)


def index_connect(local_config=None, application_name=None, validate_connection=True):
    # type: (LocalConfig, str, bool) -> Index
    """
    Create a Data Cube Index that can connect to a PostgreSQL server

    It contains all the required connection parameters, but doesn't actually
    check that the server is available.

    :param application_name: A short, alphanumeric name to identify this application.
    :param datacube.config.LocalConfig local_config: Config object to use. (optional)
    :param validate_connection: Validate database connection and schema immediately
    :rtype: datacube.index.index.Index
    :raises datacube.drivers.postgres._connections.IndexSetupError:
    """
    if local_config is None:
        local_config = LocalConfig.find()

    driver_name = local_config.get('index_driver', 'default')
    index_driver = index_driver_by_name(driver_name)
    if not index_driver:
        raise RuntimeError(
            "No index driver found for %r. %s available: %s" % (
                driver_name, len(index_drivers()), ', '.join(index_drivers())
            )
        )

    return index_driver.connect_to_index(local_config,
                                         application_name=application_name,
                                         validate_connection=validate_connection)
2.78125
3
pgarchives/loader/load_message.py
WeilerWebServices/PostgreSQL
0
2511
<filename>pgarchives/loader/load_message.py #!/usr/bin/env python3 # # load_message.py - takes a single email or mbox formatted # file on stdin or in a file and reads it into the database. # import os import sys from optparse import OptionParser from configparser import ConfigParser import psycopg2 from lib.storage import ArchivesParserStorage from lib.mbox import MailboxBreakupParser from lib.exception import IgnorableException from lib.log import log, opstatus from lib.varnish import VarnishPurger def log_failed_message(listid, srctype, src, msg, err): try: msgid = msg.msgid except Exception: msgid = "<unknown>" log.error("Failed to load message (msgid %s) from %s, spec %s: %s" % (msgid, srctype, src, err)) # We also put the data in the db. This happens in the main transaction # so if the whole script dies, it goes away... conn.cursor().execute("INSERT INTO loaderrors (listid, msgid, srctype, src, err) VALUES (%(listid)s, %(msgid)s, %(srctype)s, %(src)s, %(err)s)", { 'listid': listid, 'msgid': msgid, 'srctype': srctype, 'src': src, 'err': str(str(err), 'us-ascii', 'replace'), }) if __name__ == "__main__": optparser = OptionParser() optparser.add_option('-l', '--list', dest='list', help='Name of list to load message for') optparser.add_option('-d', '--directory', dest='directory', help='Load all messages in directory') optparser.add_option('-m', '--mbox', dest='mbox', help='Load all messages in mbox') optparser.add_option('-i', '--interactive', dest='interactive', action='store_true', help='Prompt after each message') optparser.add_option('-v', '--verbose', dest='verbose', action='store_true', help='Verbose output') optparser.add_option('--force-date', dest='force_date', help='Override date (used for dates that can\'t be parsed)') optparser.add_option('--filter-msgid', dest='filter_msgid', help='Only process message with given msgid') (opt, args) = optparser.parse_args() if (len(args)): print("No bare arguments accepted") optparser.print_usage() sys.exit(1) if not opt.list: print("List must be specified") optparser.print_usage() sys.exit(1) if opt.directory and opt.mbox: print("Can't specify both directory and mbox!") optparser.print_usage() sys.exit(1) if opt.force_date and (opt.directory or opt.mbox) and not opt.filter_msgid: print("Can't use force_date with directory or mbox - only individual messages") optparser.print_usage() sys.exit(1) if opt.filter_msgid and not (opt.directory or opt.mbox): print("filter_msgid makes no sense without directory or mbox!") optparser.print_usage() sys.exit(1) log.set(opt.verbose) cfg = ConfigParser() cfg.read('%s/archives.ini' % os.path.realpath(os.path.dirname(sys.argv[0]))) try: connstr = cfg.get('db', 'connstr') except Exception: connstr = 'need_connstr' conn = psycopg2.connect(connstr) curs = conn.cursor() # Take an advisory lock to force serialization. # We could do this "properly" by reordering operations and using ON CONFLICT, # but concurrency is not that important and this is easier... 
try: curs.execute("SET statement_timeout='30s'") curs.execute("SELECT pg_advisory_xact_lock(8059944559669076)") except Exception as e: print(("Failed to wait on advisory lock: %s" % e)) sys.exit(1) # Get the listid we're working on curs.execute("SELECT listid FROM lists WHERE listname=%(list)s", { 'list': opt.list }) r = curs.fetchall() if len(r) != 1: log.error("List %s not found" % opt.list) conn.close() sys.exit(1) listid = r[0][0] purges = set() if opt.directory: # Parse all files in directory for x in os.listdir(opt.directory): log.status("Parsing file %s" % x) with open(os.path.join(opt.directory, x)) as f: ap = ArchivesParserStorage() ap.parse(f) if opt.filter_msgid and not ap.is_msgid(opt.filter_msgid): continue try: ap.analyze(date_override=opt.force_date) except IgnorableException as e: log_failed_message(listid, "directory", os.path.join(opt.directory, x), ap, e) opstatus.failed += 1 continue ap.store(conn, listid) purges.update(ap.purges) if opt.interactive: print("Interactive mode, committing transaction") conn.commit() print("Proceed to next message with Enter, or input a period (.) to stop processing") x = input() if x == '.': print("Ok, aborting!") break print("---------------------------------") elif opt.mbox: if not os.path.isfile(opt.mbox): print("File %s does not exist" % opt.mbox) sys.exit(1) mboxparser = MailboxBreakupParser(opt.mbox) while not mboxparser.EOF: ap = ArchivesParserStorage() msg = next(mboxparser) if not msg: break ap.parse(msg) if opt.filter_msgid and not ap.is_msgid(opt.filter_msgid): continue try: ap.analyze(date_override=opt.force_date) except IgnorableException as e: log_failed_message(listid, "mbox", opt.mbox, ap, e) opstatus.failed += 1 continue ap.store(conn, listid) purges.update(ap.purges) if mboxparser.returncode(): log.error("Failed to parse mbox:") log.error(mboxparser.stderr_output()) sys.exit(1) else: # Parse single message on stdin ap = ArchivesParserStorage() ap.parse(sys.stdin.buffer) try: ap.analyze(date_override=opt.force_date) except IgnorableException as e: log_failed_message(listid, "stdin", "", ap, e) conn.close() sys.exit(1) ap.store(conn, listid) purges.update(ap.purges) if opstatus.stored: log.log("Stored message with message-id %s" % ap.msgid) conn.commit() conn.close() opstatus.print_status() VarnishPurger(cfg).purge(purges)
2.703125
3
shop/migrations/0009_auto_20200310_1430.py
manson800819/test
0
2512
<filename>shop/migrations/0009_auto_20200310_1430.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-03-10 14:30
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('shop', '0008_auto_20200310_1134'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='category',
            name='id',
        ),
        migrations.AlterField(
            model_name='category',
            name='name',
            field=models.CharField(db_index=True, max_length=200, primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='product',
            name='type1',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='products', to='shop.Type1'),
        ),
    ]
1.359375
1
lib/dataset/iNaturalist.py
jrcai/ACE
18
2513
from dataset.baseset import BaseSet
import random, cv2
import numpy as np


class iNaturalist(BaseSet):
    def __init__(self, mode='train', cfg=None, transform=None):
        super(iNaturalist, self).__init__(mode, cfg, transform)
        random.seed(0)
        self.class_dict = self._get_class_dict()

    def __getitem__(self, index):
        if self.cfg.TRAIN.SAMPLER.TYPE == "weighted sampler" and self.mode == 'train':
            assert self.cfg.TRAIN.SAMPLER.WEIGHTED_SAMPLER.TYPE in ["balance", 'square', 'progressive']
            if self.cfg.TRAIN.SAMPLER.WEIGHTED_SAMPLER.TYPE == "balance":
                sample_class = random.randint(0, self.num_classes - 1)
            elif self.cfg.TRAIN.SAMPLER.WEIGHTED_SAMPLER.TYPE == "square":
                sample_class = np.random.choice(np.arange(self.num_classes), p=self.square_p)
            else:
                sample_class = np.random.choice(np.arange(self.num_classes), p=self.progress_p)
            sample_indexes = self.class_dict[sample_class]
            index = random.choice(sample_indexes)

        now_info = self.data[index]
        img = self._get_image(now_info)
        image = self.transform(img)
        meta = dict()
        image_label = now_info['category_id']  # 0-index
        return image, image_label, meta
2.59375
3
tests/test_conferences.py
mattclark/osf.io
0
2514
# -*- coding: utf-8 -*- import mock from nose.tools import * # noqa (PEP8 asserts) import hmac import hashlib from StringIO import StringIO from django.core.exceptions import ValidationError from django.db import IntegrityError import furl from framework.auth import get_or_create_user from framework.auth.core import Auth from osf.models import OSFUser, AbstractNode from addons.wiki.models import WikiVersion from osf.exceptions import BlacklistedEmailError from website import settings from website.conferences import views from website.conferences import utils, message from website.util import api_url_for, web_url_for from tests.base import OsfTestCase, fake from osf_tests.factories import ConferenceFactory, ProjectFactory, UserFactory def assert_absolute(url): parsed_domain = furl.furl(settings.DOMAIN) parsed_url = furl.furl(url) assert_equal(parsed_domain.host, parsed_url.host) def assert_equal_urls(first, second): parsed_first = furl.furl(first) parsed_first.port = None parsed_second = furl.furl(second) parsed_second.port = None assert_equal(parsed_first, parsed_second) def create_fake_conference_nodes(n, conference): nodes = [] for i in range(n): node = ProjectFactory(is_public=True) conference.submissions.add(node) node.save() nodes.append(node) return nodes def create_fake_conference_nodes_bad_data(conference, n, bad_n, endpoint): nodes = [] for i in range(n): node = ProjectFactory(is_public=True) conference.submissions.add(node) # inject bad data if i < bad_n: # Delete only contributor node.contributor_set.filter(user=node.contributors.first()).delete() node.save() nodes.append(node) return nodes class TestConferenceUtils(OsfTestCase): def test_get_or_create_user_exists(self): user = UserFactory() fetched, created = get_or_create_user(user.fullname, user.username, is_spam=True) assert_false(created) assert_equal(user._id, fetched._id) assert_false('is_spam' in fetched.system_tags) def test_get_or_create_user_not_exists(self): fullname = '<NAME>' username = '<EMAIL>' fetched, created = get_or_create_user(fullname, username, is_spam=False) fetched.save() # in order to access m2m fields, e.g. tags assert_true(created) assert_equal(fetched.fullname, fullname) assert_equal(fetched.username, username) assert_false('is_spam' in fetched.system_tags) def test_get_or_create_user_is_spam(self): fullname = '<NAME>' username = '<EMAIL>' fetched, created = get_or_create_user(fullname, username, is_spam=True) fetched.save() # in order to access m2m fields, e.g. 
tags assert_true(created) assert_equal(fetched.fullname, fullname) assert_equal(fetched.username, username) assert_true('is_spam' in fetched.system_tags) def test_get_or_create_user_with_blacklisted_domain(self): fullname = 'Kanye West' username = '<EMAIL>' with assert_raises(BlacklistedEmailError) as e: get_or_create_user(fullname, username, is_spam=True) assert_equal(e.exception.message, 'Invalid Email') class ContextTestCase(OsfTestCase): MAILGUN_API_KEY = 'mailkimp' @classmethod def setUpClass(cls): super(ContextTestCase, cls).setUpClass() settings.MAILGUN_API_KEY, cls._MAILGUN_API_KEY = cls.MAILGUN_API_KEY, settings.MAILGUN_API_KEY @classmethod def tearDownClass(cls): super(ContextTestCase, cls).tearDownClass() settings.MAILGUN_API_KEY = cls._MAILGUN_API_KEY def make_context(self, method='POST', **kwargs): data = { 'X-Mailgun-Sscore': 0, 'timestamp': '123', 'token': 'secret', 'signature': hmac.new( key=settings.MAILGUN_API_KEY, msg='{}{}'.format('123', 'secret'), digestmod=hashlib.sha256, ).hexdigest(), } data.update(kwargs.pop('data', {})) data = { key: value for key, value in data.items() if value is not None } return self.app.app.test_request_context(method=method, data=data, **kwargs) class TestProvisionNode(ContextTestCase): def setUp(self): super(TestProvisionNode, self).setUp() self.node = ProjectFactory() self.user = self.node.creator self.conference = ConferenceFactory() self.body = 'dragon on my back' self.content = 'dragon attack' self.attachment = StringIO(self.content) self.recipient = '{0}{1}-<EMAIL>'.format( 'test-' if settings.DEV_MODE else '', self.conference.endpoint, ) def make_context(self, **kwargs): data = { 'attachment-count': '1', 'attachment-1': (self.attachment, 'attachment-1'), 'X-Mailgun-Sscore': 0, 'recipient': self.recipient, 'stripped-text': self.body, } data.update(kwargs.pop('data', {})) return super(TestProvisionNode, self).make_context(data=data, **kwargs) def test_provision(self): with self.make_context(): msg = message.ConferenceMessage() utils.provision_node(self.conference, msg, self.node, self.user) assert_true(self.node.is_public) assert_in(self.conference.admins.first(), self.node.contributors) assert_in('emailed', self.node.system_tags) assert_in(self.conference.endpoint, self.node.system_tags) assert self.node in self.conference.submissions.all() assert_not_in('spam', self.node.system_tags) def test_provision_private(self): self.conference.public_projects = False self.conference.save() with self.make_context(): msg = message.ConferenceMessage() utils.provision_node(self.conference, msg, self.node, self.user) assert_false(self.node.is_public) assert_in(self.conference.admins.first(), self.node.contributors) assert_in('emailed', self.node.system_tags) assert_not_in('spam', self.node.system_tags) def test_provision_spam(self): with self.make_context(data={'X-Mailgun-Sscore': message.SSCORE_MAX_VALUE + 1}): msg = message.ConferenceMessage() utils.provision_node(self.conference, msg, self.node, self.user) assert_false(self.node.is_public) assert_in(self.conference.admins.first(), self.node.contributors) assert_in('emailed', self.node.system_tags) assert_in('spam', self.node.system_tags) @mock.patch('website.conferences.utils.waterbutler_api_url_for') @mock.patch('website.conferences.utils.requests.put') def test_upload(self, mock_put, mock_get_url): mock_get_url.return_value = 'http://queen.com/' file_name = 'hammer-to-fall' self.attachment.filename = file_name self.attachment.content_type = 'application/json' utils.upload_attachment(self.user, 
self.node, self.attachment) mock_get_url.assert_called_with( self.node._id, 'osfstorage', _internal=True, base_url=self.node.osfstorage_region.waterbutler_url, cookie=self.user.get_or_create_cookie(), name=file_name ) mock_put.assert_called_with( mock_get_url.return_value, data=self.content, ) @mock.patch('website.conferences.utils.waterbutler_api_url_for') @mock.patch('website.conferences.utils.requests.put') def test_upload_no_file_name(self, mock_put, mock_get_url): mock_get_url.return_value = 'http://queen.com/' self.attachment.filename = '' self.attachment.content_type = 'application/json' utils.upload_attachment(self.user, self.node, self.attachment) mock_get_url.assert_called_with( self.node._id, 'osfstorage', _internal=True, base_url=self.node.osfstorage_region.waterbutler_url, cookie=self.user.get_or_create_cookie(), name=settings.MISSING_FILE_NAME, ) mock_put.assert_called_with( mock_get_url.return_value, data=self.content, ) @mock.patch('website.conferences.utils.upload_attachments') def test_add_poster_by_email(self, mock_upload_attachments): conference = ConferenceFactory() with self.make_context(data={'from': '<EMAIL>', 'subject': 'It\'s PARTY TIME!'}): msg = message.ConferenceMessage() views.add_poster_by_email(conference, msg) user = OSFUser.objects.get(username='<EMAIL>') assert user.email == '<EMAIL>' assert user.fullname == user._id # user's shouldn't be able to use email as fullname, so we use the guid. class TestMessage(ContextTestCase): PUSH_CONTEXT = False def test_verify_signature_valid(self): with self.make_context(): msg = message.ConferenceMessage() msg.verify_signature() def test_verify_signature_invalid(self): with self.make_context(data={'signature': 'fake'}): self.app.app.preprocess_request() msg = message.ConferenceMessage() with assert_raises(message.ConferenceError): msg.verify_signature() def test_is_spam_false_missing_headers(self): ctx = self.make_context( method='POST', data={'X-Mailgun-Sscore': message.SSCORE_MAX_VALUE - 1}, ) with ctx: msg = message.ConferenceMessage() assert not msg.is_spam def test_is_spam_false_all_headers(self): ctx = self.make_context( method='POST', data={ 'X-Mailgun-Sscore': message.SSCORE_MAX_VALUE - 1, 'X-Mailgun-Dkim-Check-Result': message.DKIM_PASS_VALUES[0], 'X-Mailgun-Spf': message.SPF_PASS_VALUES[0], }, ) with ctx: msg = message.ConferenceMessage() assert not msg.is_spam def test_is_spam_true_sscore(self): ctx = self.make_context( method='POST', data={'X-Mailgun-Sscore': message.SSCORE_MAX_VALUE + 1}, ) with ctx: msg = message.ConferenceMessage() assert msg.is_spam def test_is_spam_true_dkim(self): ctx = self.make_context( method='POST', data={'X-Mailgun-Dkim-Check-Result': message.DKIM_PASS_VALUES[0][::-1]}, ) with ctx: msg = message.ConferenceMessage() assert msg.is_spam def test_is_spam_true_spf(self): ctx = self.make_context( method='POST', data={'X-Mailgun-Spf': message.SPF_PASS_VALUES[0][::-1]}, ) with ctx: msg = message.ConferenceMessage() assert msg.is_spam def test_subject(self): ctx = self.make_context( method='POST', data={'subject': 'RE: Hip Hopera'}, ) with ctx: msg = message.ConferenceMessage() assert_equal(msg.subject, 'Hip Hopera') def test_recipient(self): address = '<EMAIL>' ctx = self.make_context( method='POST', data={'recipient': address}, ) with ctx: msg = message.ConferenceMessage() assert_equal(msg.recipient, address) def test_text(self): text = 'welcome to my nuclear family' ctx = self.make_context( method='POST', data={'stripped-text': text}, ) with ctx: msg = message.ConferenceMessage() 
assert_equal(msg.text, text) def test_sender_name(self): names = [ (' Fred', 'Fred'), (u'Me䬟', u'Me䬟'), (u'<EMAIL>', u'<EMAIL>'), (u'Fred <<EMAIL>>', u'Fred'), (u'"Fred" <<EMAIL>>', u'Fred'), ] for name in names: with self.make_context(data={'from': name[0]}): msg = message.ConferenceMessage() assert_equal(msg.sender_name, name[1]) def test_sender_email(self): emails = [ (u'<EMAIL>', u'<EMAIL>'), (u'<EMAIL>', u'<EMAIL>') ] for email in emails: with self.make_context(data={'from': email[0]}): msg = message.ConferenceMessage() assert_equal(msg.sender_email, email[1]) def test_route_invalid_pattern(self): with self.make_context(data={'recipient': '<EMAIL>'}): self.app.app.preprocess_request() msg = message.ConferenceMessage() with assert_raises(message.ConferenceError): msg.route def test_route_invalid_test(self): recipient = '{0}<EMAIL>'.format('' if settings.DEV_MODE else 'stage-') with self.make_context(data={'recipient': recipient}): self.app.app.preprocess_request() msg = message.ConferenceMessage() with assert_raises(message.ConferenceError): msg.route def test_route_valid_alternate(self): conf = ConferenceFactory(endpoint='chocolate', active=True) conf.name = 'Chocolate Conference' conf.field_names['submission2'] = 'data' conf.save() recipient = '{0}<EMAIL>'.format('test-' if settings.DEV_MODE else '') with self.make_context(data={'recipient': recipient}): self.app.app.preprocess_request() msg = message.ConferenceMessage() assert_equal(msg.conference_name, 'chocolate') assert_equal(msg.conference_category, 'data') conf.__class__.delete(conf) def test_route_valid_b(self): recipient = '{0}<EMAIL>'.format('test-' if settings.DEV_MODE else '') with self.make_context(data={'recipient': recipient}): self.app.app.preprocess_request() msg = message.ConferenceMessage() assert_equal(msg.conference_name, 'conf') assert_equal(msg.conference_category, 'poster') def test_alternate_route_invalid(self): recipient = '{0}<EMAIL>'.format('test-' if settings.DEV_MODE else '') with self.make_context(data={'recipient': recipient}): self.app.app.preprocess_request() msg = message.ConferenceMessage() with assert_raises(message.ConferenceError): msg.route def test_attachments_count_zero(self): with self.make_context(data={'attachment-count': '0'}): msg = message.ConferenceMessage() assert_equal(msg.attachments, []) def test_attachments_count_one(self): content = 'slightly mad' sio = StringIO(content) ctx = self.make_context( method='POST', data={ 'attachment-count': 1, 'attachment-1': (sio, 'attachment-1'), }, ) with ctx: msg = message.ConferenceMessage() assert_equal(len(msg.attachments), 1) assert_equal(msg.attachments[0].read(), content) class TestConferenceEmailViews(OsfTestCase): def test_redirect_to_meetings_url(self): url = '/presentations/' res = self.app.get(url) assert_equal(res.status_code, 302) res = res.follow() assert_equal(res.request.path, '/meetings/') def test_conference_submissions(self): AbstractNode.objects.all().delete() conference1 = ConferenceFactory() conference2 = ConferenceFactory() # Create conference nodes create_fake_conference_nodes( 3, conference1, ) create_fake_conference_nodes( 2, conference2, ) url = api_url_for('conference_submissions') res = self.app.get(url) assert_equal(res.json['success'], True) def test_conference_plain_returns_200(self): conference = ConferenceFactory() url = web_url_for('conference_results__plain', meeting=conference.endpoint) res = self.app.get(url) assert_equal(res.status_code, 200) def test_conference_data(self): conference = ConferenceFactory() # 
Create conference nodes n_conference_nodes = 3 create_fake_conference_nodes( n_conference_nodes, conference, ) # Create a non-conference node ProjectFactory() url = api_url_for('conference_data', meeting=conference.endpoint) res = self.app.get(url) assert_equal(res.status_code, 200) assert_equal(len(res.json), n_conference_nodes) # Regression for OSF-8864 to confirm bad project data does not make whole conference break def test_conference_bad_data(self): conference = ConferenceFactory() # Create conference nodes n_conference_nodes = 3 n_conference_nodes_bad = 1 create_fake_conference_nodes_bad_data( conference, n_conference_nodes, n_conference_nodes_bad, conference, ) # Create a non-conference node ProjectFactory() url = api_url_for('conference_data', meeting=conference.endpoint) res = self.app.get(url) assert_equal(res.status_code, 200) assert_equal(len(res.json), n_conference_nodes - n_conference_nodes_bad) def test_conference_data_url_upper(self): conference = ConferenceFactory() # Create conference nodes n_conference_nodes = 3 create_fake_conference_nodes( n_conference_nodes, conference, ) # Create a non-conference node ProjectFactory() url = api_url_for('conference_data', meeting=conference.endpoint.upper()) res = self.app.get(url) assert_equal(res.status_code, 200) assert_equal(len(res.json), n_conference_nodes) def test_conference_data_tag_upper(self): conference = ConferenceFactory() # Create conference nodes n_conference_nodes = 3 create_fake_conference_nodes( n_conference_nodes, conference, ) # Create a non-conference node ProjectFactory() url = api_url_for('conference_data', meeting=conference.endpoint) res = self.app.get(url) assert_equal(res.status_code, 200) assert_equal(len(res.json), n_conference_nodes) def test_conference_results(self): conference = ConferenceFactory() url = web_url_for('conference_results', meeting=conference.endpoint) res = self.app.get(url) assert_equal(res.status_code, 200) def test_confererence_results_endpoint_is_case_insensitive(self): ConferenceFactory(endpoint='StudySwap') url = web_url_for('conference_results', meeting='studyswap') res = self.app.get(url) assert_equal(res.status_code, 200) class TestConferenceModel(OsfTestCase): def test_endpoint_is_required(self): with assert_raises(IntegrityError): ConferenceFactory(endpoint=None, name=fake.company()).save() def test_name_is_required(self): with assert_raises(IntegrityError): ConferenceFactory(endpoint='spsp2014', name=None).save() def test_default_field_names(self): conf = ConferenceFactory(endpoint='cookie', name='Cookies Conference') conf.save() assert_equal(conf.field_names['submission1'], 'poster') assert_equal(conf.field_names['mail_subject'], 'Presentation title') def test_conference_valid_submissions(self): conf = ConferenceFactory(endpoint='Hamburgers', name='Hamburger conference') conf.save() # 3 good nodes added create_fake_conference_nodes(3, conf) # Deleted node added deleted_node = ProjectFactory(is_public=True) deleted_node.is_deleted = True deleted_node.save() conf.submissions.add(deleted_node) # Private node added private_node = ProjectFactory(is_public=False) conf.submissions.add(private_node) assert_equal(conf.submissions.count(), 5) assert_equal(conf.valid_submissions.count(), 3) class TestConferenceIntegration(ContextTestCase): @mock.patch('website.conferences.views.send_mail') @mock.patch('website.conferences.utils.upload_attachments') def test_integration(self, mock_upload, mock_send_mail): fullname = '<NAME>' username = '<EMAIL>' title = 'good songs' conference = 
ConferenceFactory() body = 'dragon on my back' content = 'dragon attack' recipient = '{0}{1}-<EMAIL>'.format( 'test-' if settings.DEV_MODE else '', conference.endpoint, ) self.app.post( api_url_for('meeting_hook'), { 'X-Mailgun-Sscore': 0, 'timestamp': '123', 'token': 'secret', 'signature': hmac.new( key=settings.MAILGUN_API_KEY, msg='{}{}'.format('123', 'secret'), digestmod=hashlib.sha256, ).hexdigest(), 'attachment-count': '1', 'X-Mailgun-Sscore': 0, 'from': '{0} <{1}>'.format(fullname, username), 'recipient': recipient, 'subject': title, 'stripped-text': body, }, upload_files=[ ('attachment-1', 'attachment-1', content), ], ) assert_true(mock_upload.called) users = OSFUser.objects.filter(username=username) assert_equal(users.count(), 1) nodes = AbstractNode.objects.filter(title=title) assert_equal(nodes.count(), 1) node = nodes[0] assert_equal(WikiVersion.objects.get_for_node(node, 'home').content, body) assert_true(mock_send_mail.called) call_args, call_kwargs = mock_send_mail.call_args assert_absolute(call_kwargs['conf_view_url']) assert_absolute(call_kwargs['set_password_url']) assert_absolute(call_kwargs['profile_url']) assert_absolute(call_kwargs['file_url']) assert_absolute(call_kwargs['node_url']) @mock.patch('website.conferences.views.send_mail') def test_integration_inactive(self, mock_send_mail): conference = ConferenceFactory(active=False) fullname = '<NAME>' username = '<EMAIL>' title = 'good songs' body = 'dragon on my back' recipient = '{0}{1}-<EMAIL>'.format( 'test-' if settings.DEV_MODE else '', conference.endpoint, ) res = self.app.post( api_url_for('meeting_hook'), { 'X-Mailgun-Sscore': 0, 'timestamp': '123', 'token': 'secret', 'signature': hmac.new( key=settings.MAILGUN_API_KEY, msg='{}{}'.format('123', 'secret'), digestmod=hashlib.sha256, ).hexdigest(), 'attachment-count': '1', 'X-Mailgun-Sscore': 0, 'from': '{0} <{1}>'.format(fullname, username), 'recipient': recipient, 'subject': title, 'stripped-text': body, }, expect_errors=True, ) assert_equal(res.status_code, 406) call_args, call_kwargs = mock_send_mail.call_args assert_equal(call_args, (username, views.CONFERENCE_INACTIVE)) assert_equal(call_kwargs['fullname'], fullname) assert_equal_urls( call_kwargs['presentations_url'], web_url_for('conference_view', _absolute=True), ) @mock.patch('website.conferences.views.send_mail') @mock.patch('website.conferences.utils.upload_attachments') def test_integration_wo_full_name(self, mock_upload, mock_send_mail): username = '<EMAIL>' title = 'no full name only email' conference = ConferenceFactory() body = 'dragon on my back' content = 'dragon attack' recipient = '{0}{1}-<EMAIL>'.format( 'test-' if settings.DEV_MODE else '', conference.endpoint, ) self.app.post( api_url_for('meeting_hook'), { 'X-Mailgun-Sscore': 0, 'timestamp': '123', 'token': 'secret', 'signature': hmac.new( key=settings.MAILGUN_API_KEY, msg='{}{}'.format('123', 'secret'), digestmod=hashlib.sha256, ).hexdigest(), 'attachment-count': '1', 'X-Mailgun-Sscore': 0, 'from': username, 'recipient': recipient, 'subject': title, 'stripped-text': body, }, upload_files=[ ('attachment-1', 'attachment-1', content), ], ) assert_true(mock_upload.called) users = OSFUser.objects.filter(username=username) assert_equal(users.count(), 1) nodes = AbstractNode.objects.filter(title=title) assert_equal(nodes.count(), 1) node = nodes[0] assert_equal(WikiVersion.objects.get_for_node(node, 'home').content, body) assert_true(mock_send_mail.called) call_args, call_kwargs = mock_send_mail.call_args 
assert_absolute(call_kwargs['conf_view_url']) assert_absolute(call_kwargs['set_password_url']) assert_absolute(call_kwargs['profile_url']) assert_absolute(call_kwargs['file_url']) assert_absolute(call_kwargs['node_url']) @mock.patch('website.conferences.views.send_mail') @mock.patch('website.conferences.utils.upload_attachments') def test_create_conference_node_with_same_name_as_existing_node(self, mock_upload, mock_send_mail): conference = ConferenceFactory() user = UserFactory() title = 'Long Live Greg' ProjectFactory(creator=user, title=title) body = 'Greg is a good plant' content = 'Long may they reign.' recipient = '{0}{1}-<EMAIL>'.format( 'test-' if settings.DEV_MODE else '', conference.endpoint, ) self.app.post( api_url_for('meeting_hook'), { 'X-Mailgun-Sscore': 0, 'timestamp': '123', 'token': 'secret', 'signature': hmac.new( key=settings.MAILGUN_API_KEY, msg='{}{}'.format('123', 'secret'), digestmod=hashlib.sha256, ).hexdigest(), 'attachment-count': '1', 'X-Mailgun-Sscore': 0, 'from': '{0} <{1}>'.format(user.fullname, user.username), 'recipient': recipient, 'subject': title, 'stripped-text': body, }, upload_files=[ ('attachment-1', 'attachment-1', content), ], ) assert AbstractNode.objects.filter(title=title, creator=user).count() == 2 assert mock_upload.called assert mock_send_mail.called
2.015625
2
socket_tentacles/__init__.py
innovationgarage/socket-tentacles
0
2515
import socketserver
import socket
import sys
import threading
import json
import queue
import time
import datetime
import traceback

class TCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    def server_bind(self):
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.bind(self.server_address)

class Listener(threading.Thread):
    def run(self):
        kwargs = self._kwargs
        print("Listener: Started: %s" % kwargs)
        Handler = self._kwargs["handler"]
        server = self._kwargs["server"]

        class Server(socketserver.BaseRequestHandler):
            def handle(self):
                print("Listener: Connection request received: %s" % kwargs)
                Handler(server, self.request)

        self.server = TCPServer((kwargs["host"], kwargs["port"]), Server)
        self.server.serve_forever()

    def stop(self):
        self.server.shutdown()
        self.server.server_close()

class Connector(threading.Thread):
    def __init__(self, *arg, **kw):
        self.is_stopping = False
        threading.Thread.__init__(self, *arg, **kw)

    def run(self):
        print("Connector: Started: %s" % self._kwargs)
        while not self.is_stopping:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                try:
                    sock.connect((self._kwargs["host"], self._kwargs["port"]))
                    print("Connector: Connected: %s" % self._kwargs)
                    self._kwargs["handler"](self._kwargs["server"], sock)
                except Exception as e:
                    print(e)
                    traceback.print_exc()
            finally:
                sock.close()
            time.sleep(1)

    def stop(self):
        self.is_stopping = True

class Handler(object):
    encoding = "utf-8"
    binary = False
    filemode = "r"

    def __init__(self, server, conn):
        self.server = server
        self.conn = conn
        self.makefile()
        self.handle()

    def makefile(self):
        args = {"mode": self.filemode + ["", "b"][self.binary]}
        if not self.binary:
            args["encoding"] = self.encoding
        self.file = self.conn.makefile(**args)

    def handle(self):
        """self.conn is a socket object, self.file a file wrapper for that socket"""

    def __hash__(self):
        return id(self)

class ReceiveHandler(Handler):
    filemode = "r"

class SendHandler(Handler):
    filemode = "w"

class Server(object):
    def __init__(self, handlers):
        self.handlers = handlers
        self.config = None
        self.servers = {}

    def configure(self, config):
        self.config = config
        connections = {self.connection_key(connection): connection
                       for connection in config["connections"]}
        to_create = connections.keys() - self.servers.keys()
        to_destroy = self.servers.keys() - connections.keys()
        for key in to_create:
            server = self.start_connection(connections[key])
            server.start()
            self.servers[key] = server
        for key in to_destroy:
            server = self.servers.pop(key)
            server.stop()

    def connection_key(self, connection):
        return json.dumps(connection, sort_keys=True, separators=(',', ':'))

    def start_connection(self, connection):
        handler = self.handlers[connection["handler"]]
        addr = connection["address"].split(":")
        assert addr[0] == "tcp"
        host = "0.0.0.0"
        port = 1024
        if len(addr) == 2:
            port = addr[1]
        if len(addr) == 3:
            host, port = addr[1:]
        port = int(port)
        connhandler = {"listen": Listener, "connect": Connector}[connection["type"]]
        return connhandler(kwargs={"server": self, "host": host, "port": port, "handler": handler})

def run(config, handlers):
    server = Server(handlers)
    server.configure(config)
    return server
2.71875
3
G5/DerivedData/ParameterProbing/checkme.py
shooking/ZoomPedalFun
9
2516
# -*- coding: ascii -*-
import sys
import json

def check(data):
    OnOffstart = data.find(b"OnOff")
    if OnOffstart != -1:
        fxName = ""
        OnOffblockSize = 0x30
        for j in range(12):
            if data[OnOffstart + j + OnOffblockSize] == 0x00:
                break
            fxName = fxName + chr(data[OnOffstart + j + OnOffblockSize])
        tD = { "fxname" :fxName }
        mmax = []
        mdefault = []
        name = []
        mpedal = []
        numParameters = 0
        #print("OnOffStart at {}".format(OnOffstart))
        try:
            # this is WAY too large, let except break the loop
            for j in range(0, 2000):
                """
                if not ( data[OnOffstart + (j+1) * OnOffblockSize - 1] == 0x00 and data[OnOffstart + (j+1) * OnOffblockSize - 2] == 0x00):
                    # ZD2 format has a length and PRME offset. ZDL has none of this.
                    print("End of the parameters")
                    break;

                if not ( data[OnOffstart + (j) * OnOffblockSize + 0x18 ] == 0x00 and data[OnOffstart + (j) * OnOffblockSize + 0x19] == 0x00 and data[OnOffstart + (j) * OnOffblockSize + 0x1A] == 0x00 and data[OnOffstart + (j) * OnOffblockSize + 0x1B] == 0x00 ):
                    print("Empty next slot")
                    break
                """
                currName = ""
                for i in range(12):
                    if data[OnOffstart + j * OnOffblockSize + i] == 0x00:
                        break
                    currName = currName + chr(data[OnOffstart + j * OnOffblockSize + i])
                    if data[OnOffstart + j * OnOffblockSize + i] & 0x80:
                        raise Exception("Non binary char")
                if currName == "":
                    break
                name.append(currName)
                mmax.append( data[OnOffstart + j * OnOffblockSize + 12] + data[OnOffstart + j * OnOffblockSize + 13] * 256)
                mdefault.append(data[OnOffstart + j * OnOffblockSize + 16] + data[OnOffstart + j * OnOffblockSize + 17] * 256);
                if data[OnOffstart + j * OnOffblockSize + 0x2C]:
                    mpedal.append(True)
                else:
                    mpedal.append(False)
                #print(mmax[j])
                #print(mdefault[j])
                """
                print("[{}] {} {} {} {}".format( OnOffstart + (j+1) * OnOffblockSize,
                    hex(data[OnOffstart + (j+1) * OnOffblockSize]),
                    hex(data[OnOffstart + (j+1) * OnOffblockSize + 1]),
                    hex(data[OnOffstart + (j+1) * OnOffblockSize + 2]),
                    hex(data[OnOffstart + (j+1) * OnOffblockSize + 3])) )
                """
                #print("increment params")
                numParameters = numParameters + 1
        except:
            pass
        #print("Found {} parameters.".format(numParameters))
        tD['Parameters'] = []
        # 0 is the OnOff state
        # 1 is the name
        # so actual paramters start from index 2, but clearly there are 2 less
        for i in range(numParameters - 2):
            #print(i)
            tD['Parameters'].append({'name': name[i+2], 'mmax': mmax[i + 2], 'mdefault': mdefault[i + 2], 'pedal': mpedal[i+2]})
        #json.dump(tD, sys.stdout, indent=4)
        f = open(fxName+'.json', "w")
        json.dump(tD, f, indent=4)
        f.close()
        return fxName+'.OnOff'

# handles a zoom firmware
if __name__ == "__main__":
    if len(sys.argv) == 2:
        f = open(sys.argv[1], "rb")
        data = f.read()
        f.close()
        check(data)
2.5625
3
leehao/learn63.py
pilihaotian/pythonlearning
1
2517
# Random password drawn from a-z, A-Z, 0-9 and underscore
import random

source = ''
lower_char = [chr(x) for x in range(ord('a'), ord('z') + 1)]
upper_char = [chr(x) for x in range(ord('A'), ord('Z') + 1)]
number_char = [chr(x) for x in range(ord('0'), ord('9') + 1)]
source += "".join(lower_char)
source += "".join(upper_char)
source += "".join(number_char)
source += "_"
print(source)
# Randomly pick a 20-character string that includes the underscore
while True:
    s = "".join(random.sample(source, 20))
    if '_' in s:
        print(s)
        break
3.609375
4
gbic/tests.py
fga-eps-mds/2017.2-SiGI-Op_API
6
2518
from django.test import TestCase
from rest_framework.test import APIRequestFactory
from .models import GBIC, GBICType
from .views import GBICListViewSet

# Create your tests here.


class GBICTest(TestCase):
    def test_gbic_view_set(self):
        request = APIRequestFactory().get("")
        gbic_detail = GBICListViewSet.as_view(actions={'get': 'retrieve'})
        gbic_type_test = GBICType.objects.create(description='muito_bom')
        gbic_test = GBIC.objects.create(
            serial='showdaxuxa',
            patrimony_number='666',
            gbic_type=gbic_type_test
        )
        response = gbic_detail(request, pk=gbic_test.pk)
        self.assertEqual(response.status_code, 200)

    def test_deleted_gbic_view_set(self):
        request = APIRequestFactory().get("")
        gbic_detail = GBICListViewSet.as_view(actions={'get': 'retrieve'})
        gbic_type_test = GBICType.objects.create(description='muitoruim')
        gbic_test = GBIC.objects.create(
            serial='showdomilhao',
            patrimony_number='777',
            gbic_type=gbic_type_test
        )
        gbic_test.delete()
        response = gbic_detail(request, pk=gbic_test.pk)
        self.assertEqual(response.status_code, 404)
2.125
2
fruits/core/fruit.py
alienkrieg/fruits
4
2519
import inspect from typing import List, Union, Set, Any import numpy as np from fruits.cache import Cache, CoquantileCache from fruits.scope import force_input_shape, FitTransform from fruits.core.callback import AbstractCallback from fruits.signature.iss import SignatureCalculator, CachePlan from fruits.words.word import Word from fruits.sieving.abstract import FeatureSieve from fruits.preparation.abstract import DataPreparateur class Fruit: """Feature Extractor using iterated sums. A Fruit consists of a number of :class:`~fruits.core.fruit.FruitBranch` objects. At the end of the pipeline, each branch returns their own features and they will be concatenated by this class. A simple example (using two branches): .. code-block:: python fruit = fruits.Fruit("My Fruit") # optional: add preparateurs for preprocessing fruit.add(fruits.preparation.INC) # add words for iterated sums calculation fruit.add(fruits.words.creation.simplewords_by_weight(4)) # choose sieves fruit.add(fruits.sieving.PPV(0.5)) fruit.add(fruits.sieving.END) # add a new branch without INC fruit.fork() fruit.add(fruits.words.creation.simplewords_by_weight(4)) fruit.add(fruits.sieving.PPV(0.5)) fruit.add(fruits.sieving.END) # configure the fruit fruit.configure(mode="extended") # fit the fruit on a time series dataset fruit.fit(X_train) # transform the dataset X_train_transformed = fruit.transform(X_train) X_test_tranformed = fruit.transform(X_test) # use the transformed results (features) in a classifier ... The ``fruit`` above will result in ``2*8*2=32`` features per time series. """ def __init__(self, name: str = ""): self.name: str = name # list of FruitBranches self._branches: List[FruitBranch] = [] # pointer for the current branch index self._cbi: int = 0 self._fitted: bool = False @property def name(self) -> str: """Simple identifier for the Fruit object.""" return self._name @name.setter def name(self, name: str): self._name = name def fork(self, branch: "FruitBranch" = None): """Adds a new branch to the pipeline. If none is given, an empty FruitBranch will be created and switched to. :type branch: FruitBranch, optional """ if branch is None: branch = FruitBranch() self._branches.append(branch) self._cbi = len(self._branches) - 1 self._fitted = False def branch(self, index: int = None): """Returns the currently selected branch or the branch with the given index. :rtype: FruitBranch """ if index is None: return self._branches[self._cbi] return self._branches[index] def branches(self) -> list: """Returns all branches of this Fruit object. :rtype: list """ return self._branches def switch_branch(self, index: int): """Switches to the branch with the given index. :param index: Integer in ``[0, 1, ..., len(self.branches())-1]`` :type index: int """ if not (0 <= index < len(self._branches)): raise IndexError("Index has to be in [0, len(self.branches()))") self._cbi = index def add(self, *objects: Union[FitTransform, Word, type]): """Adds one or multiple object(s) to the currently selected branch. :param objects: One or more objects of the following types: - :class:`~fruits.preparation.abstract.DataPreparateur` - :class:`~fruits.words.word.Word` - :class:`~fruits.sieving.abstract.FeatureSieve` :type objects: Union[FitTransform, Word] """ if len(self._branches) == 0: self.fork() self._branches[self._cbi].add(*objects) self._fitted = False def nfeatures(self) -> int: """Returns the total number of features of all branches combined. 
:rtype: int """ return sum([branch.nfeatures() for branch in self._branches]) def configure(self, **kwargs: Any): """Makes changes to the default configuration of a all branches if arguments differ from ``None``. :param kwargs: For possible options, have a look at :meth:`fruits.core.fruit.FruitBranch.configure`. :type kwargs: Any """ for branch in self._branches: branch.configure(**kwargs) def fit(self, X: np.ndarray): """Fits all branches to the given data. :param X: (Multidimensional) time series dataset as an array of three dimensions. Have a look at :meth:`~fruits.scope.force_input_shape`. :type X: np.ndarray """ for branch in self._branches: branch.fit(X) self._fitted = True def transform(self, X: np.ndarray, callbacks: List[AbstractCallback] = []) -> np.ndarray: """Returns a two dimensional array of all features from all branches this Fruit object contains. :param X: (Multidimensional) time series dataset as an array of three dimensions. Have a look at :meth:`~fruits.scope.force_input_shape`. :type X: np.ndarray :param callbacks: List of callbacks. To write your own callback, override the class :class:`~fruits.core.callback.AbstractCallback`., defaults to None :type callbacks: List[AbstractCallback], optional :rtype: np.ndarray :raises: RuntimeError if Fruit.fit wasn't called """ if not self._fitted: raise RuntimeError("Missing call of self.fit") result = np.zeros((X.shape[0], self.nfeatures())) index = 0 for branch in self._branches: for callback in callbacks: callback.on_next_branch() k = branch.nfeatures() result[:, index:index+k] = branch.transform(X, callbacks) index += k result = np.nan_to_num(result, copy=False, nan=0.0) return result def fit_transform(self, X: np.ndarray) -> np.ndarray: """Fits all branches to the given dataset and returns the transformed results of X from all branches. :param X: (Multidimensional) time series dataset :type X: np.ndarray :returns: Two dimensional feature array :rtype: np.ndarray """ self.fit(X) return self.transform(X) def summary(self) -> str: """Returns a summary of this object. The summary contains a summary for each FruitBranch in this Fruit object. :rtype: str """ summary = "{:=^80}".format(f"Summary of fruits.Fruit: '{self.name}'") summary += f"\nBranches: {len(self.branches())}" summary += f"\nFeatures: {self.nfeatures()}" for branch in self.branches(): summary += "\n\n" + branch.summary() summary += "\n{:=^80}".format(f"End of Summary") return summary def copy(self) -> "Fruit": """Creates a shallow copy of this Fruit object. This also creates shallow copies of all branches in this object. :rtype: Fruit """ copy_ = Fruit(self.name+" (Copy)") for branch in self._branches: copy_.fork(branch.copy()) return copy_ def deepcopy(self) -> "Fruit": """Creates a deep copy of this Fruit object. This also creates deep copies of all branches in this object. :rtype: Fruit """ copy_ = Fruit(self.name+" (Copy)") for branch in self._branches: copy_.fork(branch.deepcopy()) return copy_ class FruitBranch: """One branch of a Fruit object. A FruitBranch object extracts values from time series data that are somehow representative of the input data. The user can customize any of the following three steps. - Preparing data: Apply functions at the start of the extraction procedure. There are many so called :class:`~fruits.preparation.abstract.DataPreparateur` objects in fruits available for preprocessing. The preparateurs will be applied sequentially to the input data. 
- Calculating Iterated Sums: The preprocessed data is now used to calculate the iterated sums signature for different :class:`~fruits.words.word.Word` objects the user can specify. - Extracting Features: Each :class:`~fruits.sieving.abstract.FeatureSieve` added to the branch will be fitted on the iterated sums from the previous step. The branch then returns an array of numbers (the transformed results from those sieves), i.e. the features for each time series. """ def __init__(self): # lists of used classes for data processing self._preparateurs: list = [] self._words: list = [] self._sieves: list = [] # calculator options used in the ISS calculation self._calculator_options: dict = {"batch_size": 1, "mode": "single"} # list with inner lists containing sieves # all sieves in one list are trained on one specific output # of an ISS-result self._sieves_extended: list = [] # configurations for fitting self._fitted: bool = False self._fit_sample_size: Union[float, int] = 1 # cache that is calculated at fitting and also used in the # transformation process self._cache: Cache def configure(self, mode: str = None, batch_size: int = None, fit_sample_size: Union[float, int] = None): """Makes changes to the default configuration of a fruit branch if arguments differ from ``None``. :param mode: See :meth:`fruits.signature.iss.SignatureCalculator.transform`, defaults to None :type mode: str, optional :param batch_size: See :meth:`~ruits.signature.iss.SignatureCalculator.transform`, defaults to None :type batch_size: int, optional :param fit_sample_size: Size of the random time series sample that is used for fitting. This is represented as a float which will be multiplied by ``X.shape[0]`` or ``1`` for one random time series., defaults to 1 :type fit_sample_size: Union[float, int] """ if mode is not None: self._calculator_options["mode"] = mode if batch_size is not None: self._calculator_options["batch_size"] = batch_size if fit_sample_size is not None: self._fit_sample_size = fit_sample_size def add_preparateur(self, preparateur: DataPreparateur): """Adds a preparateur to the branch. :type preparateur: DataPreparateur """ if not isinstance(preparateur, DataPreparateur): raise TypeError self._preparateurs.append(preparateur) self._fitted = False def get_preparateurs(self) -> List[DataPreparateur]: """Returns a list of all preparateurs added to the branch. :rtype: List[DataPreparateur] """ return self._preparateurs def clear_preparateurs(self): """Removes all preparateurs that were added to this branch.""" self._preparateurs = [] self._fitted = False def add_word(self, word: Word): """Adds a word to the branch. :type word: Word """ if not isinstance(word, Word): raise TypeError self._words.append(word) self._fitted = False def get_words(self) -> List[Word]: """Returns a list of all words in the branch. :rtype: List[Word] """ return self._words def clear_words(self): """Removes all words that were added to this branch.""" self._words = [] self._sieves_extended = [] self._fitted = False def add_sieve(self, sieve: FeatureSieve): """Appends a new feature sieve to the FruitBranch. :type sieve: FeatureSieve """ if not isinstance(sieve, FeatureSieve): raise TypeError self._sieves.append(sieve) self._fitted = False def get_sieves(self) -> List[FeatureSieve]: """Returns a list of all feature sieves added to the branch. 
:rtype: List[FeatureSieve] """ return self._sieves def clear_sieves(self): """Removes all feature sieves that were added to this branch.""" self._sieves = [] self._sieve_prerequisites = None self._sieves_extended = [] self._fitted = False def add(self, *objects: Union[FitTransform, Word, type]): """Adds one or multiple object(s) to the branch. :type objects: One or more objects of the following types: - :class:`~fruits.preparation.abstract.DataPreparateur` - :class:`~fruits.words.word.Word` - :class:`~fruits.sieving.abstract.FeatureSieve` """ objects_flattened = np.array(objects, dtype=object).flatten() for obj in objects_flattened: if inspect.isclass(obj): obj = obj() if isinstance(obj, DataPreparateur): self.add_preparateur(obj) elif isinstance(obj, Word): self.add_word(obj) elif isinstance(obj, FeatureSieve): self.add_sieve(obj) else: raise TypeError("Cannot add variable of type"+str(type(obj))) def clear(self): """Clears all settings, configurations and calculated results the branch has. After the branch is cleared, it has the same settings as a newly created FruitBranch object. """ self.clear_preparateurs() self.clear_words() self.clear_sieves() self._calculator_options = {"batch_size": 1, "mode": "single"} def nfeatures(self) -> int: """Returns the total number of features the current configuration produces. :rtype: int """ if self._calculator_options["mode"] == "extended": return ( sum([s.nfeatures() for s in self._sieves]) * CachePlan(self._words).n_iterated_sums( list(range(len(self._words))) ) ) else: return ( sum([s.nfeatures() for s in self._sieves]) * len(self._words) ) def _compile(self): # checks if the FruitBranch is configured correctly and ready # for fitting if not self._words: raise RuntimeError("No words specified for ISS calculation") if not self._sieves: raise RuntimeError("No FeatureSieve objects specified") def _collect_cache_keys(self) -> Set[str]: # collects cache keys of all FitTransformers in the branch keys: Set[str] = set() for prep in self._preparateurs: prep_keys = prep._get_cache_keys() if 'coquantile' in prep_keys: keys = keys.union(prep_keys['coquantile']) for sieve in self._sieves: sieve_keys = sieve._get_cache_keys() if 'coquantile' in sieve_keys: keys = keys.union(sieve_keys['coquantile']) return keys def _get_cache(self, X: np.ndarray): # returns the already processed cache needed in this branch self._cache = CoquantileCache() self._cache.process(X, list(self._collect_cache_keys())) def _select_fit_sample(self, X: np.ndarray) -> np.ndarray: # returns a sample of the data used for fitting if (isinstance(self._fit_sample_size, int) and self._fit_sample_size == 1): ind = np.random.randint(0, X.shape[0]) return X[ind:ind+1, :, :] else: s = int(self._fit_sample_size * X.shape[0]) if s < 1: s = 1 indices = np.random.choice(X.shape[0], size=s, replace=False) return X[indices, :, :] def fit(self, X: np.ndarray): """Fits the branch to the given dataset. What this action explicitly does depends on the FruitBranch configuration. :param X: (Multidimensional) time series dataset as an array of three dimensions. Have a look at :meth:`~fruits.scope.force_input_shape`. 
:type X: np.ndarray """ self._compile() self._get_cache(X) prepared_data = self._select_fit_sample(X) for prep in self._preparateurs: prep.fit(prepared_data) prepared_data = prep.transform(prepared_data, cache=self._cache) self._sieves_extended = [] iss_calculations = SignatureCalculator().transform( prepared_data, words=self._words, **self._calculator_options )[0] for iterated_data in iss_calculations: iterated_data = iterated_data.reshape(iterated_data.shape[0] * iterated_data.shape[1], iterated_data.shape[2]) sieves_copy = [sieve.copy() for sieve in self._sieves] for sieve in sieves_copy: sieve.fit(iterated_data[:, :]) self._sieves_extended.append(sieves_copy) self._fitted = True def transform(self, X: np.ndarray, callbacks: List[AbstractCallback] = []) -> np.ndarray: """Transforms the given time series dataset. The results are the calculated features for the different time series. :param X: (Multidimensional) time series dataset as an array of three dimensions. Have a look at :meth:`~fruits.scope.force_input_shape`. :type X: np.ndarray :param callbacks: List of callbacks. To write your own callback, override the class :class:`~fruits.core.callback.AbstractCallback`., defaults to [] :type callbacks: List[AbstractCallback], optional :rtype: np.ndarray :raises: RuntimeError if ``self.fit`` wasn't called """ if not self._fitted: raise RuntimeError("Missing call of self.fit") self._get_cache(X) prepared_data = force_input_shape(X) for prep in self._preparateurs: prepared_data = prep.transform(prepared_data, cache=self._cache) for callback in callbacks: callback.on_preparateur(prepared_data) for callback in callbacks: callback.on_preparation_end(prepared_data) sieved_data = np.zeros((prepared_data.shape[0], self.nfeatures())) k = 0 iss_calculations = SignatureCalculator().transform( prepared_data, words=self._words, **self._calculator_options )[0] for i, iterated_data in enumerate(iss_calculations): for callback in callbacks: callback.on_iterated_sum(iterated_data) for sieve in self._sieves_extended[i]: nf = sieve.nfeatures() new_features = nf * iterated_data.shape[1] for it in range(iterated_data.shape[1]): sieved_data[:, k+it*nf:k+(it+1)*nf] = sieve.transform( iterated_data[:, it, :], cache=self._cache, ) for callback in callbacks: callback.on_sieve(sieved_data[k:k+new_features]) k += new_features for callback in callbacks: callback.on_sieving_end(sieved_data) return sieved_data def fit_transform(self, X: np.ndarray) -> np.ndarray: """This function does the same that calling ``self.fit(X)`` and ``self.transform(X)`` consecutively does. :param X: (Multidimensional) time series dataset as an array of three dimensions. Have a look at `:meth:`~fruits.scope.force_input_shape`. :type X: np.ndarray :returns: Array of features. :rtype: np.ndarray """ self.fit(X) return self.transform(X) def summary(self) -> str: """Returns a summary of this object. The summary contains all added preparateurs, words and sieves. :rtype: str """ summary = "{:-^80}".format("fruits.FruitBranch") summary += f"\nNumber of features: {self.nfeatures()}" summary += f"\n\nPreparateurs ({len(self._preparateurs)}): " if len(self._preparateurs) == 0: summary += "-" else: summary += "\n\t+ " + \ "\n\t+ ".join([str(x) for x in self._preparateurs]) summary += f"\nIterators ({len(self._words)}): " if len(self._words) == 0: summary += "-" elif len(self._words) > 10: summary += "\n\t+ " + \ "\n\t+ ".join([str(x) for x in self._words[:9]]) summary += "\n\t..." 
else: summary += "\n\t+ " + \ "\n\t+ ".join([str(x) for x in self._words]) summary += f"\nSieves ({len(self._sieves)}): " if len(self._sieves) == 0: summary += "-" else: for x in self._sieves: lines = x.summary().split("\n") summary += "\n\t+ " + lines[0] summary += "\n\t " summary += "\n\t ".join(lines[1:]) return summary def copy(self) -> "FruitBranch": """Returns a shallow copy of this FruitBranch object. :returns: Copy of the branch with same settings but all calculations done erased. :rtype: FruitBranch """ copy_ = FruitBranch() for preparateur in self._preparateurs: copy_.add(preparateur) for iterator in self._words: copy_.add(iterator) for sieve in self._sieves: copy_.add(sieve) return copy_ def deepcopy(self) -> "FruitBranch": """Returns a deep copy of this FruitBranch object. :returns: Deepcopy of the branch with same settings but all calculations done erased. :rtype: FruitBranch """ copy_ = FruitBranch() for preparateur in self._preparateurs: copy_.add(preparateur.copy()) for iterator in self._words: copy_.add(iterator.copy()) for sieve in self._sieves: copy_.add(sieve.copy()) copy_._calculator_options = self._calculator_options.copy() return copy_
2.546875
3
workoutizer/__main__.py
pa3kDaWae/workoutizer
0
2520
<filename>workoutizer/__main__.py<gh_stars>0
import os
import argparse
import subprocess
import socket
import sys

import click
from django.core.management import execute_from_command_line

from workoutizer.settings import WORKOUTIZER_DIR, WORKOUTIZER_DB_PATH, TRACKS_DIR
from workoutizer import __version__

BASE_DIR = os.path.dirname(os.path.dirname(__file__))
SETUP_DIR = os.path.join(BASE_DIR, 'setup')
os.environ["DJANGO_SETTINGS_MODULE"] = "workoutizer.settings"

example_rpi_cmd = "wkz --setup_rpi vendor_id=091e product_id=4b48"
url_help = 'specify ip address and port pair, like: address:port'


@click.group()
def cli():
    pass


@click.command(help='Mandatory command to initialize workoutizer. This fetches the static files, creates the database '
                    'and applies the required migrations.')
def init():
    _build_home()
    execute_from_command_line(["manage.py", "collectstatic", "--noinput"])
    execute_from_command_line(["manage.py", "migrate"])
    execute_from_command_line(["manage.py", "check"])
    click.echo(f"database and track files are stored in: {WORKOUTIZER_DIR}")


@click.option('--ip', default="", help=url_help)
@click.option('--product_id', help="product ip of your device", required=True)
@click.option('--vendor_id', help="vendor ip of your device", required=True)
@click.command(help='Configure Raspberry Pi to auto mount devices. Passing vendor and product id is required. Passing '
                    f'the local ip address and port is optionally. E.g.: {example_rpi_cmd}')
def setup_rpi(ip, vendor_id, product_id):
    if not ip:
        ip = _get_local_ip_address()
    answer = input(f"Are you sure you want to setup your Raspberry Pi?\n\n"
                   f"This will copy the required udev rule and systemd service file\n"
                   f"to your system to enable automated mounting of your device.\n"
                   f"This might take a while...\n\n"
                   f"Start setup? [Y/n] ")
    if answer.lower() == 'y':
        click.echo(f"installing ansible...")
        _pip_install('ansible==2.9.10')
        click.echo(f"starting setup using ansible...")
        _setup_rpi(
            vendor_id=vendor_id,
            product_id=product_id,
            ip_port=f"{ip}:8000"
        )
        _run_ansible(playbook='install_packages.yml')
        click.echo(f"Successfully configured to automatically mount your device when plugged in. Note: These changes "
                   f"require a system restart to take effect.")
    else:
        click.echo(f"Aborted.")


@click.argument('url', default="")
@click.command(help="Run workoutizer. Passing the local ip address and port is optionally. In case of no ip address "
                    "being passed, it will be determined automatically. Usage, e.g.: 'wkz run 0.0.0.0:8000'.")
def run(url):
    if not url:
        url = f"{_get_local_ip_address()}:8000"
    execute_from_command_line(["manage.py", "runserver", url])


@click.argument('url', default="")
@click.command(help='Configure workoutizer to run as systemd service. Passing the local ip address and port is '
                    'optionally. In case of no ip address being passed, it will be determined automatically.')
def wkz_as_service(url):
    _pip_install('ansible==2.9.10')
    _wkz_as_service(url=url)


@click.argument('cmd', nargs=1)
@click.command(help="Pass commands to django's manage.py. Convenience function to access all django commands which are "
                    "not yet covered with the given set of workoutizer commands. Usage, e.g.: "
                    "wkz manage 'runserver 0.0.0.0:8000 --noreload'.")
def manage(cmd):
    execute_from_command_line(["manage.py"] + cmd.split(' '))


@click.command(help='Show the version of currently installed workoutizer.')
def version():
    click.echo(__version__)


@click.command(help='Check for a newer version and install if there is any.')
def upgrade():
    _upgrade()


cli.add_command(upgrade)
cli.add_command(version)
cli.add_command(init)
cli.add_command(setup_rpi)
cli.add_command(run)
cli.add_command(manage)
cli.add_command(wkz_as_service)


def _upgrade():
    latest_version = _get_latest_version_of("workoutizer")
    from workoutizer import __version__ as current_version
    if latest_version:
        click.echo(f"found newer version: {latest_version}, you have {current_version} installed")
        _pip_install('workoutizer', upgrade=True)
        execute_from_command_line(["manage.py", "collectstatic", "--noinput"])
        execute_from_command_line(["manage.py", "migrate"])
        execute_from_command_line(["manage.py", "check"])
        click.echo(f"Successfully upgraded from {current_version} to {latest_version}")
    else:
        click.echo(f"No update available. You are running the latest version: {current_version}")


def _get_latest_version_of(package: str):
    outdated = str(
        subprocess.check_output([sys.executable, "-m", "pip", "list", '--outdated', '--disable-pip-version-check']))
    if package in outdated:
        output = str(subprocess.check_output([sys.executable, "-m", "pip", "search", package]))
        latest_version = output[output.find('LATEST'):].split('\\n')[0].split(' ')[-1]
        return latest_version
    else:
        return False


def _setup_rpi(vendor_id: str, product_id: str, ip_port: str = None):
    if not ip_port:
        ip_port = f"{_get_local_ip_address()}:8000"
    result = _run_ansible(
        playbook='setup_on_rpi.yml',
        variables={
            'vendor_id': vendor_id,
            'product_id': product_id,
            'address_plus_port': ip_port,
        }
    )
    if result == 0:
        pass
    else:
        click.echo(f"ERROR: Could not configure Raspberry Pi, see above errors.")
        quit()
    return result


def _wkz_as_service(url: str):
    click.echo(f"configuring workoutizer to run as system service")
    if not url:
        url = f"{_get_local_ip_address()}:8000"
    env_binaries = sys.executable
    wkz_executable = env_binaries[:env_binaries.find('python')] + "wkz"
    result = _run_ansible(
        playbook='wkz_as_service.yml',
        variables={
            'address_plus_port': url,
            'wkz_executable': wkz_executable,
        }
    )
    if result == 0:
        click.echo(f"Successfully configured workoutizer as systemd service. Run it with: systemctl start wkz.service")
    else:
        click.echo(f"ERROR: Could not configure workoutizer as systemd service, see above errors.")
    return result


def _get_local_ip_address():
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.connect(("8.8.8.8", 80))
    ip_address = s.getsockname()[0]
    s.close()
    return ip_address


def _build_home():
    if os.path.isdir(WORKOUTIZER_DIR):
        if os.path.isfile(WORKOUTIZER_DB_PATH):
            click.echo(f"Found existing workoutizer database at: {WORKOUTIZER_DB_PATH}\n")
            answer = input(f"Workoutizer could try to use the existing database instead of creating a new one.\n"
                           f"Note that this could lead to faulty behaviour because of mismatching applied\n"
                           f"migrations on this database.\n\n"
                           f"Do you want to use the existing database instead of creating a new one? [Y/n] ")
            if answer.lower() == 'y':
                click.echo(f"keeping existing database at {WORKOUTIZER_DB_PATH}")
                return
            else:
                click.echo(f"removed database at {WORKOUTIZER_DB_PATH}")
                os.remove(WORKOUTIZER_DB_PATH)
                _make_tracks_dir(TRACKS_DIR)
    else:
        os.mkdir(WORKOUTIZER_DIR)
        _make_tracks_dir(TRACKS_DIR)


def _make_tracks_dir(path):
    if not os.path.isdir(path):
        os.mkdir(path)


class ParseDict(argparse.Action):
    def __call__(self, parser, namespace, values, option_string=None):
        d = {}
        if values:
            for item in values:
                split_items = item.split("=", 1)
                key = split_items[0].strip()  # we remove blanks around keys, as is logical
                value = split_items[1]
                d[key] = value
        setattr(namespace, self.dest, d)


def _pip_install(package, upgrade: bool = False):
    if upgrade:
        subprocess.check_call([sys.executable, "-m", "pip", "install", package, '--upgrade'])
    else:
        subprocess.check_call([sys.executable, "-m", "pip", "install", package])


def _run_ansible(playbook: str, variables: dict = None):
    if variables is None:
        variables = {}
    from ansible import context
    from ansible.cli import CLI
    from ansible.module_utils.common.collections import ImmutableDict
    from ansible.executor.playbook_executor import PlaybookExecutor
    from ansible.parsing.dataloader import DataLoader
    from ansible.inventory.manager import InventoryManager
    from ansible.vars.manager import VariableManager

    loader = DataLoader()
    context.CLIARGS = ImmutableDict(
        tags={}, listtags=False, listtasks=False, listhosts=False, syntax=False, connection='ssh',
        module_path=None, forks=100, remote_user='xxx', private_key_file=None, ssh_common_args=None,
        ssh_extra_args=None, sftp_extra_args=None, scp_extra_args=None, become=True, become_method='sudo',
        become_user='root', verbosity=True, check=False, start_at_task=None
    )
    inventory = InventoryManager(loader=loader, sources=())
    variable_manager = VariableManager(loader=loader, inventory=inventory,
                                       version_info=CLI.version_info(gitinfo=False))
    variable_manager._extra_vars = variables
    pbex = PlaybookExecutor(playbooks=[os.path.join(SETUP_DIR, 'ansible', playbook)], inventory=inventory,
                            variable_manager=variable_manager, loader=loader, passwords={})
    return pbex.run()


if __name__ == '__main__':
    cli()
2.296875
2
bcbio/bam/trim.py
matanhofree/bcbio-nextgen
1
2521
"""Provide trimming of input reads from Fastq or BAM files. """ import os import sys import tempfile from bcbio.utils import (file_exists, safe_makedir, replace_suffix, append_stem, is_pair, replace_directory, map_wrap) from bcbio.log import logger from bcbio.bam import fastq from bcbio.provenance import do from Bio.Seq import Seq from itertools import izip, repeat from bcbio.distributed.transaction import file_transaction from bcbio.pipeline import config_utils SUPPORTED_ADAPTERS = { "illumina": ["AACACTCTTTCCCT", "AGATCGGAAGAGCG"], "truseq": ["AGATCGGAAGAG"], "polya": ["AAAAAAAAAAAAA"], "nextera": ["AATGATACGGCGA", "CAAGCAGAAGACG"]} QUALITY_FLAGS = {5: ['"E"', '"&"'], 20: ['"T"', '"5"']} def trim_adapters(fastq_files, dirs, config): QUALITY_CUTOFF = 5 to_trim = _get_sequences_to_trim(config) resources = config_utils.get_resources("AlienTrimmer", config) try: jarpath = config_utils.get_program("AlienTrimmer", config, "dir") # fall back on Cutadapt if AlienTrimmer is not installed # XXX: remove after it has been live for a while except: return trim_read_through(fastq_files, dirs, config) jarfile = config_utils.get_jar("AlienTrimmer", jarpath) jvm_opts = " ".join(resources.get("jvm_opts", ["-Xms750m", "-Xmx2g"])) base_cmd = ("java -jar {jvm_opts} {jarfile} -k 10 ") fastq1 = fastq_files[0] supplied_quality_format = _get_quality_format(config) cores = config["algorithm"].get("num_cores", 0) out_files = _get_read_through_trimmed_outfiles(fastq_files, dirs) fastq1_out = out_files[0] if supplied_quality_format == "illumina": quality_flag = QUALITY_FLAGS[QUALITY_CUTOFF][0] else: quality_flag = QUALITY_FLAGS[QUALITY_CUTOFF][1] quality_flag = '-q ' + quality_flag if len(fastq_files) == 1: if file_exists(fastq1_out): return [fastq1_out] base_cmd += ("-i {fastq1} -o {tx_fastq1_out} -c {temp_file} " "{quality_flag}") message = "Trimming %s from %s with AlienTrimmer." % (to_trim, fastq1) else: fastq2 = fastq_files[1] fastq2_out = out_files[1] if all(map(file_exists, [fastq1_out, fastq2_out])): return [fastq1_out, fastq2_out] base_cmd += ("-if {fastq1} -ir {fastq2} -of {tx_fastq1_out} " "-or {tx_fastq2_out} -c {temp_file} {quality_flag}") message = ("Trimming %s from %s and %s with AlienTrimmer." % (to_trim, fastq1, fastq2)) with tempfile.NamedTemporaryFile(delete=False) as temp: temp_file = temp.name for adapter in to_trim: temp.write(adapter + "\n") temp.close() if len(fastq_files) == 1: with file_transaction(fastq1_out) as tx_fastq1_out: do.run(base_cmd.format(**locals()), message) return [fastq1_out] else: with file_transaction([fastq1_out, fastq2_out]) as tx_out_files: tx_fastq1_out = tx_out_files[0] tx_fastq2_out = tx_out_files[1] do.run(base_cmd.format(**locals()), message) return [fastq1_out, fastq2_out] def trim_read_through(fastq_files, dirs, lane_config): """ for small insert sizes, the read length can be longer than the insert resulting in the reverse complement of the 3' adapter being sequenced. this takes adapter sequences and trims the only the reverse complement of the adapter MYSEQUENCEAAAARETPADA -> MYSEQUENCEAAAA (no polyA trim) """ quality_format = _get_quality_format(lane_config) to_trim = _get_sequences_to_trim(lane_config) out_files = _get_read_through_trimmed_outfiles(fastq_files, dirs) fixed_files = append_stem(out_files, ".fixed") if all(map(file_exists, fixed_files)): return fixed_files logger.info("Trimming %s from the 3' end of reads in %s using " "cutadapt." 
% (", ".join(to_trim), ", ".join(fastq_files))) cores = lane_config["algorithm"].get("num_cores", 1) out_files = _cutadapt_trim(fastq_files, quality_format, to_trim, out_files, cores) fixed_files = remove_short_reads(out_files, dirs, lane_config) return fixed_files def remove_short_reads(fastq_files, dirs, lane_config): """ remove reads from a single or pair of fastq files which fall below a length threshold (30 bases) """ min_length = int(lane_config["algorithm"].get("min_read_length", 20)) supplied_quality_format = _get_quality_format(lane_config) if supplied_quality_format == "illumina": quality_format = "fastq-illumina" else: quality_format = "fastq-sanger" if is_pair(fastq_files): fastq1, fastq2 = fastq_files out_files = fastq.filter_reads_by_length(fastq1, fastq2, quality_format, min_length) else: out_files = [fastq.filter_single_reads_by_length(fastq_files[0], quality_format, min_length)] map(os.remove, fastq_files) return out_files def _get_read_through_trimmed_outfiles(fastq_files, dirs): out_dir = os.path.join(dirs["work"], "trim") safe_makedir(out_dir) out_files = replace_directory(append_stem(fastq_files, "_trimmed"), out_dir) return out_files def _get_sequences_to_trim(lane_config): builtin_adapters = _get_builtin_adapters(lane_config) polya = builtin_adapters.get("polya", [None])[0] # allow for trimming of custom sequences for advanced users custom_trim = lane_config["algorithm"].get("custom_trim", []) builtin_adapters = {k: v for k, v in builtin_adapters.items() if k != "polya"} trim_sequences = custom_trim # for unstranded RNA-seq, libraries, both polyA and polyT can appear # at the 3' end as well if polya: trim_sequences += [polya, str(Seq(polya).reverse_complement())] # also trim the reverse complement of the adapters for _, v in builtin_adapters.items(): trim_sequences += [str(Seq(sequence)) for sequence in v] trim_sequences += [str(Seq(sequence).reverse_complement()) for sequence in v] return trim_sequences def _cutadapt_trim(fastq_files, quality_format, adapters, out_files, cores): """Trimming with cutadapt, using version installed with bcbio-nextgen. Uses the system executable to find the version next to our Anaconda Python. TODO: Could we use cutadapt as a library to avoid this? """ if quality_format == "illumina": quality_base = "64" else: quality_base = "33" # --times=2 tries twice remove adapters which will allow things like: # realsequenceAAAAAAadapter to remove both the poly-A and the adapter # this behavior might not be what we want; we could also do two or # more passes of cutadapt cutadapt = os.path.join(os.path.dirname(sys.executable), "cutadapt") base_cmd = [cutadapt, "--times=" + "2", "--quality-base=" + quality_base, "--quality-cutoff=5", "--format=fastq", "--minimum-length=0"] adapter_cmd = map(lambda x: "--adapter=" + x, adapters) base_cmd.extend(adapter_cmd) if all(map(file_exists, out_files)): return out_files with file_transaction(out_files) as tmp_out_files: if isinstance(tmp_out_files, basestring): tmp_out_files = [tmp_out_files] map(_run_cutadapt_on_single_file, izip(repeat(base_cmd), fastq_files, tmp_out_files)) return out_files @map_wrap def _run_cutadapt_on_single_file(base_cmd, fastq_file, out_file): stat_file = replace_suffix(out_file, ".trim_stats.txt") with open(stat_file, "w") as stat_handle: cmd = list(base_cmd) cmd.extend(["--output=" + out_file, fastq_file]) do.run(cmd, "Running cutadapt on %s." 
% (fastq_file), None) def _get_quality_format(lane_config): SUPPORTED_FORMATS = ["illumina", "standard"] quality_format = lane_config["algorithm"].get("quality_format", "standard").lower() if quality_format not in SUPPORTED_FORMATS: logger.error("quality_format is set to an unsupported format. " "Supported formats are %s." % (", ".join(SUPPORTED_FORMATS))) exit(1) return quality_format def _get_builtin_adapters(lane_config): chemistries = lane_config["algorithm"].get("adapters", []) adapters = {chemistry: SUPPORTED_ADAPTERS[chemistry] for chemistry in chemistries if chemistry in SUPPORTED_ADAPTERS} return adapters
2.0625
2
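A minimal sketch of driving the `trim_adapters` entry point from the trimming module above; the import path, file names, and configuration values are assumptions for illustration rather than values taken from the record.

# Hypothetical driver for trim_adapters(); paths and config values are made up.
from bcbio.bam.trim import trim_adapters  # import path is an assumption

config = {
    "algorithm": {
        "adapters": ["truseq", "polya"],   # keys of SUPPORTED_ADAPTERS
        "quality_format": "standard",
        "min_read_length": 20,
        "num_cores": 4,
    },
    "resources": {},  # AlienTrimmer jar/JVM options would normally be configured here
}
dirs = {"work": "/tmp/work"}  # trimmed output is written under <work>/trim

trimmed = trim_adapters(["sample_1.fastq", "sample_2.fastq"], dirs, config)
print(trimmed)  # falls back to cutadapt when AlienTrimmer is not installed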
FEniCSUI/AnalysesHub/views.py
nasserarbabi/FEniCSUI-dev
0
2522
<filename>FEniCSUI/AnalysesHub/views.py from rest_framework.response import Response from rest_framework.views import APIView from django.shortcuts import get_object_or_404 from dashboard.models import projects from .models import AnalysisConfig, SolverResults, SolverProgress, DockerLogs from rest_framework.parsers import FormParser, JSONParser, MultiPartParser, FileUploadParser from rest_framework import status import docker import os import json from zipfile import ZipFile from django.http import HttpResponse from threading import Thread from time import sleep from datetime import datetime class solverConfig(APIView): parser_classes = [FormParser, MultiPartParser] def get(self, request, *args, **kwargs): """ return a list of entries within a given category """ project = get_object_or_404(projects, id=kwargs['project_id']) category = request.query_params.get('category') parentConfig = AnalysisConfig.objects.get(project=project) jsonHelper = json.loads(parentConfig.config) if category in jsonHelper: return Response(data=jsonHelper[category], status=status.HTTP_200_OK) else: return Response(data="The category {} does not exist".format(category), status=status.HTTP_204_NO_CONTENT) def post(self, request, *args, **kwargs): """ create a new category for solver configuration """ project = get_object_or_404(projects, id=kwargs['project_id']) data = request.data.dict() category = request.query_params.get('category') parentConfig = AnalysisConfig.objects.get(project=project) jsonHelper = json.loads(parentConfig.config) # if request does not contain a name if not "Name" in data: return Response(data="Please provide a 'Name' for the entry", status=400) # if there is no category similar to the user request if category not in jsonHelper: jsonHelper[category] = [] jsonHelper[category].append(data) # check if the entry with the same name exists elif not list(filter(lambda name: name["Name"] == data["Name"], jsonHelper[category])): jsonHelper[category].append(data) else: return Response(data="an entry with the same name exists", status=400) parentConfig.config = json.dumps(jsonHelper) parentConfig.save() return Response(data=jsonHelper[category], status=status.HTTP_201_CREATED) def put(self, request, *args, **kwargs): """ Edit an existing category entry's data """ project = get_object_or_404(projects, id=kwargs['project_id']) data = request.data.dict() # if request does not contain a name if not "Name" in data: return Response(data="Please provide a 'Name' for the entry", status=400) category = request.query_params.get('category') list_id = int(request.query_params.get('id')) parentConfig = AnalysisConfig.objects.get(project=project) jsonHelper = json.loads(parentConfig.config) if category in jsonHelper: if list_id >= 0 and list_id < len(jsonHelper[category]): # check if an entry with the same name exists if not list(filter(lambda name: name["Name"] == data["Name"], jsonHelper[category])) or jsonHelper[category][list_id]["Name"] == data["Name"]: jsonHelper[category][list_id] = data parentConfig.config = json.dumps(jsonHelper) parentConfig.save() return Response(data=jsonHelper[category], status=status.HTTP_200_OK) else: return Response(data="an entry with the same name exists", status=400) else: return Response(data="No entry with the id={}".format(list_id), status=status.HTTP_204_NO_CONTENT) else: return Response(data="The category {} does not exist".format(category), status=status.HTTP_204_NO_CONTENT) def delete(self, request, *args, **kwargs): """ Delete an entry from the category """ project = 
get_object_or_404(projects, id=kwargs['project_id']) category = request.query_params.get('category') list_id = int(request.query_params.get('id')) parentConfig = AnalysisConfig.objects.get(project=project) jsonHelper = json.loads(parentConfig.config) if jsonHelper[category]: if list_id >= 0 and list_id < len(jsonHelper[category]): jsonHelper[category].pop(int(list_id)) parentConfig.config = json.dumps(jsonHelper) parentConfig.save() return Response(data=jsonHelper[category], status=status.HTTP_200_OK) else: return Response(data="No entry with the id={}".format(list_id), status=status.HTTP_204_NO_CONTENT) else: return Response(data="The category {} does not exist".format(category), status=status.HTTP_204_NO_CONTENT) class Categories(APIView): parser_classes = [FormParser, MultiPartParser] def get(self, request, *args, **kwargs): """ Return the existing categories in the solver config """ project = get_object_or_404(projects, id=kwargs['project_id']) config = json.loads(AnalysisConfig.objects.get( project=project).config).keys() return Response(data=config, status=status.HTTP_200_OK) def delete(self, request, *args, **kwargs): """ DELETE the existing categories in the solver config """ project = get_object_or_404(projects, id=kwargs['project_id']) category = request.query_params.get('category') parentConfig = AnalysisConfig.objects.get(project=project) jsonHelper = json.loads(parentConfig.config) if category in jsonHelper: del jsonHelper[category] parentConfig.config = json.dumps(jsonHelper) parentConfig.save() return Response(data=jsonHelper, status=status.HTTP_410_GONE) else: return Response(data="The category {} does not exist!".format(category), status=status.HTTP_404_NOT_FOUND) class getConfiguration(APIView): parser_classes = [FormParser, MultiPartParser] def get(self, request, *args, **kwargs): """ Get the solver config to be submitted to the analysis """ project = get_object_or_404(projects, id=kwargs['project_id']) config = AnalysisConfig.objects.filter(project=project).values()[0] return Response(data=config["config"], status=status.HTTP_200_OK) def streamDockerLog(container, project): for line in container.logs(stream=True): logs = get_object_or_404(DockerLogs, project=project) now = datetime.now() current_time = now.strftime("[%H:%M:%S]: ") logs.log = current_time + str(line.strip(), 'utf-8') + "\n" + logs.log logs.save() class solvers(APIView): parser_classes = [FormParser, MultiPartParser] def get(self, request, *args, **kwargs): """ Runs the related solver defined in url parameters """ project = get_object_or_404(projects, id=kwargs['project_id']) # set progress to initial SolverProgress.objects.get_or_create( project=project, defaults={'progress' :json.dumps({"status": "", "message": ""})}) progress = SolverProgress.objects.get( project=project ) progress.progress = json.dumps({"state":{"status": "RECEIVED", "message": {"progress": "0.0"}}, "logs":""}) progress.save() # initiate related solver solver = request.query_params.get('solver') client = docker.from_env() solverPath = os.path.abspath('./solvers') if DockerLogs.objects.filter(project=project).exists(): DockerLogs.objects.filter(project=project).delete() DockerLogs.objects.create(project=project,log="") try: container = client.containers.run( "quay.io/fenicsproject/stable:current", volumes={solverPath: { 'bind': '/home/fenics/shared', 'mode': 'rw'}}, working_dir="/home/fenics/shared", # runs solver.py with two arguments to be passed in to python file command=["`sudo pip3 install requests \n python3 solverHub.py {} 
{}`".format( project.id, solver)], name="FEniCSDocker", auto_remove=False, detach=True) thread = Thread(target=streamDockerLog, args=(container, project)) thread.start() except: message = '''please check if the docker is running, and if a container with the name FEniCSDocker does not exist. if you are using docker windows, make sure the file sharing setting for the main folder directory is on. If you are woking with WSL, make sure it has access to the windows docker. Instructions can be found at: https://nickjanetakis.com/blog/setting-up-docker-for-windows-and-wsl-to-work-flawlessly''' print(message) return Response(data=message, status=500) return Response(data="submitted to analysis", status=status.HTTP_200_OK) def delete(self, request, *args, **kwargs): """ kills the running docker container """ client = docker.from_env() try: container = client.containers.get("FEniCSDocker") container.stop() return Response(data="container stopped successfully", status=200) except: return Response(data="No container running", status=404) class saveResults(APIView): parser_classes = [FileUploadParser] def put(self, request, filename, format=None, *args, **kwargs): """ save results to media folder. a query will be created to make it available for download """ project = get_object_or_404(projects, id=kwargs['project_id']) fileType = request.query_params.get('fileType') data = request.data['file'] folderPath = os.path.abspath( "../FEniCSUI/media/{}/results/".format(kwargs['project_id'])) os.makedirs(folderPath, exist_ok=True) filePath = '{}/{}.{}'.format(folderPath, filename, fileType) with open(filePath, 'wb+') as destination: for chunk in data.chunks(): destination.write(chunk) if not SolverResults.objects.filter(project=project).exists(): SolverResults.objects.create(project=project, path=folderPath) return Response(data="results updated at {}".format(filePath), status=status.HTTP_201_CREATED) class downloadResults(APIView): def get(self, request, *args, **kwargs): """ Get the results saved in the database """ project = get_object_or_404(projects, id=kwargs['project_id']) if (SolverResults.objects.filter(project=project).exists()): resutls = SolverResults.objects.filter(project=project).values()[0] folderPath = resutls['path'] # create a ZipFile object with ZipFile('{}/results.zip'.format(folderPath), 'w') as zipObj: # Iterate over all the files in directory for folderName, subfolders, filenames in os.walk(folderPath): for filename in filenames: if not filename == 'results.zip': filePath = os.path.join(folderName, filename) # Add file to zip zipObj.write(filePath, os.path.basename(filePath)) zipFile = open('{}/results.zip'.format(folderPath), 'rb') response= HttpResponse(zipFile,content_type='application/zip') response['Content-Disposition'] = 'attachment; filename=results.zip' return response else: return Response(data="not found", status=404) class solverProgress(APIView): parser_classes = [JSONParser] def get(self, request, *args, **kwargs): """ Get the progress """ project = get_object_or_404(projects, id=kwargs['project_id']) if (SolverProgress.objects.filter(project=project).exists()): progress = json.loads(get_object_or_404(SolverProgress, project=project).progress) logs = get_object_or_404(DockerLogs, project=project).log else: progress = "null" logs="" return Response(data=json.dumps({"state":progress,"logs":logs}), status=status.HTTP_200_OK) def post(self, request, *args, **kwargs): """ Update the progress from solver """ project = get_object_or_404(projects, id=kwargs['project_id']) data = 
request.data if SolverProgress.objects.filter(project=project).exists(): progress = get_object_or_404(SolverProgress, project=project) progress.progress = json.dumps(data) progress.save() else: SolverProgress.objects.create(project=project, progress=data) return Response(data=get_object_or_404(SolverProgress, project=project).progress, status=status.HTTP_201_CREATED)
2.296875
2
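The views above are thin REST endpoints around a project's AnalysisConfig; a hedged sketch of exercising `solverConfig` in a test, assuming the project fixture and URL routing exist elsewhere in the FEniCSUI project (the URL, project id, and category name are hypothetical).

# Hypothetical test snippet; the URL, project id, and category are made up.
from rest_framework.test import APIRequestFactory
from AnalysesHub.views import solverConfig  # import path is an assumption

factory = APIRequestFactory()

# POST a new named entry into the "boundaries" category of project 1.
request = factory.post(
    "/AnalysesHub/1/solverConfig/?category=boundaries",
    {"Name": "inlet", "value": "1.0"},
)
response = solverConfig.as_view()(request, project_id=1)
assert response.status_code in (201, 400)  # 400 if an entry with that Name already exists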
fs/opener/appfs.py
EnjoyLifeFund/macHighSierra-py36-pkgs
0
2523
<reponame>EnjoyLifeFund/macHighSierra-py36-pkgs<gh_stars>0
# coding: utf-8
"""``AppFS`` opener definition.
"""

from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals

from .base import Opener
from .errors import OpenerError
from ..subfs import ClosingSubFS
from .. import appfs


class AppFSOpener(Opener):
    """``AppFS`` opener.
    """

    protocols = [
        'userdata', 'userconf', 'sitedata',
        'siteconf', 'usercache', 'userlog'
    ]
    _protocol_mapping = {
        'userdata': appfs.UserDataFS,
        'userconf': appfs.UserConfigFS,
        'sitedata': appfs.SiteDataFS,
        'siteconf': appfs.SiteConfigFS,
        'usercache': appfs.UserCacheFS,
        'userlog': appfs.UserLogFS
    }

    def open_fs(self, fs_url, parse_result, writeable, create, cwd):
        fs_class = self._protocol_mapping[parse_result.protocol]
        resource, delim, path = parse_result.resource.partition('/')
        tokens = resource.split(':', 3)
        if len(tokens) == 2:
            appname, author = tokens
            version = None
        elif len(tokens) == 3:
            appname, author, version = tokens
        else:
            raise OpenerError(
                'resource should be <appname>:<author> '
                'or <appname>:<author>:<version>'
            )

        app_fs = fs_class(
            appname,
            author=author,
            version=version,
            create=create
        )
        app_fs = (
            app_fs.opendir(path, factory=ClosingSubFS)
            if delim
            else app_fs
        )
        return app_fs
1.984375
2
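With this opener registered, application filesystems can be opened straight from URLs; a short sketch of the resource forms it accepts (the application name and author below are made up).

# Illustrative only: "myapp"/"acme" are hypothetical application/author names.
import fs

# <protocol>://<appname>:<author> opens the matching AppFS class
conf_fs = fs.open_fs("userconf://myapp:acme", create=True)

# an optional third token selects a version; a trailing /path opens a sub-directory
cache_fs = fs.open_fs("usercache://myapp:acme:1.0/logs", create=True)

conf_fs.makedir("settings", recreate=True)
print(conf_fs.listdir("/"))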
scripts/modeling_toolbox/evaluation.py
cyberj0g/verification-classifier
8
2524
import numpy as np from sklearn.metrics import fbeta_score, roc_curve, auc, confusion_matrix from sklearn.decomposition import PCA from sklearn import random_projection from sklearn import svm from sklearn.ensemble import IsolationForest import matplotlib.pyplot as plt from keras.layers import Dense, Input, Dropout from keras.models import Model from keras import regularizers from keras.models import Sequential from keras.optimizers import Adam from keras.regularizers import l2 from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import AdaBoostClassifier import xgboost as xgb def one_class_svm(x_train, x_test, x_attacks, svm_results): # SVM Hyper-parameters nus = [0.01] gammas = ['auto'] dimensions = [int(i*x_test.shape[1]) for i in [0.25, 0.35, 0.5, 0.75, 0.9, 1]] dimensions = list(filter(lambda x: x > 0, dimensions)) for n in dimensions: x_reduced_pca, test_reduced_pca, attack_reduced_pca = reduce_dimensionality(n, x_train, x_test, 'PCA', attack=x_attacks) for nu in nus: for gamma in gammas: # Fit classifier with PCA reduced data classifier = svm.OneClassSVM(kernel='rbf', gamma=gamma, nu=nu, cache_size=7000) classifier.fit(x_reduced_pca) fb, area, tnr, tpr_train, tpr_test = unsupervised_evaluation(classifier, x_reduced_pca, test_reduced_pca, attack_reduced_pca) svm_results = svm_results.append({'nu': nu, 'gamma': gamma, 'n_components': n, 'TPR_train': tpr_train, 'TPR_test': tpr_test, 'TNR': tnr, 'model': 'svm', 'auc': area, 'f_beta': fb, 'projection': 'PCA'}, ignore_index=True) # Fit classifier with RP reduced data classifier = svm.OneClassSVM(kernel='rbf', gamma=gamma, nu=nu, cache_size=7000) classifier.fit(x_train) fb, area, tnr, tpr_train, tpr_test = unsupervised_evaluation(classifier, x_train, x_test, x_attacks) svm_results = svm_results.append({'nu': nu, 'gamma': gamma, 'n_components': x_test.shape[1], 'TPR_train': tpr_train, 'TPR_test': tpr_test, 'TNR': tnr, 'model': 'svm', 'auc': area, 'f_beta': fb, 'projection': 'None'}, ignore_index=True) return svm_results def isolation_forest(x_train, x_test, x_attacks, isolation_results): # Isolation Forest Hyper-parameters estimators = [200, 100] contaminations = [0.01] dimensions = [int(i*x_test.shape[1]) for i in [0.25, 0.5, 0.9, 1]] dimensions = list(filter(lambda x: x > 0, dimensions)) for n in dimensions: x_reduced_pca, test_reduced_pca, attack_reduced_pca = reduce_dimensionality(n, x_train, x_test, 'PCA', attack=x_attacks) x_reduced_rp, test_reduced_rp, attack_reduced_rp = reduce_dimensionality(n, x_train, x_test, 'RP', attack=x_attacks) max_features = list(range(1, n + 1, 4)) for estimator in estimators: for contamination in contaminations: for max_feature in max_features: classifier = IsolationForest(n_estimators=estimator, contamination=contamination, max_features=max_feature, n_jobs=7) classifier.fit(x_reduced_pca) fb, area, tnr, tpr_train, tpr_test = unsupervised_evaluation(classifier, x_reduced_pca, test_reduced_pca, attack_reduced_pca) isolation_results = isolation_results.append({'estimators': estimator, 'contamination': contamination, 'n_components': n, 'max_features': max_feature, 'TPR_train': tpr_train, 'TPR_test': tpr_test, 'TNR': tnr, 'model': 'isolation_forest', 'auc': area, 'f_beta': fb, 'projection': 'PCA'}, ignore_index=True) classifier = IsolationForest(n_estimators=estimator, contamination=contamination, max_features=max_feature, n_jobs=7) classifier.fit(x_reduced_rp) fb, area, tnr, tpr_train, tpr_test = unsupervised_evaluation(classifier, x_reduced_rp, test_reduced_rp, attack_reduced_rp) 
isolation_results = isolation_results.append({'estimators': estimator, 'contamination': contamination, 'n_components': n, 'max_features': max_feature, 'TPR_train': tpr_train, 'TPR_test': tpr_test, 'TNR': tnr, 'model': 'isolation_forest', 'auc': area, 'f_beta': fb, 'projection': 'RP'}, ignore_index=True) return isolation_results def autoencoder(x_train, x_test, x_attacks, ae_svm_results): latent_dim = 3 input_vector = Input(shape=(x_train.shape[1],)) encoded = Dense(latent_dim, activation='relu')(input_vector) decoded = Dense(x_train.shape[1], activity_regularizer=regularizers.l1(10e-5))(encoded) autoencoder = Model(input_vector, decoded) encoder = Model(input_vector, encoded) autoencoder.compile(optimizer=Adam(lr=0.001), loss='mse') network_history = autoencoder.fit(x_train, x_train, shuffle=True, batch_size=16, epochs=10, validation_data=(x_test, x_test), verbose=True) plot_history(network_history, 'AE history') print('Mean loss on train: {}'.format(autoencoder.evaluate(x_train, x_train, batch_size=8, verbose=False))) print('Mean loss on test: {}'.format(autoencoder.evaluate(x_test, x_test, batch_size=8, verbose=False))) print('Mean loss on attacks: {}'.format(autoencoder.evaluate(x_attacks, x_attacks, batch_size=8, verbose=False))) x_train_red = encoder.predict(x_train, batch_size=8) x_test_red = encoder.predict(x_test, batch_size=8) x_attacks_red = encoder.predict(x_attacks, batch_size=8) nus = [0.01] gammas = [x_train_red.shape[1], 2*x_train_red.shape[1], x_train_red.shape[1]/2, 'auto'] for nu in nus: for gamma in gammas: classifier = svm.OneClassSVM(kernel='rbf', gamma=gamma, nu=nu, cache_size=7000) classifier.fit(x_train_red) fb, area, tnr, tpr_train, tpr_test = unsupervised_evaluation(classifier, x_train_red, x_test_red, x_attacks_red) ae_svm_results = ae_svm_results.append({'nu': nu, 'gamma': gamma, 'n_components': latent_dim, 'TPR_train': tpr_train, 'TPR_test': tpr_test, 'TNR': tnr, 'model': 'ae-svm', 'auc': area, 'f_beta': fb}, ignore_index=True) return ae_svm_results def unsupervised_evaluation(classifier, train_set, test_set, attack_set, beta=20): y_pred_train = classifier.predict(train_set) y_pred_test = classifier.predict(test_set) y_pred_outliers = classifier.predict(attack_set) n_accurate_train = y_pred_train[y_pred_train == 1].size n_accurate_test = y_pred_test[y_pred_test == 1].size n_accurate_outliers = y_pred_outliers[y_pred_outliers == -1].size fpr, tpr, _ = roc_curve(np.concatenate([np.ones(y_pred_test.shape[0]), -1*np.ones(y_pred_outliers.shape[0])]), np.concatenate([y_pred_test, y_pred_outliers]), pos_label=1) fb = fbeta_score(np.concatenate([np.ones(y_pred_test.shape[0]), -1*np.ones(y_pred_outliers.shape[0])]), np.concatenate([y_pred_test, y_pred_outliers]), beta=beta, pos_label=1) tnr = n_accurate_outliers/attack_set.shape[0] tpr_test = n_accurate_test/test_set.shape[0] tpr_train = n_accurate_train/train_set.shape[0] area = auc(fpr, tpr) return fb, area, tnr, tpr_train, tpr_test def neural_network(x_train, y_train, x_test, y_test): model = Sequential() model.add(Dense(128, input_shape=(x_train.shape[1],), activation='relu', kernel_regularizer=l2(0.01))) model.add(Dropout(0.1)) model.add(Dense(64, activation='relu', kernel_regularizer=l2(0.01))) model.add(Dropout(0.2)) model.add(Dense(128, kernel_initializer='glorot_uniform', activation='sigmoid')) model.add(Dropout(0.4)) model.add(Dense(64, kernel_initializer='glorot_uniform', activation='tanh')) model.add(Dropout(0.5)) model.add(Dense(32, kernel_initializer='glorot_uniform', activation='tanh')) 
model.add(Dropout(0.4)) model.add(Dense(128, kernel_initializer='glorot_uniform', activation='tanh')) model.add(Dropout(0.3)) model.add(Dense(1, kernel_initializer='normal', activation='sigmoid')) model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy']) network_history = model.fit(x_train, y_train, batch_size=128, epochs=10, verbose=0, validation_data=(x_test, y_test)) plot_history_with_acc(network_history) return model def random_forest(x_train, y_train, x_test, y_test, random_forest_results): # Random forest Hyper-parameters estimators = [150, 200] dimensions = [int(i*x_test.shape[1]) for i in [1]] for estimator in estimators: for n in dimensions: x_reduced_pca, test_reduced_pca = reduce_dimensionality(n, x_train, x_test, 'PCA') x_reduced_rp, test_reduced_rp = reduce_dimensionality(n, x_train, x_test, 'RP') classifier = RandomForestClassifier(n_estimators=estimator, n_jobs=7) classifier.fit(x_reduced_pca, y_train) fb, area, tnr, tpr = supervised_evaluation(classifier, test_reduced_pca, y_test) random_forest_results = random_forest_results.append({'estimators': estimator, 'n_components': n, 'TPR': tpr, 'TNR': tnr, 'model': 'random_forest', 'auc': area, 'f_beta': fb, 'projection': 'PCA'}, ignore_index=True) classifier = RandomForestClassifier(n_estimators=estimator, n_jobs=7) classifier.fit(x_reduced_rp, y_train) fb, area, tnr, tpr = supervised_evaluation(classifier, test_reduced_rp, y_test) random_forest_results = random_forest_results.append({'estimators': estimator, 'n_components': n, 'TPR': tpr, 'TNR': tnr, 'model': 'random_forest', 'auc': area, 'f_beta': fb, 'projection': 'RP'}, ignore_index=True) classifier = RandomForestClassifier(n_estimators=estimator, n_jobs=7) classifier.fit(x_train, y_train) fb, area, tnr, tpr = supervised_evaluation(classifier, x_test, y_test) random_forest_results = random_forest_results.append({'estimators': estimator, 'n_components': x_test.shape[1], 'TPR': tpr, 'TNR': tnr, 'model': 'random_forest', 'auc': area, 'f_beta': fb, 'projection': 'None'}, ignore_index=True) return random_forest_results def ada_boost(x_train, y_train, x_test, y_test, ada_boost_results): # AdaBoost Hyper-parameters learning_rates = [0.55] dimensions = [int(i*x_test.shape[1]) for i in [1]] for n in dimensions: x_reduced_pca, test_reduced_pca = reduce_dimensionality(n, x_train, x_test, 'PCA') x_reduced_rp, test_reduced_rp = reduce_dimensionality(n, x_train, x_test, 'RP') for lr in learning_rates: classifier = AdaBoostClassifier(learning_rate=lr) classifier.fit(x_reduced_pca, y_train) fb, area, tnr, tpr = supervised_evaluation(classifier, test_reduced_pca, y_test) ada_boost_results = ada_boost_results.append({'LR': lr, 'n_components': n, 'TPR': tpr, 'TNR': tnr, 'model': 'ada_boost', 'auc': area, 'f_beta': fb, 'projection': 'PCA'}, ignore_index=True) classifier = AdaBoostClassifier(learning_rate=lr) classifier.fit(x_reduced_rp, y_train) fb, area, tnr, tpr = supervised_evaluation(classifier, test_reduced_rp, y_test) ada_boost_results = ada_boost_results.append({'LR': lr, 'n_components': n, 'TPR': tpr, 'TNR': tnr, 'model': 'ada_boost', 'auc': area, 'f_beta': fb, 'projection': 'RP'}, ignore_index=True) return ada_boost_results def svm_classifier(x_train, y_train, x_test, y_test, svm_results): # SVC Hyper-parameters dimensions = [int(i*x_test.shape[1]) for i in [1]] for n in dimensions: x_reduced_pca, test_reduced_pca = reduce_dimensionality(n, x_train, x_test, 'PCA') x_reduced_rp, test_reduced_rp = reduce_dimensionality(n, x_train, x_test, 'RP') classifier = 
svm.SVC(gamma='auto', cache_size=7000) classifier.fit(x_reduced_pca, y_train) fb, area, tnr, tpr = supervised_evaluation(classifier, test_reduced_pca, y_test) svm_results = svm_results.append({ 'n_components': n, 'TPR': tpr, 'TNR': tnr, 'model': 'svm', 'auc': area, 'f_beta': fb, 'projection': 'PCA'}, ignore_index=True) classifier = svm.SVC(gamma='auto', cache_size=7000) classifier.fit(x_reduced_rp, y_train) fb, area, tnr, tpr = supervised_evaluation(classifier, test_reduced_rp, y_test) svm_results = svm_results.append({ 'n_components': n, 'TPR': tpr, 'TNR': tnr, 'model': 'svm', 'auc': area, 'f_beta': fb, 'projection': 'RP'}, ignore_index=True) return svm_results def xg_boost(x_train, y_train, x_test, y_test, xg_boost_results): # XGBoost Hyper-parameters dimensions = [int(i*x_test.shape[1]) for i in [1]] for n in dimensions: x_reduced_pca, test_reduced_pca = reduce_dimensionality(n, x_train, x_test, 'PCA') x_reduced_rp, test_reduced_rp = reduce_dimensionality(n, x_train, x_test, 'RP') classifier = xgb.XGBClassifier() grid = {'max_depth': 10} classifier.set_params(**grid) classifier.fit(x_reduced_pca, y_train) fb, area, tnr, tpr = supervised_evaluation(classifier, test_reduced_pca, y_test) xg_boost_results = xg_boost_results.append({ 'n_components': n, 'TPR': tpr, 'TNR': tnr, 'model': 'xgboost', 'auc': area, 'f_beta': fb, 'projection': 'PCA'}, ignore_index=True) classifier = xgb.XGBClassifier() grid = {'max_depth': 10} classifier.set_params(**grid) classifier.fit(x_reduced_rp, y_train) fb, area, tnr, tpr = supervised_evaluation(classifier, test_reduced_rp, y_test) xg_boost_results = xg_boost_results.append({ 'n_components': n, 'TPR': tpr, 'TNR': tnr, 'model': 'xgboost', 'auc': area, 'f_beta': fb, 'projection': 'RP'}, ignore_index=True) classifier = xgb.XGBClassifier() grid = {'max_depth': 10} classifier.set_params(**grid) classifier.fit(x_train, y_train) fb, area, tnr, tpr = supervised_evaluation(classifier, x_test, y_test) xg_boost_results = xg_boost_results.append({ 'n_components': x_test.shape[1], 'TPR': tpr, 'TNR': tnr, 'model': 'xgboost', 'auc': area, 'f_beta': fb, 'projection': 'None'}, ignore_index=True) return xg_boost_results def supervised_evaluation(classifier, x_test, y_test, beta=20, nn=False): if not nn: y_pred = classifier.predict(x_test) confusion_matrix(y_test, y_pred) fpr, tpr, _ = roc_curve(y_test, y_pred) fb = fbeta_score(y_test, y_pred, beta=beta, pos_label=1) area = auc(fpr, tpr) tpr = tpr[1] tnr = 1 - fpr[1] return fb, area, tnr, tpr def plot_roc(classifier, test, attacks, title): y_pred_test = classifier.predict(test) y_pred_outliers = classifier.predict(attacks) fpr, tpr, _ = roc_curve(np.concatenate([np.ones(y_pred_test.shape[0]), -1*np.ones(y_pred_outliers.shape[0])]), np.concatenate([y_pred_test, y_pred_outliers]), pos_label=1) roc_auc = auc(fpr, tpr) plt.figure() lw = 2 plt.plot(fpr, tpr, color='darkorange', lw=lw, label='ROC curve (area = %0.2f)' % roc_auc) plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver operating characteristic: {}'.format(title)) plt.legend(loc='lower right') plt.show() def plot_roc_supervised(classifier, x_test, y_test, title, nn=False): y_pred = classifier.predict(x_test) fpr, tpr, _ = roc_curve(y_test, y_pred) if nn: y_pred = [round(x[0]) for x in y_pred] print(confusion_matrix(y_test, y_pred)) roc_auc = auc(fpr, tpr) plt.figure() lw = 2 plt.plot(fpr, tpr, color='darkorange', lw=lw, label='ROC curve 
(area = %0.2f)' % roc_auc) plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver operating characteristic {}'.format(title)) plt.legend(loc='lower right') plt.show() def plot_history(network_history, title): plt.figure(figsize=(10, 5)) plt.title(title) plt.xlabel('Epochs') plt.ylabel('Loss') plt.semilogy(network_history.history['loss']) plt.semilogy(network_history.history['val_loss']) plt.legend(['Training', 'Validation']) plt.show() def plot_history_with_acc(network_history, title='Loss and Accuracy'): plt.figure(figsize=(15, 10)) plt.subplot(211) plt.title(title) plt.xlabel('Epochs') plt.ylabel('Loss') plt.semilogy(network_history.history['loss']) plt.semilogy(network_history.history['val_loss']) plt.legend(['Training', 'Validation']) plt.subplot(212) plt.xlabel('Epochs') plt.ylabel('Accuracy') plt.plot(network_history.history['acc']) plt.plot(network_history.history['val_acc']) plt.legend(['Training', 'Validation'], loc='lower right') plt.show() def reduce_dimensionality(n_components, train, test, method, attack=None): if method == 'PCA': matrix = PCA(n_components=n_components) elif method == 'RP': matrix = random_projection.SparseRandomProjection(n_components=n_components, random_state=7) else: print('unknown projection method, choose either RP or PCA') return None train = matrix.fit_transform(train) test = matrix.transform(test) if attack is None: return train, test attack = matrix.transform(attack) return train, test, attack
2.3125
2
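The helpers above expect pre-split feature matrices and accumulate results into pandas DataFrames; a hedged sketch of calling the one-class SVM evaluation with random stand-in features (the import path and column list are assumptions).

# Random features stand in for the real verification metrics.
import numpy as np
import pandas as pd
from evaluation import one_class_svm  # import path is an assumption

rng = np.random.RandomState(7)
x_train = rng.normal(size=(200, 10))             # untampered renditions, train split
x_test = rng.normal(size=(50, 10))               # untampered renditions, held out
x_attacks = rng.normal(3.0, 1.0, size=(50, 10))  # tampered renditions

columns = ['nu', 'gamma', 'n_components', 'TPR_train', 'TPR_test',
           'TNR', 'model', 'auc', 'f_beta', 'projection']
svm_results = one_class_svm(x_train, x_test, x_attacks,
                            pd.DataFrame(columns=columns))
print(svm_results.sort_values('f_beta', ascending=False).head())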
tests/test_notifo_message.py
mrtazz/notifo.py
3
2525
# encoding: utf-8

import unittest
import os
import sys
sys.path.append(os.getcwd())

from notifo import Notifo, send_message


class TestNotifyUser(unittest.TestCase):

    def setUp(self):
        self.provider = "test_provider"
        self.provider_banned = "test_provider_msg_banned"
        self.user = "test_user"
        self.sender = "test_user2"
        self.banned = "test_user_banned"
        self.banned_token = "<KEY>"
        self.sender_token = "x633a05b18f7f65bf461ffb3900c6eb70eaafb0ed"
        self.provider_token = "<KEY>"
        self.provider_banned_token = "<KEY>"
        self.user_token = "<KEY>"

    def test_message(self):
        res = send_message(self.sender, self.sender_token,
                           to=self.user, msg="foo test")
        self.assertEqual(2201, res["response_code"])

    def test_message_with_object(self):
        res = Notifo(self.sender, self.sender_token).send_message(
            to=self.user, msg="foo test")
        self.assertEqual(2201, res["response_code"])

    def test_message_banned(self):
        res = send_message(self.banned, self.banned_token,
                           to=self.user, msg="foo test")
        self.assertEqual(403, res["response_code"])

    def test_message_provider(self):
        res = send_message(self.provider, self.provider_token,
                           to=self.user, msg="foo test")
        self.assertEqual(2201, res["response_code"])

    def test_message_provider_banned(self):
        res = send_message(self.provider_banned, self.provider_banned_token,
                           to=self.user, msg="foo test")
        self.assertEqual(403, res["response_code"])


if __name__ == '__main__':
    unittest.main()
2.546875
3
geoist/cattools/Smoothing.py
wqqpp007/geoist
1
2526
<filename>geoist/cattools/Smoothing.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import numpy as np

from . import Selection as Sel
from . import Exploration as Exp
from . import CatUtils as CU

#-----------------------------------------------------------------------------------------

def GaussWin(Dis, Sig):

  return np.exp(-(Dis**2)/(Sig**2.))

#-----------------------------------------------------------------------------------------

def SmoothMFD(Db, a, Wkt, Window=GaussWin, Par=50.,
              Delta=0.1, SphereGrid=False,
              Box=[], Buffer=[], Grid=[],
              Threshold=-100, Unwrap=False,
              ZeroRates=False):

  if Par <= 0:
    Par = np.inf

  # Catalogue selection
  DbS = Sel.AreaSelect(Db, Wkt, Owrite=0, Buffer=Buffer, Unwrap=Unwrap)
  x, y, z = Exp.GetHypocenter(DbS)

  # Creating the mesh grid
  P = CU.Polygon()
  P.Load(Wkt)

  # Unwrapping coordinates
  if Unwrap:
    x = [i if i > 0. else i+360. for i in x]
    P.Unwrap()

  if Grid:
    XY = [G for G in Grid if P.IsInside(G[0], G[1])]
  else:
    if SphereGrid:
      XY = P.SphereGrid(Delta=Delta, Unwrap=Unwrap)
    else:
      XY = P.CartGrid(Dx=Delta, Dy=Delta, Bounds=Box)

  Win = []
  for xyP in XY:
    Win.append(0)
    for xyE in zip(x, y):
      Dis = CU.WgsDistance(xyP[1], xyP[0], xyE[1], xyE[0])
      Win[-1] += Window(Dis, Par)

  # Scaling and normalising the rates
  Norm = np.sum(Win)

  A = []; X = []; Y = []
  for I, W in enumerate(Win):

    aT = -np.inf
    if Norm > 0. and W > 0.:
      aT = a + np.log10(W/Norm)
      if aT < Threshold:
        # Filter below threshold
        aT = -np.inf

    if ZeroRates:
      A.append(aT)
      X.append(XY[I][0])
      Y.append(XY[I][1])
    else:
      if aT > -np.inf:
        A.append(aT)
        X.append(XY[I][0])
        Y.append(XY[I][1])

  if Unwrap:
    # Wrap back longitudes
    X = [x if x < 180. else x-360. for x in X]

  return X, Y, A
2.21875
2
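A hedged sketch of calling `SmoothMFD`; the catalogue object is assumed to be loaded elsewhere with geoist's catalogue tools, and the polygon and a-value below are illustrative only.

# "Db" stands for a geoist catalogue object loaded elsewhere; values are illustrative.
from geoist.cattools.Smoothing import SmoothMFD

WKT = "POLYGON((20.0 35.0, 25.0 35.0, 25.0 40.0, 20.0 40.0, 20.0 35.0))"

def smooth_demo(Db):
    # Gaussian kernel with a 25 km bandwidth on a 0.5-degree Cartesian grid,
    # keeping grid nodes with zero smoothed rate
    X, Y, A = SmoothMFD(Db, 4.5, WKT, Par=25., Delta=0.5, ZeroRates=True)
    return list(zip(X, Y, A))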
hebsafeharbor/identifier/signals/lexicon_based_recognizer.py
dkarmon/HebSafeHarbor
3
2527
from typing import List

from presidio_analyzer import EntityRecognizer, RecognizerResult, AnalysisExplanation
from presidio_analyzer.nlp_engine import NlpArtifacts

from hebsafeharbor.common.terms_recognizer import TermsRecognizer


class LexiconBasedRecognizer(EntityRecognizer):
    """
    A class which extends the EntityRecognizer (@Presidio) and recognize entities based on a lexicon
    """

    DEFAULT_CONFIDENCE_LEVEL = 0.7  # expected confidence level for this recognizer

    def __init__(self, name: str, supported_entity: str, phrase_list: List[str], supported_language: str = "he",
                 allowed_prepositions: List[str] = None):
        """
        Initializes Hebrew LexiconBasedRecognizer

        :param name: recognizer's name
        :param supported_entity: entity type to be associated with the entities recognized by the lexicon based
        recognizer
        :param phrase_list: lexicon's phrases
        :param supported_language: the language that the recognizer supports. Hebrew is the default
        :param allowed_prepositions: prepositions that allowed to be recognized as part of the entity (in addition
        to the lexicon phrase itself). Empty list (which means prepositions are not allowed) is the default
        """
        super().__init__(name=name, supported_entities=[supported_entity], supported_language=supported_language)
        self.terms_recognizer = TermsRecognizer(phrase_list)
        self.allowed_prepositions = allowed_prepositions if allowed_prepositions else []

    def load(self) -> None:
        """No loading is required."""
        pass

    def analyze(
            self, text: str, entities: List[str], nlp_artifacts: NlpArtifacts
    ) -> List[RecognizerResult]:
        """
        Recognize entities based on lexicon

        :param text: text for recognition
        :param entities: supported entities
        :param nlp_artifacts: artifacts of the nlp engine
        :return list of entities recognized based on the lexicon
        """
        results = []

        terms_offsets = self.terms_recognizer(text, prefixes=self.allowed_prepositions)

        # Iterate over the Automaton offsets and create Recognizer result for each of them
        for start_offset, length in terms_offsets:
            result = RecognizerResult(
                entity_type=self.supported_entities[0],
                start=start_offset,
                end=start_offset + length,
                score=self.DEFAULT_CONFIDENCE_LEVEL,
                analysis_explanation=AnalysisExplanation(self.name, self.DEFAULT_CONFIDENCE_LEVEL),
                recognition_metadata={RecognizerResult.RECOGNIZER_NAME_KEY: self.name}
            )
            results.append(result)

        return results
2.9375
3
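Because `analyze` only needs the raw text, the recognizer can be exercised on its own, outside a full Presidio pipeline; a minimal sketch with a made-up city lexicon follows.

# The lexicon, entity name, and prepositions below are made up for illustration.
from hebsafeharbor.identifier.signals.lexicon_based_recognizer import LexiconBasedRecognizer

recognizer = LexiconBasedRecognizer(
    name="city_recognizer",
    supported_entity="CITY",
    phrase_list=["תל אביב", "חיפה"],        # "Tel Aviv", "Haifa"
    allowed_prepositions=["ב", "מ", "ל"],   # common Hebrew prefixes: in/from/to
)

text = "המטופל אושפז בתל אביב בשבוע שעבר"  # "the patient was hospitalized in Tel Aviv last week"
results = recognizer.analyze(text, entities=["CITY"], nlp_artifacts=None)
for res in results:
    print(res.entity_type, text[res.start:res.end], res.score)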
my_plugins/YouCompleteMe/third_party/ycmd/ycmd/tests/clangd/subcommands_test.py
cyx233/vim_config
0
2528
# encoding: utf-8 # # Copyright (C) 2018 ycmd contributors # # This file is part of ycmd. # # ycmd is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ycmd is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with ycmd. If not, see <http://www.gnu.org/licenses/>. from __future__ import absolute_import from __future__ import unicode_literals from __future__ import print_function from __future__ import division from hamcrest.core.base_matcher import BaseMatcher from hamcrest import ( assert_that, contains, contains_string, equal_to, has_entries, has_entry, matches_regexp ) from pprint import pprint import requests import os.path from ycmd.tests.clangd import ( IsolatedYcmd, SharedYcmd, PathToTestFile, RunAfterInitialized ) from ycmd.tests.test_utils import ( BuildRequest, ChunkMatcher, CombineRequest, LineColMatcher, LocationMatcher, ErrorMatcher, WithRetry, WaitUntilCompleterServerReady ) from ycmd.utils import ReadFile # This test is isolated to trigger objcpp hooks, rather than fetching completer # from cache. @IsolatedYcmd() def Subcommands_DefinedSubcommands_test( app ): file_path = PathToTestFile( 'GoTo_Clang_ZeroBasedLineAndColumn_test.cc' ) RunAfterInitialized( app, { 'request': { 'completer_target': 'filetype_default', 'line_num': 10, 'column_num': 3, 'filetype': 'objcpp', 'filepath': file_path }, 'expect': { 'response': requests.codes.ok, 'data': contains( *sorted( [ 'ExecuteCommand', 'FixIt', 'Format', 'GetDoc', 'GetDocImprecise', 'GetType', 'GetTypeImprecise', 'GoTo', 'GoToDeclaration', 'GoToDefinition', 'GoToImprecise', 'GoToInclude', 'GoToReferences', 'RefactorRename', 'RestartServer' ] ) ) }, 'route': '/defined_subcommands', } ) @SharedYcmd def Subcommands_GoTo_ZeroBasedLineAndColumn_test( app ): file_path = PathToTestFile( 'GoTo_Clang_ZeroBasedLineAndColumn_test.cc' ) RunAfterInitialized( app, { 'request': { 'contents': ReadFile( file_path ), 'completer_target': 'filetype_default', 'command_arguments': [ 'GoToDefinition' ], 'line_num': 10, 'column_num': 3, 'filetype': 'cpp', 'filepath': file_path }, 'expect': { 'response': requests.codes.ok, 'data': { 'filepath': os.path.abspath( file_path ), 'line_num': 2, 'column_num': 8 } }, 'route': '/run_completer_command', } ) @SharedYcmd def RunGoToTest_all( app, folder, command, test ): filepath = PathToTestFile( folder, test[ 'req' ][ 0 ] ) common_request = { 'completer_target' : 'filetype_default', 'filepath' : filepath, 'command_arguments': [ command ], 'contents' : ReadFile( filepath ), 'filetype' : 'cpp' } request = common_request request.update( { 'line_num' : test[ 'req' ][ 1 ], 'column_num': test[ 'req' ][ 2 ], } ) response = test[ 'res' ] if isinstance( response, list ): expect = { 'response': requests.codes.ok, 'data': contains( *[ LocationMatcher( PathToTestFile( folder, os.path.normpath( location[ 0 ] ) ), location[ 1 ], location[ 2 ] ) for location in response ] ) } elif isinstance( response, tuple ): expect = { 'response': requests.codes.ok, 'data': LocationMatcher( PathToTestFile( folder, os.path.normpath( response[ 0 ] ) ), response[ 1 ], response[ 2 ] ) } else: expect = { 'response': 
requests.codes.internal_server_error, 'data': ErrorMatcher( RuntimeError, test[ 'res' ] ) } RunAfterInitialized( app, { 'request': request, 'route' : '/run_completer_command', 'expect' : expect } ) def Subcommands_GoTo_all_test(): tests = [ # Local::x -> definition/declaration of x { 'req': ( 'goto.cc', 23, 21 ), 'res': ( 'goto.cc', 4, 9 ) }, # Local::in_line -> definition/declaration of Local::in_line { 'req': ( 'goto.cc', 24, 26 ), 'res': ( 'goto.cc', 6, 10 ) }, # Local -> definition/declaration of Local { 'req': ( 'goto.cc', 24, 16 ), 'res': ( 'goto.cc', 2, 11 ) }, # Local::out_of_line -> definition of Local::out_of_line { 'req': ( 'goto.cc', 25, 27 ), 'res': ( 'goto.cc', 14, 13 ) }, # GoToDeclaration alternates between definition and declaration { 'req': ( 'goto.cc', 14, 13 ), 'res': ( 'goto.cc', 11, 10 ) }, { 'req': ( 'goto.cc', 11, 10 ), 'res': ( 'goto.cc', 14, 13 ) }, # test -> definition and declaration of test { 'req': ( 'goto.cc', 21, 5 ), 'res': ( 'goto.cc', 19, 5 ) }, { 'req': ( 'goto.cc', 19, 5 ), 'res': ( 'goto.cc', 21, 5 ) }, # Unicøde { 'req': ( 'goto.cc', 34, 9 ), 'res': ( 'goto.cc', 32, 26 ) }, # Another_Unicøde { 'req': ( 'goto.cc', 36, 17 ), 'res': ( 'goto.cc', 32, 54 ) }, { 'req': ( 'goto.cc', 36, 25 ), 'res': ( 'goto.cc', 32, 54 ) }, { 'req': ( 'goto.cc', 38, 3 ), 'res': ( 'goto.cc', 36, 28 ) }, # Expected failures { 'req': ( 'goto.cc', 13, 1 ), 'res': 'Cannot jump to location' }, { 'req': ( 'goto.cc', 16, 6 ), 'res': 'Cannot jump to location' }, ] for test in tests: for cmd in [ 'GoToDefinition', 'GoTo', 'GoToImprecise' ]: yield RunGoToTest_all, '', cmd, test def Subcommands_GoToDeclaration_all_test(): tests = [ # Local::x -> definition/declaration of x { 'req': ( 'goto.cc', 23, 21 ), 'res': ( 'goto.cc', 4, 9 ) }, # Local::in_line -> definition/declaration of Local::in_line { 'req': ( 'goto.cc', 24, 26 ), 'res': ( 'goto.cc', 6, 10 ) }, # Local -> definition/declaration of Local { 'req': ( 'goto.cc', 24, 16 ), 'res': ( 'goto.cc', 2, 11 ) }, # Local::out_of_line -> declaration of Local::out_of_line { 'req': ( 'goto.cc', 25, 27 ), 'res': ( 'goto.cc', 11, 10 ) }, # GoToDeclaration alternates between definition and declaration { 'req': ( 'goto.cc', 14, 13 ), 'res': ( 'goto.cc', 11, 10 ) }, { 'req': ( 'goto.cc', 11, 10 ), 'res': ( 'goto.cc', 14, 13 ) }, # test -> definition and declaration of test { 'req': ( 'goto.cc', 21, 5 ), 'res': ( 'goto.cc', 19, 5 ) }, { 'req': ( 'goto.cc', 19, 5 ), 'res': ( 'goto.cc', 21, 5 ) }, # Unicøde { 'req': ( 'goto.cc', 34, 9 ), 'res': ( 'goto.cc', 32, 26 ) }, # Another_Unicøde { 'req': ( 'goto.cc', 36, 17 ), 'res': ( 'goto.cc', 32, 54 ) }, { 'req': ( 'goto.cc', 36, 25 ), 'res': ( 'goto.cc', 32, 54 ) }, { 'req': ( 'goto.cc', 38, 3 ), 'res': ( 'goto.cc', 36, 28 ) }, # Expected failures { 'req': ( 'goto.cc', 13, 1 ), 'res': 'Cannot jump to location' }, { 'req': ( 'goto.cc', 16, 6 ), 'res': 'Cannot jump to location' }, ] for test in tests: yield RunGoToTest_all, '', 'GoToDeclaration', test def Subcommands_GoToInclude_test(): tests = [ { 'req': ( 'main.cpp', 1, 6 ), 'res': ( 'a.hpp', 1, 1 ) }, { 'req': ( 'main.cpp', 2, 14 ), 'res': ( 'system/a.hpp', 1, 1 ) }, { 'req': ( 'main.cpp', 3, 1 ), 'res': ( 'quote/b.hpp', 1, 1 ) }, # FIXME: should fail since b.hpp is included with angled brackets but its # folder is added with -iquote. 
{ 'req': ( 'main.cpp', 4, 10 ), 'res': ( 'quote/b.hpp', 1, 1 ) }, { 'req': ( 'main.cpp', 5, 11 ), 'res': ( 'system/c.hpp', 1, 1 ) }, { 'req': ( 'main.cpp', 6, 11 ), 'res': ( 'system/c.hpp', 1, 1 ) }, # Expected failures { 'req': ( 'main.cpp', 7, 1 ), 'res': 'Cannot jump to location' }, { 'req': ( 'main.cpp', 10, 13 ), 'res': 'Cannot jump to location' }, ] for test in tests: for cmd in [ 'GoToInclude', 'GoTo', 'GoToImprecise' ]: yield RunGoToTest_all, 'test-include', cmd, test def Subcommands_GoToReferences_test(): tests = [ # Function { 'req': ( 'goto.cc', 14, 21 ), 'res': [ ( 'goto.cc', 11, 10 ), ( 'goto.cc', 14, 13 ), ( 'goto.cc', 25, 22 ) ] }, # Namespace { 'req': ( 'goto.cc', 24, 17 ), 'res': [ ( 'goto.cc', 2, 11 ), ( 'goto.cc', 14, 6 ), ( 'goto.cc', 23, 14 ), ( 'goto.cc', 24, 15 ), ( 'goto.cc', 25, 15 ) ] }, # Expected failure { 'req': ( 'goto.cc', 27, 8 ), 'res': 'Cannot jump to location' }, ] for test in tests: yield RunGoToTest_all, '', 'GoToReferences', test @SharedYcmd def RunGetSemanticTest( app, filepath, filetype, test, command, response = requests.codes.ok ): contents = ReadFile( filepath ) common_args = { 'completer_target' : 'filetype_default', 'command_arguments': command, 'line_num' : 10, 'column_num' : 3, 'filepath' : filepath, 'contents' : contents, 'filetype' : filetype } args = test[ 0 ] if response == requests.codes.ok: if not isinstance( test[ 1 ], BaseMatcher ): expected = has_entry( 'message', contains_string( test[ 1 ] ) ) else: expected = has_entry( 'message', test[ 1 ] ) else: expected = test[ 1 ] request = common_args request.update( args ) test = { 'request': request, 'route': '/run_completer_command', 'expect': { 'response': response, 'data': expected } } RunAfterInitialized( app, test ) def Subcommands_GetType_test(): tests = [ # Basic pod types [ { 'line_num': 24, 'column_num': 3 }, 'Foo' ], # [ { 'line_num': 12, 'column_num': 2 }, 'Foo' ], [ { 'line_num': 12, 'column_num': 8 }, 'Foo' ], [ { 'line_num': 12, 'column_num': 9 }, 'Foo' ], [ { 'line_num': 12, 'column_num': 10 }, 'Foo' ], # [ { 'line_num': 13, 'column_num': 3 }, 'int' ], [ { 'line_num': 13, 'column_num': 7 }, 'int' ], # [ { 'line_num': 15, 'column_num': 7 }, 'char' ], # Function # [ { 'line_num': 22, 'column_num': 2 }, 'int main()' ], [ { 'line_num': 22, 'column_num': 6 }, 'int main()' ], # Declared and canonical type # On Ns:: [ { 'line_num': 25, 'column_num': 3 }, 'namespace Ns' ], # On Type (Type) # [ { 'line_num': 25, 'column_num': 8 }, # 'Ns::Type => Ns::BasicType<char>' ], # On "a" (Ns::Type) # [ { 'line_num': 25, 'column_num': 15 }, # 'Ns::Type => Ns::BasicType<char>' ], # [ { 'line_num': 26, 'column_num': 13 }, # 'Ns::Type => Ns::BasicType<char>' ], # Cursor on decl for refs & pointers [ { 'line_num': 39, 'column_num': 3 }, 'Foo' ], [ { 'line_num': 39, 'column_num': 11 }, 'Foo &' ], [ { 'line_num': 39, 'column_num': 15 }, 'Foo' ], [ { 'line_num': 40, 'column_num': 3 }, 'Foo' ], [ { 'line_num': 40, 'column_num': 11 }, 'Foo *' ], [ { 'line_num': 40, 'column_num': 18 }, 'Foo' ], # [ { 'line_num': 42, 'column_num': 3 }, 'const Foo &' ], [ { 'line_num': 42, 'column_num': 16 }, 'const struct Foo &' ], # [ { 'line_num': 43, 'column_num': 3 }, 'const Foo *' ], [ { 'line_num': 43, 'column_num': 16 }, 'const struct Foo *' ], # Cursor on usage [ { 'line_num': 45, 'column_num': 13 }, 'const struct Foo' ], # [ { 'line_num': 45, 'column_num': 19 }, 'const int' ], [ { 'line_num': 46, 'column_num': 13 }, 'const struct Foo *' ], # [ { 'line_num': 46, 'column_num': 20 }, 'const int' ], [ { 'line_num': 47, 
'column_num': 12 }, 'Foo' ], [ { 'line_num': 47, 'column_num': 17 }, 'int' ], [ { 'line_num': 48, 'column_num': 12 }, 'Foo *' ], [ { 'line_num': 48, 'column_num': 18 }, 'int' ], # Auto in declaration # [ { 'line_num': 28, 'column_num': 3 }, 'struct Foo &' ], # [ { 'line_num': 28, 'column_num': 11 }, 'struct Foo &' ], [ { 'line_num': 28, 'column_num': 18 }, 'struct Foo' ], # [ { 'line_num': 29, 'column_num': 3 }, 'Foo *' ], # [ { 'line_num': 29, 'column_num': 11 }, 'Foo *' ], [ { 'line_num': 29, 'column_num': 18 }, 'Foo' ], # [ { 'line_num': 31, 'column_num': 3 }, 'const Foo &' ], # [ { 'line_num': 31, 'column_num': 16 }, 'const Foo &' ], # [ { 'line_num': 32, 'column_num': 3 }, 'const Foo *' ], # [ { 'line_num': 32, 'column_num': 16 }, 'const Foo *' ], # Auto in usage # [ { 'line_num': 34, 'column_num': 14 }, 'const Foo' ], # [ { 'line_num': 34, 'column_num': 21 }, 'const int' ], # [ { 'line_num': 35, 'column_num': 14 }, 'const Foo *' ], # [ { 'line_num': 35, 'column_num': 22 }, 'const int' ], [ { 'line_num': 36, 'column_num': 13 }, 'Foo' ], [ { 'line_num': 36, 'column_num': 19 }, 'int' ], # [ { 'line_num': 37, 'column_num': 13 }, 'Foo *' ], [ { 'line_num': 37, 'column_num': 20 }, 'int' ], # Unicode [ { 'line_num': 51, 'column_num': 13 }, 'Unicøde *' ], # Bound methods # On Win32, methods pick up an __attribute__((thiscall)) to annotate their # calling convention. This shows up in the type, which isn't ideal, but # also prohibitively complex to try and strip out. [ { 'line_num': 53, 'column_num': 15 }, matches_regexp( r'int bar\(int i\)(?: __attribute__\(\(thiscall\)\))?' ) ], [ { 'line_num': 54, 'column_num': 18 }, matches_regexp( r'int bar\(int i\)(?: __attribute__\(\(thiscall\)\))?' ) ], ] for subcommand in [ 'GetType', 'GetTypeImprecise' ]: for test in tests: yield ( RunGetSemanticTest, PathToTestFile( 'GetType_Clang_test.cc' ), 'cpp', test, [ subcommand ] ) def Subcommands_GetDoc_test(): tests = [ # from local file [ { 'line_num': 5, 'column_num': 10 }, 'docstring', requests.codes.ok ], # from header [ { 'line_num': 6, 'column_num': 10 }, 'docstring', requests.codes.ok ], # no docstring [ { 'line_num': 7, 'column_num': 7 }, 'int x = 3', requests.codes.ok ], # no hover [ { 'line_num': 8, 'column_num': 1 }, ErrorMatcher( RuntimeError, 'No hover information.' ), requests.codes.server_error ] ] for subcommand in [ 'GetDoc', 'GetDocImprecise' ]: for test in tests: yield ( RunGetSemanticTest, PathToTestFile( 'GetDoc_Clang_test.cc' ), 'cpp', test, [ subcommand ], test[ 2 ] ) @SharedYcmd def RunFixItTest( app, line, column, lang, file_path, check ): contents = ReadFile( file_path ) language_options = { 'cpp11': { 'filetype' : 'cpp', }, 'cuda': { 'filetype' : 'cuda', }, 'objective-c': { 'filetype' : 'objc', }, } args = { 'completer_target' : 'filetype_default', 'contents' : contents, 'filepath' : file_path, 'command_arguments': [ 'FixIt' ], 'line_num' : line, 'column_num' : column, } args.update( language_options[ lang ] ) test = { 'request': args, 'route': '/detailed_diagnostic' } # First get diags. 
diags = RunAfterInitialized( app, test ) while 'message' in diags and 'diagnostics' in diags[ 'message' ].lower(): receive_diags = { 'request': args, 'route': '/receive_messages' } RunAfterInitialized( app, receive_diags ) diags = RunAfterInitialized( app, test ) results = app.post_json( '/run_completer_command', BuildRequest( **args ) ).json pprint( results ) check( results ) def FixIt_Check_cpp11_Ins( results ): # First fixit # switch(A()) { // expected-error{{explicit conversion to}} assert_that( results, has_entries( { 'fixits': contains( has_entries( { 'chunks': contains( has_entries( { 'replacement_text': equal_to( 'static_cast<int>(' ), 'range': has_entries( { 'start': has_entries( { 'line_num': 16, 'column_num': 10 } ), 'end' : has_entries( { 'line_num': 16, 'column_num': 10 } ), } ), } ), has_entries( { 'replacement_text': equal_to( ')' ), 'range': has_entries( { 'start': has_entries( { 'line_num': 16, 'column_num': 13 } ), 'end' : has_entries( { 'line_num': 16, 'column_num': 13 } ), } ), } ) ), 'location': has_entries( { 'line_num': 16, 'column_num': 0 } ) } ) ) } ) ) def FixIt_Check_cpp11_InsMultiLine( results ): # Similar to FixIt_Check_cpp11_1 but inserts split across lines # assert_that( results, has_entries( { 'fixits': contains( has_entries( { 'chunks': contains( has_entries( { 'replacement_text': equal_to( 'static_cast<int>(' ), 'range': has_entries( { 'start': has_entries( { 'line_num': 26, 'column_num': 7 } ), 'end' : has_entries( { 'line_num': 26, 'column_num': 7 } ), } ), } ), has_entries( { 'replacement_text': equal_to( ')' ), 'range': has_entries( { 'start': has_entries( { 'line_num': 28, 'column_num': 2 } ), 'end' : has_entries( { 'line_num': 28, 'column_num': 2 } ), } ), } ) ), 'location': has_entries( { 'line_num': 25, 'column_num': 14 } ) } ) ) } ) ) def FixIt_Check_cpp11_Del( results ): # Removal of :: assert_that( results, has_entries( { 'fixits': contains( has_entries( { 'chunks': contains( has_entries( { 'replacement_text': equal_to( '' ), 'range': has_entries( { 'start': has_entries( { 'line_num': 35, 'column_num': 7 } ), 'end' : has_entries( { 'line_num': 35, 'column_num': 9 } ), } ), } ) ), 'location': has_entries( { 'line_num': 35, 'column_num': 7 } ) } ) ) } ) ) def FixIt_Check_cpp11_Repl( results ): assert_that( results, has_entries( { 'fixits': contains( has_entries( { 'chunks': contains( has_entries( { 'replacement_text': equal_to( 'foo' ), 'range': has_entries( { 'start': has_entries( { 'line_num': 40, 'column_num': 6 } ), 'end' : has_entries( { 'line_num': 40, 'column_num': 9 } ), } ), } ) ), 'location': has_entries( { 'line_num': 40, 'column_num': 6 } ) } ) ) } ) ) def FixIt_Check_cpp11_DelAdd( results ): assert_that( results, has_entries( { 'fixits': contains( has_entries( { 'chunks': contains( has_entries( { 'replacement_text': equal_to( '' ), 'range': has_entries( { 'start': has_entries( { 'line_num': 48, 'column_num': 3 } ), 'end' : has_entries( { 'line_num': 48, 'column_num': 4 } ), } ), } ), has_entries( { 'replacement_text': equal_to( '~' ), 'range': has_entries( { 'start': has_entries( { 'line_num': 48, 'column_num': 9 } ), 'end' : has_entries( { 'line_num': 48, 'column_num': 9 } ), } ), } ), ), 'location': has_entries( { 'line_num': 48, 'column_num': 3 } ) } ), has_entries( { 'chunks': contains( has_entries( { 'replacement_text': equal_to( '= default;' ), 'range': has_entries( { 'start': has_entries( { 'line_num': 48, 'column_num': 15 } ), 'end' : has_entries( { 'line_num': 48, 'column_num': 17 } ), } ), } ), ), 'location': has_entries( { 
'line_num': 48, 'column_num': 3 } ) } ), ) } ) ) def FixIt_Check_objc( results ): assert_that( results, has_entries( { 'fixits': contains( has_entries( { 'chunks': contains( has_entries( { 'replacement_text': equal_to( 'id' ), 'range': has_entries( { 'start': has_entries( { 'line_num': 5, 'column_num': 3 } ), 'end' : has_entries( { 'line_num': 5, 'column_num': 3 } ), } ), } ) ), 'location': has_entries( { 'line_num': 5, 'column_num': 3 } ) } ) ) } ) ) def FixIt_Check_objc_NoFixIt( results ): # and finally, a warning with no fixits assert_that( results, equal_to( { 'fixits': [] } ) ) def FixIt_Check_cpp11_MultiFirst( results ): assert_that( results, has_entries( { 'fixits': contains( # first fix-it at 54,16 has_entries( { 'chunks': contains( has_entries( { 'replacement_text': equal_to( 'foo' ), 'range': has_entries( { 'start': has_entries( { 'line_num': 54, 'column_num': 16 } ), 'end' : has_entries( { 'line_num': 54, 'column_num': 19 } ), } ), } ) ), 'location': has_entries( { 'line_num': 54, 'column_num': 15 } ) } ), # second fix-it at 54,52 has_entries( { 'chunks': contains( has_entries( { 'replacement_text': equal_to( '' ), 'range': has_entries( { 'start': has_entries( { 'line_num': 54, 'column_num': 52 } ), 'end' : has_entries( { 'line_num': 54, 'column_num': 53 } ), } ), } ), has_entries( { 'replacement_text': equal_to( '~' ), 'range': has_entries( { 'start': has_entries( { 'line_num': 54, 'column_num': 58 } ), 'end' : has_entries( { 'line_num': 54, 'column_num': 58 } ), } ), } ), ), 'location': has_entries( { 'line_num': 54, 'column_num': 15 } ) } ), has_entries( { 'chunks': contains( has_entries( { 'replacement_text': equal_to( '= default;' ), 'range': has_entries( { 'start': has_entries( { 'line_num': 54, 'column_num': 64 } ), 'end' : has_entries( { 'line_num': 54, 'column_num': 67 } ), } ), } ) ), 'location': has_entries( { 'line_num': 54, 'column_num': 15 } ) } ), ) } ) ) def FixIt_Check_cpp11_MultiSecond( results ): assert_that( results, has_entries( { 'fixits': contains( # first fix-it at 54,16 has_entries( { 'chunks': contains( has_entries( { 'replacement_text': equal_to( 'foo' ), 'range': has_entries( { 'start': has_entries( { 'line_num': 54, 'column_num': 16 } ), 'end' : has_entries( { 'line_num': 54, 'column_num': 19 } ), } ), } ) ), 'location': has_entries( { 'line_num': 54, 'column_num': 51 } ) } ), # second fix-it at 54,52 has_entries( { 'chunks': contains( has_entries( { 'replacement_text': equal_to( '' ), 'range': has_entries( { 'start': has_entries( { 'line_num': 54, 'column_num': 52 } ), 'end' : has_entries( { 'line_num': 54, 'column_num': 53 } ), } ), } ), has_entries( { 'replacement_text': equal_to( '~' ), 'range': has_entries( { 'start': has_entries( { 'line_num': 54, 'column_num': 58 } ), 'end' : has_entries( { 'line_num': 54, 'column_num': 58 } ), } ), } ), ), 'location': has_entries( { 'line_num': 54, 'column_num': 51 } ) } ), has_entries( { 'chunks': contains( has_entries( { 'replacement_text': equal_to( '= default;' ), 'range': has_entries( { 'start': has_entries( { 'line_num': 54, 'column_num': 64 } ), 'end' : has_entries( { 'line_num': 54, 'column_num': 67 } ), } ), } ) ), 'location': has_entries( { 'line_num': 54, 'column_num': 51 } ) } ), ) } ) ) def FixIt_Check_unicode_Ins( results ): assert_that( results, has_entries( { 'fixits': contains( has_entries( { 'chunks': contains( has_entries( { 'replacement_text': equal_to( '=' ), 'range': has_entries( { 'start': has_entries( { 'line_num': 21, 'column_num': 9 } ), 'end' : has_entries( { 'line_num': 21, 
'column_num': 11 } ), } ), } ) ), 'location': has_entries( { 'line_num': 21, 'column_num': 16 } ) } ) ) } ) ) def FixIt_Check_cpp11_Note( results ): assert_that( results, has_entries( { 'fixits': contains( # First note: put parens around it has_entries( { 'text': contains_string( 'parentheses around the assignment' ), 'chunks': contains( ChunkMatcher( '(', LineColMatcher( 59, 8 ), LineColMatcher( 59, 8 ) ), ChunkMatcher( ')', LineColMatcher( 61, 12 ), LineColMatcher( 61, 12 ) ) ), 'location': LineColMatcher( 60, 1 ), } ), # Second note: change to == has_entries( { 'text': contains_string( '==' ), 'chunks': contains( ChunkMatcher( '==', LineColMatcher( 60, 8 ), LineColMatcher( 60, 9 ) ) ), 'location': LineColMatcher( 60, 1 ), } ), # Unresolved, requires /resolve_fixit request has_entries( { 'text': 'Extract subexpression to variable', 'resolve': True, 'command': has_entries( { 'command': 'clangd.applyTweak' } ) } ) ) } ) ) def FixIt_Check_cpp11_SpellCheck( results ): assert_that( results, has_entries( { 'fixits': contains( # Change to SpellingIsNotMyStrongPoint has_entries( { 'text': contains_string( "change 'SpellingIsNotMyStringPiont' to " "'SpellingIsNotMyStrongPoint'" ), 'chunks': contains( ChunkMatcher( 'SpellingIsNotMyStrongPoint', LineColMatcher( 72, 9 ), LineColMatcher( 72, 35 ) ) ), 'location': LineColMatcher( 72, 9 ), } ) ) } ) ) def FixIt_Check_cuda( results ): assert_that( results, has_entries( { 'fixits': contains( has_entries( { 'text': contains_string( "change 'int' to 'void'" ), 'chunks': contains( ChunkMatcher( 'void', LineColMatcher( 3, 12 ), LineColMatcher( 3, 15 ) ) ), 'location': LineColMatcher( 3, 12 ), } ) ) } ) ) def FixIt_Check_SubexprExtract_Resolved( results ): assert_that( results, has_entries( { 'fixits': contains( has_entries( { 'text': 'Extract subexpression to variable', 'chunks': contains( ChunkMatcher( 'auto dummy = foo(i + 3);\n ', LineColMatcher( 84, 3 ), LineColMatcher( 84, 3 ) ), ChunkMatcher( 'dummy', LineColMatcher( 84, 10 ), LineColMatcher( 84, 22 ) ), ) } ) ) } ) ) def FixIt_Check_RawStringReplace_Resolved( results ): assert_that( results, has_entries( { 'fixits': contains( has_entries( { 'text': 'Convert to raw string', 'chunks': contains( ChunkMatcher( 'R"(\\\\r\\asd\n\\v)"', LineColMatcher( 80, 19 ), LineColMatcher( 80, 36 ) ), ) } ) ) } ) ) def FixIt_Check_MacroExpand_Resolved( results ): assert_that( results, has_entries( { 'fixits': contains( has_entries( { 'text': "Expand macro 'DECLARE_INT'", 'chunks': contains( ChunkMatcher( 'int i', LineColMatcher( 83, 3 ), LineColMatcher( 83, 17 ) ), ) } ) ) } ) ) def FixIt_Check_AutoExpand_Resolved( results ): assert_that( results, has_entries( { 'fixits': contains( has_entries( { 'text': "Expand auto type", 'chunks': contains( ChunkMatcher( 'const char *', LineColMatcher( 80, 1 ), LineColMatcher( 80, 6 ) ), ) } ) ) } ) ) def Subcommands_FixIt_all_test(): cfile = PathToTestFile( 'FixIt_Clang_cpp11.cpp' ) mfile = PathToTestFile( 'objc', 'FixIt_Clang_objc.m' ) cufile = PathToTestFile( 'cuda', 'fixit_test.cu' ) ufile = PathToTestFile( 'unicode.cc' ) tests = [ # L # i C # n o # e l Lang File, Checker [ 16, 0, 'cpp11', cfile, FixIt_Check_cpp11_Ins ], [ 25, 14, 'cpp11', cfile, FixIt_Check_cpp11_InsMultiLine ], [ 35, 7, 'cpp11', cfile, FixIt_Check_cpp11_Del ], [ 40, 6, 'cpp11', cfile, FixIt_Check_cpp11_Repl ], [ 48, 3, 'cpp11', cfile, FixIt_Check_cpp11_DelAdd ], [ 5, 3, 'objective-c', mfile, FixIt_Check_objc ], [ 7, 1, 'objective-c', mfile, FixIt_Check_objc_NoFixIt ], [ 3, 12, 'cuda', cufile, FixIt_Check_cuda 
], # multiple errors on a single line; both with fixits [ 54, 15, 'cpp11', cfile, FixIt_Check_cpp11_MultiFirst ], # should put closest fix-it first? [ 54, 51, 'cpp11', cfile, FixIt_Check_cpp11_MultiSecond ], # unicode in line for fixit [ 21, 16, 'cpp11', ufile, FixIt_Check_unicode_Ins ], # FixIt attached to a "child" diagnostic (i.e. a Note) [ 60, 1, 'cpp11', cfile, FixIt_Check_cpp11_Note ], # FixIt due to forced spell checking [ 72, 9, 'cpp11', cfile, FixIt_Check_cpp11_SpellCheck ], ] for test in tests: yield RunFixItTest, test[ 0 ], test[ 1 ], test[ 2 ], test[ 3 ], test[ 4 ] @WithRetry @SharedYcmd def RunRangedFixItTest( app, rng, expected ): contents = ReadFile( PathToTestFile( 'FixIt_Clang_cpp11.cpp' ) ) args = { 'completer_target' : 'filetype_default', 'contents' : contents, 'filepath' : PathToTestFile( 'FixIt_Clang_cpp11.cpp' ), 'command_arguments': [ 'FixIt' ], 'range' : rng, 'filetype' : 'cpp' } app.post_json( '/event_notification', CombineRequest( args, { 'event_name': 'FileReadyToParse', } ), expect_errors = True ) WaitUntilCompleterServerReady( app, 'cpp' ) response = app.post_json( '/run_completer_command', BuildRequest( **args ) ).json args[ 'fixit' ] = response[ 'fixits' ][ 0 ] response = app.post_json( '/resolve_fixit', BuildRequest( **args ) ).json print( 'Resolved fixit response = ' ) print( response ) expected( response ) def Subcommands_FixIt_Ranged_test(): expand_auto_range = { 'start': { 'line_num': 80, 'column_num': 1 }, 'end': { 'line_num': 80, 'column_num': 4 }, } subexpression_extract_range = { 'start': { 'line_num': 84, 'column_num': 14 }, 'end': { 'line_num': 84, 'column_num': 20 }, } macro_expand_range = { 'start': { 'line_num': 83, 'column_num': 3 }, 'end': { 'line_num': 83, 'column_num': 13 }, } raw_string_range = { 'start': { 'line_num': 80, 'column_num': 19 }, 'end': { 'line_num': 80, 'column_num': 35 }, } tests = [ [ expand_auto_range, FixIt_Check_AutoExpand_Resolved ], [ macro_expand_range, FixIt_Check_MacroExpand_Resolved ], [ subexpression_extract_range, FixIt_Check_SubexprExtract_Resolved ], [ raw_string_range, FixIt_Check_RawStringReplace_Resolved ], ] for test in tests: yield RunRangedFixItTest, test[ 0 ], test[ 1 ] @WithRetry @SharedYcmd def Subcommands_FixIt_AlreadyResolved_test( app ): filename = PathToTestFile( 'FixIt_Clang_cpp11.cpp' ) request = { 'completer_target' : 'filetype_default', 'contents' : ReadFile( filename ), 'filepath' : filename, 'command_arguments': [ 'FixIt' ], 'line_num' : 16, 'column_num' : 1, 'filetype' : 'cpp' } app.post_json( '/event_notification', CombineRequest( request, { 'event_name': 'FileReadyToParse', } ), expect_errors = True ) WaitUntilCompleterServerReady( app, 'cpp' ) expected = app.post_json( '/run_completer_command', BuildRequest( **request ) ).json print( 'expected = ' ) print( expected ) request[ 'fixit' ] = expected[ 'fixits' ][ 0 ] actual = app.post_json( '/resolve_fixit', BuildRequest( **request ) ).json print( 'actual = ' ) print( actual ) assert_that( actual, equal_to( expected ) ) @SharedYcmd def Subcommands_RefactorRename_test( app ): test = { 'request': { 'filetype': 'cpp', 'completer_target': 'filetype_default', 'contents': ReadFile( PathToTestFile( 'basic.cpp' ) ), 'filepath': PathToTestFile( 'basic.cpp' ), 'command_arguments': [ 'RefactorRename', 'Bar' ], 'line_num': 17, 'column_num': 4, }, 'expect': { 'response': requests.codes.ok, 'data': has_entries( { 'fixits': contains( has_entries( { 'chunks': contains( ChunkMatcher( 'Bar', LineColMatcher( 1, 8 ), LineColMatcher( 1, 11 ) ), ChunkMatcher( 
'Bar', LineColMatcher( 9, 3 ), LineColMatcher( 9, 6 ) ), ChunkMatcher( '\n\n', LineColMatcher( 12, 2 ), LineColMatcher( 15, 1 ) ), ChunkMatcher( 'Bar', LineColMatcher( 15, 8 ), LineColMatcher( 15, 11 ) ), ChunkMatcher( ' ', LineColMatcher( 15, 46 ), LineColMatcher( 16, 1 ) ), ChunkMatcher( 'Bar', LineColMatcher( 17, 3 ), LineColMatcher( 17, 6 ) ), ChunkMatcher( '', LineColMatcher( 17, 14 ), LineColMatcher( 17, 15 ) ), ChunkMatcher( ' ', LineColMatcher( 17, 17 ), LineColMatcher( 17, 17 ) ), ChunkMatcher( ' ', LineColMatcher( 17, 19 ), LineColMatcher( 17, 19 ) ), ) } ) ) } ) }, 'route': '/run_completer_command' } RunAfterInitialized( app, test )
1.492188
1
sentiment/config.py
TheRensselaerIDEA/covid19_tweet_ids
0
2529
"""
Config class containing all the settings for running sentiment scoring tool
"""
import jsonpickle

class Config(object):
    """Container for sentiment scoring tool settings.
    """
    def __init__(self):
        """Initializes the Config instance.
        """
        #Elasticsearch settings
        self.elasticsearch_host = ""
        self.elasticsearch_verify_certs = False
        self.elasticsearch_index_name = ""
        self.elasticsearch_batch_size = 500
        self.elasticsearch_timeout_secs = 30

        #Processing settings
        self.sentiment_modelpath = ""
        self.sentiment_max_seq_length = 512
        self.sleep_idle_secs = 5
        self.sleep_not_idle_secs = 0.01
        self.log_level = "ERROR"

    @staticmethod
    def load(filepath):
        """Loads the config from a JSON file.

        Args:
            filepath: path of the JSON file.
        """
        with open(filepath, "r") as file:
            json = file.read()
        config = jsonpickle.decode(json)
        return config
3.03125
3
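A minimal usage sketch for the Config class in the sentiment/config.py record above; the field values and the JSON path are hypothetical, and the file is assumed to have been produced by jsonpickle.encode:

import jsonpickle

config = Config()
config.elasticsearch_host = "localhost:9200"   # assumed value
config.elasticsearch_index_name = "tweets"     # assumed value

with open("sentiment_config.json", "w") as f:  # hypothetical path
    f.write(jsonpickle.encode(config))

loaded = Config.load("sentiment_config.json")
print(loaded.elasticsearch_host, loaded.elasticsearch_batch_size)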
tests/ut/python/dataset/test_invert.py
GuoSuiming/mindspore
4
2530
# Copyright 2020 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """ Testing Invert op in DE """ import numpy as np import mindspore.dataset as ds import mindspore.dataset.transforms.py_transforms import mindspore.dataset.vision.py_transforms as F import mindspore.dataset.vision.c_transforms as C from mindspore import log as logger from util import visualize_list, save_and_check_md5, diff_mse DATA_DIR = "../data/dataset/testImageNetData/train/" GENERATE_GOLDEN = False def test_invert_py(plot=False): """ Test Invert python op """ logger.info("Test Invert Python op") # Original Images data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) transforms_original = mindspore.dataset.transforms.py_transforms.Compose([F.Decode(), F.Resize((224, 224)), F.ToTensor()]) ds_original = data_set.map(operations=transforms_original, input_columns="image") ds_original = ds_original.batch(512) for idx, (image, _) in enumerate(ds_original): if idx == 0: images_original = np.transpose(image.asnumpy(), (0, 2, 3, 1)) else: images_original = np.append(images_original, np.transpose(image.asnumpy(), (0, 2, 3, 1)), axis=0) # Color Inverted Images data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) transforms_invert = mindspore.dataset.transforms.py_transforms.Compose([F.Decode(), F.Resize((224, 224)), F.Invert(), F.ToTensor()]) ds_invert = data_set.map(operations=transforms_invert, input_columns="image") ds_invert = ds_invert.batch(512) for idx, (image, _) in enumerate(ds_invert): if idx == 0: images_invert = np.transpose(image.asnumpy(), (0, 2, 3, 1)) else: images_invert = np.append(images_invert, np.transpose(image.asnumpy(), (0, 2, 3, 1)), axis=0) num_samples = images_original.shape[0] mse = np.zeros(num_samples) for i in range(num_samples): mse[i] = np.mean((images_invert[i] - images_original[i]) ** 2) logger.info("MSE= {}".format(str(np.mean(mse)))) if plot: visualize_list(images_original, images_invert) def test_invert_c(plot=False): """ Test Invert Cpp op """ logger.info("Test Invert cpp op") # Original Images data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) transforms_original = [C.Decode(), C.Resize(size=[224, 224])] ds_original = data_set.map(operations=transforms_original, input_columns="image") ds_original = ds_original.batch(512) for idx, (image, _) in enumerate(ds_original): if idx == 0: images_original = image.asnumpy() else: images_original = np.append(images_original, image.asnumpy(), axis=0) # Invert Images data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) transform_invert = [C.Decode(), C.Resize(size=[224, 224]), C.Invert()] ds_invert = data_set.map(operations=transform_invert, input_columns="image") ds_invert = ds_invert.batch(512) for idx, (image, _) in enumerate(ds_invert): if idx == 0: images_invert = image.asnumpy() else: images_invert = np.append(images_invert, image.asnumpy(), axis=0) if plot: visualize_list(images_original, 
images_invert) num_samples = images_original.shape[0] mse = np.zeros(num_samples) for i in range(num_samples): mse[i] = diff_mse(images_invert[i], images_original[i]) logger.info("MSE= {}".format(str(np.mean(mse)))) def test_invert_py_c(plot=False): """ Test Invert Cpp op and python op """ logger.info("Test Invert cpp and python op") # Invert Images in cpp data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) data_set = data_set.map(operations=[C.Decode(), C.Resize((224, 224))], input_columns=["image"]) ds_c_invert = data_set.map(operations=C.Invert(), input_columns="image") ds_c_invert = ds_c_invert.batch(512) for idx, (image, _) in enumerate(ds_c_invert): if idx == 0: images_c_invert = image.asnumpy() else: images_c_invert = np.append(images_c_invert, image.asnumpy(), axis=0) # invert images in python data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) data_set = data_set.map(operations=[C.Decode(), C.Resize((224, 224))], input_columns=["image"]) transforms_p_invert = mindspore.dataset.transforms.py_transforms.Compose([lambda img: img.astype(np.uint8), F.ToPIL(), F.Invert(), np.array]) ds_p_invert = data_set.map(operations=transforms_p_invert, input_columns="image") ds_p_invert = ds_p_invert.batch(512) for idx, (image, _) in enumerate(ds_p_invert): if idx == 0: images_p_invert = image.asnumpy() else: images_p_invert = np.append(images_p_invert, image.asnumpy(), axis=0) num_samples = images_c_invert.shape[0] mse = np.zeros(num_samples) for i in range(num_samples): mse[i] = diff_mse(images_p_invert[i], images_c_invert[i]) logger.info("MSE= {}".format(str(np.mean(mse)))) if plot: visualize_list(images_c_invert, images_p_invert, visualize_mode=2) def test_invert_one_channel(): """ Test Invert cpp op with one channel image """ logger.info("Test Invert C Op With One Channel Images") c_op = C.Invert() try: data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) data_set = data_set.map(operations=[C.Decode(), C.Resize((224, 224)), lambda img: np.array(img[:, :, 0])], input_columns=["image"]) data_set.map(operations=c_op, input_columns="image") except RuntimeError as e: logger.info("Got an exception in DE: {}".format(str(e))) assert "The shape" in str(e) def test_invert_md5_py(): """ Test Invert python op with md5 check """ logger.info("Test Invert python op with md5 check") # Generate dataset data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) transforms_invert = mindspore.dataset.transforms.py_transforms.Compose([F.Decode(), F.Invert(), F.ToTensor()]) data = data_set.map(operations=transforms_invert, input_columns="image") # Compare with expected md5 from images filename = "invert_01_result_py.npz" save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN) def test_invert_md5_c(): """ Test Invert cpp op with md5 check """ logger.info("Test Invert cpp op with md5 check") # Generate dataset data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False) transforms_invert = [C.Decode(), C.Resize(size=[224, 224]), C.Invert(), F.ToTensor()] data = data_set.map(operations=transforms_invert, input_columns="image") # Compare with expected md5 from images filename = "invert_01_result_c.npz" save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN) if __name__ == "__main__": test_invert_py(plot=False) test_invert_c(plot=False) test_invert_py_c(plot=False) test_invert_one_channel() test_invert_md5_py() test_invert_md5_c()
2.125
2
puzzle/tests/test_candy.py
aliciawyy/dmining
0
2531
from parameterized import parameterized
from numpy.testing import TestCase

from .. import candy


class TestCollectCandies(TestCase):
    @parameterized.expand(
        [(5, 5, 12, [[2, 1, 1, 1, 1],
                     [2, 2, 1, 1, 1],
                     [1, 2, 1, 1, 1],
                     [2, 2, 1, 1, 3],
                     [2, 2, 2, 2, 2]])]
    )
    def test_candy(self, n, m, t, candies):
        collector = candy.CollectCandies(n, m, t, candies)
        for pos, expected in [[(1, 1), [(0, 1), (2, 1), (1, 0), (1, 2)]],
                              [(0, 0), [(1, 0), (0, 1)]],
                              [(4, 4), [(3, 4), (4, 3)]]]:
            self.assertListEqual(
                collector.get_next_positions(pos), expected + [pos])
        self.assertEqual(collector.get_max_sum(), 27)
2.59375
3
audio.py
fernandoq/quiz-show
0
2532
import time
import subprocess
import os

print os.uname()

if not os.uname()[0].startswith("Darw"):
    import pygame
    pygame.mixer.init()


# Plays a song
def playSong(filename):
    print "play song"
    if not os.uname()[0].startswith("Darw"):
        pygame.mixer.music.fadeout(1000)  # fadeout current music over 1 sec.
        pygame.mixer.music.load("music/" + filename)
        pygame.mixer.music.play()
    else:
        subprocess.call(["afplay", "music/" + filename])
2.5625
3
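A brief usage sketch for playSong in the audio.py record above; the file name is hypothetical and is assumed to live under the music/ directory:

playSong("theme.mp3")  # hypothetical file under music/
time.sleep(5)          # keep the script alive while pygame plays in the background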
tests/test_dynamodbHandler.py
unfoldingWord-dev/python-aws-tools
1
2533
from __future__ import absolute_import, unicode_literals, print_function
import mock
import unittest
import d43_aws_tools as aws_tools
from boto3.dynamodb.conditions import Attr


class DynamoDBHandlerTests(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        with mock.patch("d43_aws_tools.dynamodb_handler.boto3", mock.MagicMock()):
            cls.handler = aws_tools.dynamodb_handler.DynamoDBHandler("table_name")
        cls.handler.table = mock.MagicMock()

    def setUp(self):
        self.handler.table.reset_mock()

    def test_get_item(self):
        """Test a successful invocation of `get_item`."""
        expected = dict(field1="1", field2="2")
        self.handler.table.get_item.return_value = {
            "Item": expected
        }
        self.assertEqual(self.handler.get_item("key"), expected)

    def test_get_item_malformed(self):
        """Test an unsuccessful invocation of `get_item`."""
        self.handler.table.get_item.return_value = {
            "TheWrongKey": dict(field1="1", field2="2")
        }
        self.assertIsNone(self.handler.get_item("key"))

    def test_insert_item(self):
        """Test a successful invocation of `insert_item`."""
        data = dict(x="x", y="y", three=3)
        self.handler.insert_item(data)
        self.handler.table.put_item.assert_called_once_with(Item=data)

    def test_update_item(self):
        """Test a successful invocation of `update_item`."""
        key = {"id": 1}
        data = {"age": 40, "name": "<NAME>"}
        self.handler.update_item(key, data)
        self.handler.table.update_item.assert_called_once()
        _, kwargs = self.handler.table.update_item.call_args
        self.assertIn("Key", kwargs)
        self.assertEqual(kwargs["Key"], key)
        self.assertIn("UpdateExpression", kwargs)
        # ignore whitespace and order of assignments
        expr = kwargs["UpdateExpression"].replace(" ", "")
        self.assertTrue(expr.startswith("SET"))
        self.assertIn("age=:age", expr)
        self.assertIn("#item_name=:name", expr)
        self.assertIn("ExpressionAttributeValues", kwargs)
        self.assertEqual(kwargs["ExpressionAttributeValues"],
                         {":age": 40, ":name": "<NAME>"})
        self.assertIn("ExpressionAttributeNames", kwargs)
        self.assertEqual(kwargs["ExpressionAttributeNames"],
                         {"#item_name": "name"})

    def test_delete_item(self):
        """Test a successful invocation of `delete_item`."""
        key = {"id": 1234}
        self.handler.delete_item(key)
        self.handler.table.delete_item.assert_called_once_with(Key=key)

    def test_query_item(self):
        """ Test a successful invocation of `query_item`."""
        for cond in ("ne", "lt", "lte", "gt", "gte", "begins_with", "is_in", "contains"):
            self.handler.table.reset_mock()
            query = {
                "age": {
                    "condition": "eq",
                    "value": 25
                },
                "full_name": {
                    "condition": cond,
                    "value": "<NAME>"
                }
            }
            data = {"age": 30, "full_name": "<NAME>"}
            self.handler.table.scan.return_value = {"Items": data}
            self.assertEqual(self.handler.query_items(query), data)
            self.handler.table.scan.assert_called_once()

    def test_query_bool_item(self):
        """ Test a successful invocation of `query_item`.
        with a False boolean query"""
        for cond in ("ne", "lt", "lte", "gt", "gte", "begins_with", "is_in", "contains"):
            self.handler.table.reset_mock()
            query = {
                "ready": False
            }
            data = {"age": 30, "full_name": "<NAME>", "ready": False}
            self.handler.table.scan.return_value = {"Items": data}
            self.assertEqual(self.handler.query_items(query), data)
            self.handler.table.scan.assert_called_once()
            err_msg = 'query_items: Expecting FilterExpression parameter for table.scan() but non found'
            try:
                self.handler.table.scan.assert_called_once_with()
                # If the scan ran without an argument this is a failure
                self.assertTrue(False, err_msg)
            except Exception as e:
                if err_msg in str(e):
                    raise e

    def test_query_item_no_query(self):
        """Test a invocation of `query_item` with no query."""
        data = {"age": 30, "full_name": "<NAME>"}
        self.handler.table.scan.return_value = {"Items": data}
        self.assertEqual(self.handler.query_items(), data)
        self.handler.table.scan.assert_called_once_with()
2.671875
3
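A hedged sketch of the DynamoDBHandler API that the tests above exercise, run against a real table instead of a mock; the table name, key shape, and item fields are hypothetical:

import d43_aws_tools as aws_tools

handler = aws_tools.dynamodb_handler.DynamoDBHandler("jobs-table")        # hypothetical table
handler.insert_item({"id": 1, "status": "queued"})                        # put_item under the hood
handler.update_item({"id": 1}, {"status": "done"})                        # builds an UpdateExpression
matching = handler.query_items({"status": {"condition": "eq", "value": "done"}})  # scan with a filter
handler.delete_item({"id": 1})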
app.py
sbustamante/heroku_app
0
2534
<filename>app.py
from dash import Dash, html, dcc
import plotly.express as px
import pandas as pd

app = Dash(__name__)
server = app.server

# assume you have a "long-form" data frame
# see https://plotly.com/python/px-arguments/ for more options
df = pd.DataFrame({
    "Fruit": ["Apples", "Oranges", "Bananas", "Apples", "Oranges", "Bananas"],
    "Amount": [4, 1, 2, 2, 4, 5],
    "City": ["SF", "SF", "SF", "Montreal", "Montreal", "Montreal"]
})

fig = px.bar(df, x="Fruit", y="Amount", color="City", barmode="group")

app.layout = html.Div(children=[
    html.H1(children='Hello Dash'),

    html.Div(children='''
        Dash: A web application framework for your data.
    '''),

    dcc.Graph(
        id='example-graph',
        figure=fig
    )
])

if __name__ == '__main__':
    app.run_server(debug=True)
3.484375
3
peter_sslers/web/lib/form_utils.py
aptise/peter_sslers
35
2535
<filename>peter_sslers/web/lib/form_utils.py # pypi import six # local from ...lib import db as lib_db from ...lib import utils from ...model import objects as model_objects from ...model import utils as model_utils from . import formhandling # ============================================================================== def decode_args(getcreate_args): """ support for Python2/3 """ if six.PY3: for (k, v) in list(getcreate_args.items()): if isinstance(v, bytes): getcreate_args[k] = v.decode("utf8") return getcreate_args # standardized mapping for `model_utils.DomainsChallenged` to a formStash DOMAINS_CHALLENGED_FIELDS = { "http-01": "domain_names_http01", "dns-01": "domain_names_dns01", } class AcmeAccountUploadParser(object): """ An AcmeAccount may be uploaded multiple ways: * a single PEM file * an intra-associated three file triplet from a Certbot installation This parser operates on a validated FormEncode results object (via `pyramid_formencode_classic`) """ # overwritten in __init__ getcreate_args = None formStash = None # tracked acme_account_provider_id = None account_key_pem = None le_meta_jsons = None le_pkey_jsons = None le_reg_jsons = None private_key_cycle_id = None private_key_technology_id = None upload_type = None # pem OR letsencrypt def __init__(self, formStash): self.formStash = formStash self.getcreate_args = {} def require_new(self, require_contact=None, require_technology=True): """ routine for creating a NEW AcmeAccount (peter_sslers generates the credentials) :param require_contact: ``True`` if required; ``False`` if not; ``None`` for conditional logic :param require_technology: ``True`` if required; ``False`` if not; ``None`` for conditional logic """ formStash = self.formStash acme_account_provider_id = formStash.results.get( "acme_account_provider_id", None ) if acme_account_provider_id is None: # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)` formStash.fatal_field( field="acme_account_provider_id", message="No provider submitted." 
) private_key_cycle = formStash.results.get("account__private_key_cycle", None) if private_key_cycle is None: # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)` formStash.fatal_field( field="account__private_key_cycle", message="No PrivateKey cycle submitted.", ) private_key_cycle_id = model_utils.PrivateKeyCycle.from_string( private_key_cycle ) private_key_technology_id = None private_key_technology = formStash.results.get( "account__private_key_technology", None ) if private_key_technology: private_key_technology_id = model_utils.KeyTechnology.from_string( private_key_technology ) if not private_key_technology_id and require_technology: # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)` formStash.fatal_field( field="account__private_key_technology", message="No PrivateKey technology submitted.", ) contact = formStash.results.get("account__contact", None) if not contact and require_contact: # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)` formStash.fatal_field( field="account__contact", message="`account__contact` is required.", ) getcreate_args = {} self.contact = getcreate_args["contact"] = contact self.acme_account_provider_id = getcreate_args[ "acme_account_provider_id" ] = acme_account_provider_id self.private_key_cycle_id = getcreate_args[ "private_key_cycle_id" ] = private_key_cycle_id self.private_key_technology_id = getcreate_args[ "private_key_technology_id" ] = private_key_technology_id self.getcreate_args = decode_args(getcreate_args) def require_upload(self, require_contact=None, require_technology=None): """ routine for uploading an exiting AcmeAccount+AcmeAccountKey :param require_contact: ``True`` if required; ``False`` if not; ``None`` for conditional logic :param require_technology: ``True`` if required; ``False`` if not; ``None`` for conditional logic """ formStash = self.formStash # ------------------- # do a quick parse... requirements_either_or = ( ( "account_key_file_pem", # "acme_account_provider_id", ), ( "account_key_file_le_meta", "account_key_file_le_pkey", "account_key_file_le_reg", ), ) failures = [] passes = [] for idx, option_set in enumerate(requirements_either_or): option_set_results = [ True if formStash.results[option_set_item] is not None else False for option_set_item in option_set ] # if we have any item, we need all of them if any(option_set_results): if not all(option_set_results): failures.append( "If any of %s is provided, all must be provided." % str(option_set) ) else: passes.append(idx) if (len(passes) != 1) or failures: # `formStash.fatal_form()` will raise `FormInvalid()` formStash.fatal_form( "You must upload `account_key_file_pem` or all of (`account_key_file_le_meta`, `account_key_file_le_pkey`, `account_key_file_le_reg`)." 
) # ------------------- # validate the provider option # will be None unless a pem is uploaded # required for PEM, ignored otherwise acme_account_provider_id = formStash.results.get( "acme_account_provider_id", None ) private_key_cycle = formStash.results.get("account__private_key_cycle", None) if private_key_cycle is None: # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)` formStash.fatal_field( field="account__private_key_cycle", message="No PrivateKey cycle submitted.", ) private_key_cycle_id = model_utils.PrivateKeyCycle.from_string( private_key_cycle ) private_key_technology_id = None private_key_technology = formStash.results.get( "account__private_key_technology", None ) if private_key_technology is not None: private_key_technology_id = model_utils.KeyTechnology.from_string( private_key_technology ) if not private_key_technology_id and require_technology: # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)` formStash.fatal_field( field="account__private_key_technology", message="No PrivateKey technology submitted.", ) # require `contact` when uploading a PEM file if formStash.results["account_key_file_pem"] is not None: require_contact = True contact = formStash.results.get("account__contact") if not contact and require_contact: # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)` formStash.fatal_field( field="account__contact", message="`account__contact` is required.", ) getcreate_args = {} self.contact = getcreate_args["contact"] = contact self.private_key_cycle_id = getcreate_args[ "private_key_cycle_id" ] = private_key_cycle_id self.private_key_technology_id = getcreate_args[ "private_key_technology_id" ] = private_key_technology_id if formStash.results["account_key_file_pem"] is not None: if acme_account_provider_id is None: # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)` formStash.fatal_field( field="acme_account_provider_id", message="No provider submitted." ) self.upload_type = "pem" self.acme_account_provider_id = getcreate_args[ "acme_account_provider_id" ] = acme_account_provider_id self.account_key_pem = getcreate_args[ "key_pem" ] = formhandling.slurp_file_field(formStash, "account_key_file_pem") else: # note that we use `jsonS` to indicate a string self.le_meta_jsons = getcreate_args[ "le_meta_jsons" ] = formhandling.slurp_file_field(formStash, "account_key_file_le_meta") self.le_pkey_jsons = getcreate_args[ "le_pkey_jsons" ] = formhandling.slurp_file_field(formStash, "account_key_file_le_pkey") self.le_reg_jsons = getcreate_args[ "le_reg_jsons" ] = formhandling.slurp_file_field(formStash, "account_key_file_le_reg") self.getcreate_args = decode_args(getcreate_args) class _PrivateKeyUploadParser(object): """ A PrivateKey is not a complex upload to parse itself This code exists to mimic the AcmeAccount uploading. 
""" # overwritten in __init__ getcreate_args = None formStash = None # tracked private_key_pem = None upload_type = None # pem def __init__(self, formStash): self.formStash = formStash self.getcreate_args = {} def require_upload(self): """ routine for uploading an exiting PrivateKey """ formStash = self.formStash getcreate_args = {} if formStash.results["private_key_file_pem"] is not None: self.upload_type = "pem" self.private_key_pem = getcreate_args[ "key_pem" ] = formhandling.slurp_file_field(formStash, "private_key_file_pem") self.getcreate_args = decode_args(getcreate_args) class _AcmeAccountSelection(object): """ Class used to manage an uploaded AcmeAccount """ selection = None upload_parsed = None # instance of AcmeAccountUploadParser or None AcmeAccount = None class _PrivateKeySelection(object): selection = None upload_parsed = None # instance of AcmeAccountUploadParser or None private_key_strategy__requested = None PrivateKey = None @property def private_key_strategy_id__requested(self): return model_utils.PrivateKeyStrategy.from_string( self.private_key_strategy__requested ) def parse_AcmeAccountSelection( request, formStash, account_key_option=None, allow_none=None, require_contact=None, ): """ :param formStash: an instance of `pyramid_formencode_classic.FormStash` :param account_key_option: :param allow_none: :param require_contact: ``True`` if required; ``False`` if not; ``None`` for conditional logic """ account_key_pem = None account_key_pem_md5 = None dbAcmeAccount = None is_global_default = None # handle the explicit-option acmeAccountSelection = _AcmeAccountSelection() if account_key_option == "account_key_file": # this will handle form validation and raise errors. parser = AcmeAccountUploadParser(formStash) # this will have: `contact`, `private_key_cycle`, `private_key_technology` parser.require_upload(require_contact=require_contact) # update our object acmeAccountSelection.selection = "upload" acmeAccountSelection.upload_parsed = parser return acmeAccountSelection else: if account_key_option == "account_key_global_default": acmeAccountSelection.selection = "global_default" account_key_pem_md5 = formStash.results["account_key_global_default"] is_global_default = True elif account_key_option == "account_key_existing": acmeAccountSelection.selection = "existing" account_key_pem_md5 = formStash.results["account_key_existing"] elif account_key_option == "account_key_reuse": acmeAccountSelection.selection = "reuse" account_key_pem_md5 = formStash.results["account_key_reuse"] elif account_key_option == "none": if not allow_none: # `formStash.fatal_form()` will raise `FormInvalid()` formStash.fatal_form( "This form does not support no AcmeAccount selection." 
) # note the lowercase "none"; this is an explicit "no item" selection # only certain routes allow this acmeAccountSelection.selection = "none" account_key_pem_md5 = None return acmeAccountSelection else: formStash.fatal_form( message="Invalid `account_key_option`", ) if not account_key_pem_md5: # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)` formStash.fatal_field( field=account_key_option, message="You did not provide a value" ) dbAcmeAccount = lib_db.get.get__AcmeAccount__by_pemMd5( request.api_context, account_key_pem_md5, is_active=True ) if not dbAcmeAccount: # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)` formStash.fatal_field( field=account_key_option, message="The selected AcmeAccount is not enrolled in the system.", ) if is_global_default and not dbAcmeAccount.is_global_default: # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)` formStash.fatal_field( field=account_key_option, message="The selected AcmeAccount is not the current default.", ) acmeAccountSelection.AcmeAccount = dbAcmeAccount return acmeAccountSelection # `formStash.fatal_form()` will raise `FormInvalid()` formStash.fatal_form("There was an error validating your form.") def parse_PrivateKeySelection(request, formStash, private_key_option=None): private_key_pem = None private_key_pem_md5 = None PrivateKey = None # :class:`model.objects.PrivateKey` # handle the explicit-option privateKeySelection = _PrivateKeySelection() if private_key_option == "private_key_file": # this will handle form validation and raise errors. parser = _PrivateKeyUploadParser(formStash) parser.require_upload() # update our object privateKeySelection.selection = "upload" privateKeySelection.upload_parsed = parser privateKeySelection.private_key_strategy__requested = ( model_utils.PrivateKeySelection_2_PrivateKeyStrategy["upload"] ) return privateKeySelection else: if private_key_option == "private_key_existing": privateKeySelection.selection = "existing" privateKeySelection.private_key_strategy__requested = ( model_utils.PrivateKeySelection_2_PrivateKeyStrategy["existing"] ) private_key_pem_md5 = formStash.results["private_key_existing"] elif private_key_option == "private_key_reuse": privateKeySelection.selection = "reuse" privateKeySelection.private_key_strategy__requested = ( model_utils.PrivateKeySelection_2_PrivateKeyStrategy["reuse"] ) private_key_pem_md5 = formStash.results["private_key_reuse"] elif private_key_option in ( "private_key_generate", "private_key_for_account_key", ): dbPrivateKey = lib_db.get.get__PrivateKey__by_id(request.api_context, 0) if not dbPrivateKey: formStash.fatal_field( field=private_key_option, message="Could not load the placeholder PrivateKey.", ) privateKeySelection.PrivateKey = dbPrivateKey if private_key_option == "private_key_generate": privateKeySelection.selection = "generate" privateKeySelection.private_key_strategy__requested = ( model_utils.PrivateKeySelection_2_PrivateKeyStrategy["generate"] ) elif private_key_option == "private_key_for_account_key": privateKeySelection.selection = "private_key_for_account_key" privateKeySelection.private_key_strategy__requested = ( model_utils.PrivateKeySelection_2_PrivateKeyStrategy[ "private_key_for_account_key" ] ) return privateKeySelection else: # `formStash.fatal_form()` will raise `FormInvalid()` formStash.fatal_form("Invalid `private_key_option`") if not private_key_pem_md5: # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)` formStash.fatal_field( field=private_key_option, 
message="You did not provide a value" ) dbPrivateKey = lib_db.get.get__PrivateKey__by_pemMd5( request.api_context, private_key_pem_md5, is_active=True ) if not dbPrivateKey: # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)` formStash.fatal_field( field=private_key_option, message="The selected PrivateKey is not enrolled in the system.", ) privateKeySelection.PrivateKey = dbPrivateKey return privateKeySelection # `formStash.fatal_form()` will raise `FormInvalid()` formStash.fatal_form("There was an error validating your form.") def form_key_selection(request, formStash, require_contact=None): """ :param formStash: an instance of `pyramid_formencode_classic.FormStash` :param require_contact: ``True`` if required; ``False`` if not; ``None`` for conditional logic """ acmeAccountSelection = parse_AcmeAccountSelection( request, formStash, account_key_option=formStash.results["account_key_option"], require_contact=require_contact, ) if acmeAccountSelection.selection == "upload": key_create_args = acmeAccountSelection.upload_parsed.getcreate_args key_create_args["event_type"] = "AcmeAccount__insert" key_create_args[ "acme_account_key_source_id" ] = model_utils.AcmeAccountKeySource.from_string("imported") (dbAcmeAccount, _is_created,) = lib_db.getcreate.getcreate__AcmeAccount( request.api_context, **key_create_args ) acmeAccountSelection.AcmeAccount = dbAcmeAccount privateKeySelection = parse_PrivateKeySelection( request, formStash, private_key_option=formStash.results["private_key_option"], ) if privateKeySelection.selection == "upload": key_create_args = privateKeySelection.upload_parsed.getcreate_args key_create_args["event_type"] = "PrivateKey__insert" key_create_args[ "private_key_source_id" ] = model_utils.PrivateKeySource.from_string("imported") key_create_args["private_key_type_id"] = model_utils.PrivateKeyType.from_string( "standard" ) ( dbPrivateKey, _is_created, ) = lib_db.getcreate.getcreate__PrivateKey__by_pem_text( request.api_context, **key_create_args ) privateKeySelection.PrivateKey = dbPrivateKey elif privateKeySelection.selection == "generate": dbPrivateKey = lib_db.get.get__PrivateKey__by_id(request.api_context, 0) if not dbPrivateKey: formStash.fatal_field( field="private_key_option", message="Could not load the placeholder PrivateKey for autogeneration.", ) privateKeySelection.PrivateKey = dbPrivateKey return (acmeAccountSelection, privateKeySelection) def form_domains_challenge_typed(request, formStash, http01_only=False): domains_challenged = model_utils.DomainsChallenged() domain_names_all = [] try: # 1: iterate over the submitted domains by segment for (target_, source_) in DOMAINS_CHALLENGED_FIELDS.items(): submitted_ = formStash.results.get(source_) if submitted_: # this function checks the domain names match a simple regex # it will raise a `ValueError("invalid domain")` on the first invalid domain submitted_ = utils.domains_from_string(submitted_) if submitted_: domain_names_all.extend(submitted_) domains_challenged[target_] = submitted_ # 2: ensure there are domains if not domain_names_all: # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)` formStash.fatal_field( field="Error_Main", message="no domain names submitted", ) # 3: ensure there is no overlap domain_names_all_set = set(domain_names_all) if len(domain_names_all) != len(domain_names_all_set): # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)` formStash.fatal_field( field="Error_Main", message="a domain name can only be associated to one challenge 
type", ) # 4: maybe we only want http01 domains submitted? if http01_only: for (k, v) in domains_challenged.items(): if k == "http-01": continue if v: # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)` formStash.fatal_field( field="Error_Main", message="only http-01 domains are accepted by this form", ) except ValueError as exc: # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)` formStash.fatal_field( field="Error_Main", message="invalid domain names detected" ) return domains_challenged def form_single_domain_challenge_typed(request, formStash, challenge_type="http-01"): domains_challenged = model_utils.DomainsChallenged() # this function checks the domain names match a simple regex domain_names = utils.domains_from_string(formStash.results["domain_name"]) if not domain_names: # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)` formStash.fatal_field(field="domain_name", message="Found no domain names") if len(domain_names) != 1: # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)` formStash.fatal_field( field="domain_name", message="This endpoint currently supports only 1 domain name", ) domains_challenged[challenge_type] = domain_names return domains_challenged
2.046875
2
AI_Engine_Development/Feature_Tutorials/07-AI-Engine-Floating-Point/Utils/GenerationLib.py
jlamperez/Vitis-Tutorials
1
2536
<reponame>jlamperez/Vitis-Tutorials<filename>AI_Engine_Development/Feature_Tutorials/07-AI-Engine-Floating-Point/Utils/GenerationLib.py # # Copyright 2020–2021 Xilinx, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import numpy as np from math import * import random def GenerateTestVector(dtval,pliow,NPhases_s,NStreams_s,NSamples_s,NFrames_s,SeqType_s,Basename_s): print('DtVal : ',dtval.get()) print('PLIO width : ',pliow.get()) print('NPhases : ',NPhases_s.get()) print('NStreams : ',NStreams_s.get()) print('NSamples : ',NSamples_s.get()) print('NFrames : ',NFrames_s.get()) print('Type of Sequence : ',SeqType_s.get()) print('Base filename : ',Basename_s.get()) NPhases = int(NPhases_s.get()) NStreams = int(NStreams_s.get()) LFrame = int(NSamples_s.get()) NFrames = int(NFrames_s.get()) SequenceType = SeqType_s.get() Basename = Basename_s.get() #parameters that should be in the GUI # SequenceType ='Linear' # 'SinCos' 'Linear' 'Random' 'Dirac' # Basename = 'PhaseIn' NSamples = NPhases*NStreams*LFrame*NFrames; NSamples1 = NPhases*NStreams*LFrame*(NFrames+1); # A little longer to allow for delay in streams NBitsData = 32; if( dtval.get() == 'int16'): NBitsData = 16 HasImag = 0 if (dtval.get() == 'cint16'): HasImag = 1 if(SequenceType != 'SinCos' and SequenceType != 'Linear' and SequenceType != 'Random' and SequenceType != 'Dirac'): print ('Unknown Sequence Type') return # Create the overall signal that will be distributed over all streams # it is already separated in phases S = np.zeros((NPhases,int(NSamples1/NPhases),1+HasImag)) for i in range(int(NSamples1/NPhases)): for p in range (NPhases): k = i*NPhases+p if (SequenceType == 'SinCos'): vr = int(5000*cos(6.28*5/(NPhases*NStreams*LFrame)*k)) vi = int(5000*sin(6.28*5/(NPhases*NStreams*LFrame)*k)) elif (SequenceType == 'Linear'): vr = k vi = -k elif (SequenceType == 'Random'): vr = random.randint(-5000,5000) vi = random.randint(-5000,5000) elif (SequenceType == 'Dirac'): vr = 0 vi = 0 if(k%151 == 1): vr = 1 elif(k%151 == 40): vi = 1 elif(k%151 == 81): vr = 2 elif(k%151 == 115): vi = -2 # if(k%311 == 50): # vr = 1 # S[p,i,0] = # if(HasImag==1): # S[p,i,1] = int(5000*sin(6.28*5/(NPhases*NStreams*LFrame)*k)) S[p,i,0] = vr if (HasImag == 1 ): S[p,i,1] = vi PLIOwidth = int(pliow.get()) NSamplesPerLine = int(PLIOwidth/NBitsData) # Data are read in blocks of 128 bits (4 data in cint16) # Create an Input test Vector in TestInputS.txt FileNames = []; # Easiest case: 1 stream per AI Engine if (NStreams == 1): #Creates list of filenames for Phi in range(NPhases): FileNames.append(Basename+'_'+str(Phi)+'.txt') #Open all files fds = [open(path, 'w') for path in FileNames] #Fill all files with the right data for p in range(NPhases): fd = fds[p] for s in range(int(NSamples1/NPhases/NSamplesPerLine)): for d in range(NSamplesPerLine): index = s*NSamplesPerLine + d fd.write(str(int(S[p,index,0]))+' ') if(HasImag): fd.write(str(int(S[p,index,1]))+' ') fd.write('\n') for fd in fds: fd.close() if (NStreams == 2): #Creates list of filenames for Phi in range(NPhases): for 
Stream in range(NStreams): FileNames.append('PhaseIn_'+str(Phi)+'_'+str(Stream)+'.txt') # Hash table to associate data to streams NSamplesIn128bits = int(128/NBitsData ) H = np.zeros((int(NSamples1/NPhases/2),2)) H = H.astype('int32') index = np.zeros(2) index = index.astype('int32') for s in range(int(NSamples1/NPhases)): k = int(s/NSamplesIn128bits) # Block order i = k%2 # Which streams H[index[i],i] = s index[i] = index[i]+1 #Open all files fds = [open(path, 'w') for path in FileNames] #Fill all files with the right data for p in range(NPhases): for stream in range(2): fd = fds[2*p+stream] for s in range(int(NSamples1/NPhases/NSamplesPerLine/NStreams)): for d in range(NSamplesPerLine): index = s*NSamplesPerLine + d fd.write(str(int(S[p,H[index,stream],0]))+' ') if(HasImag): fd.write(str(int(S[p,H[index,stream],1]))+' ') fd.write('\n') for fd in fds: fd.close()
2.03125
2
Object Oriented Programming/Lecture 01/Intro.py
ashish-ad/Python-Projects
1
2537
item1 = 'phone'
item1_price = 100
item1_quantity = 5
item1_price_total = item1_price * item1_quantity

print(type(item1))              # str
print(type(item1_price))        # int
print(type(item1_quantity))     # int
print(type(item1_price_total))  # int

# output:
# <class 'str'>
# <class 'int'>
# <class 'int'>
# <class 'int'>
3.84375
4
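As a hedged follow-up to the intro snippet above (not part of the original lecture file), the same item data can be wrapped in a class, which is the direction an OOP lecture normally takes next:

class Item:
    def __init__(self, name, price, quantity):
        self.name = name
        self.price = price
        self.quantity = quantity

    def calculate_total_price(self):
        return self.price * self.quantity


item1 = Item('phone', 100, 5)
print(item1.calculate_total_price())  # 500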
configs/deepim/ycbvPbrSO/FlowNet512_1.5AugCosyAAEGray_NoiseRandom_AggressiveR_ClipGrad_fxfy1_Dtw01_LogDz_PM10_Flat_ycbvPbr_SO/FlowNet512_1.5AugCosyAAEGray_NoiseRandom_AggressiveR_ClipGrad_fxfy1_Dtw01_LogDz_PM10_Flat_Pbr_16_36WoodBlock.py
THU-DA-6D-Pose-Group/self6dpp
33
2538
<gh_stars>10-100
_base_ = "./FlowNet512_1.5AugCosyAAEGray_NoiseRandom_AggressiveR_ClipGrad_fxfy1_Dtw01_LogDz_PM10_Flat_Pbr_01_02MasterChefCan.py"
OUTPUT_DIR = "output/deepim/ycbvPbrSO/FlowNet512_1.5AugCosyAAEGray_NoiseRandom_AggressiveR_ClipGrad_fxfy1_Dtw01_LogDz_PM10_Flat_ycbvPbr_SO/16_36WoodBlock"
DATASETS = dict(TRAIN=("ycbv_036_wood_block_train_pbr",))
1.335938
1
sqlakeyset/__init__.py
jhihruei/sqlakeyset
0
2539
<filename>sqlakeyset/__init__.py
from .columns import OC
from .paging import get_page, select_page, process_args
from .results import serialize_bookmark, unserialize_bookmark, Page, Paging

__all__ = [
    'OC',
    'get_page',
    'select_page',
    'serialize_bookmark',
    'unserialize_bookmark',
    'Page',
    'Paging',
    'process_args'
]
1.703125
2
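A hedged usage sketch for the names re-exported in the sqlakeyset/__init__.py record above, following the keyset-pagination pattern the package documents; the SQLAlchemy session and Book model are hypothetical:

from sqlakeyset import get_page, unserialize_bookmark

q = session.query(Book).order_by(Book.id)          # assumed SQLAlchemy query
page = get_page(q, per_page=20)                    # first page of results
bookmark = page.paging.bookmark_next               # string-serialized keyset for the next page
# on a later request, resume where the previous page left off:
next_page = get_page(q, per_page=20, page=unserialize_bookmark(bookmark))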
low_rank_local_connectivity/models/simple_model.py
shaun95/google-research
1
2540
# coding=utf-8 # Copyright 2022 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Simple model for image classification. The model is multiple conv/locally_connected/wide_conv/low_rank_locally_connected layers followed by a fully connected layer. Changes to the model architecture can be made by modifying simple_model_config.py file. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy import os import tensorflow.compat.v1 as tf from low_rank_local_connectivity import layers from low_rank_local_connectivity import utils MOMENTUM = 0.9 EPS = 1e-5 class SimpleNetwork(tf.keras.Model): """Locally Connected Network.""" def __init__(self, config, variable_scope='simple_network'): super(SimpleNetwork, self).__init__() self.variable_scope = variable_scope self.config = copy.deepcopy(config) filters_list = self.config.num_filters_list depth = len(filters_list) self.pass_is_training_list = [] self.layers_list = [] if self.config.num_channels < 1: raise ValueError('num_channels should be > 0') input_channels = self.config.num_channels if self.config.coord_conv: # Add two coordinate conv channels. input_channels = input_channels + 2 if len(self.config.layer_types) < depth: self.config.layer_types.extend( ['conv2d'] * (depth - len(self.config.layer_types))) chin = input_channels for i, (kernel_size, num_filters, strides, layer_type) in enumerate(zip( self.config.kernel_size_list, filters_list, self.config.strides_list, self.config.layer_types)): padding = 'valid' if layer_type == 'conv2d': chout = num_filters layer = tf.keras.layers.Conv2D( filters=chout, kernel_size=kernel_size, strides=(strides, strides), padding=padding, activation=None, use_bias=not self.config.batch_norm, kernel_initializer=self.config.kernel_initializer, name=os.path.join(self.variable_scope, 'layer%d' %i, layer_type)) elif layer_type == 'wide_conv2d': # Conv. layer with equivalent params to low rank locally connected. if self.config.rank < 1: raise ValueError('rank should be > 0 for %s layer.' % layer_type) chout = int((self.config.rank * chin + num_filters) / float( chin + num_filters) * num_filters) layer = tf.keras.layers.Conv2D( filters=chout if i < (depth-1) else int(num_filters * self.config.rank), kernel_size=kernel_size, strides=(strides, strides), padding=padding, activation=None, use_bias=not self.config.batch_norm, kernel_initializer=self.config.kernel_initializer, name=os.path.join(self.variable_scope, 'layer%d' %i, layer_type)) elif layer_type == 'locally_connected2d': # Full locally connected layer. 
chout = num_filters layer = tf.keras.layers.LocallyConnected2D( filters=chout, kernel_size=(kernel_size, kernel_size), strides=(strides, strides), padding=padding, activation=None, use_bias=True, # not self.config.batch_norm, name=os.path.join(self.variable_scope, 'layer%d' %i, layer_type), kernel_initializer=self.config.kernel_initializer) elif layer_type == 'low_rank_locally_connected2d': if self.config.rank < 1: raise ValueError('rank should be > 0 for %s layer.' % layer_type) chout = num_filters layer = layers.LowRankLocallyConnected2D( filters=chout, kernel_size=(kernel_size, kernel_size), strides=(strides, strides), padding=padding, activation=None, use_bias=not self.config.batch_norm, name=os.path.join(self.variable_scope, 'layer%d' %i, layer_type), kernel_initializer=self.config.kernel_initializer, combining_weights_initializer=( self.config.combining_weights_initializer), spatial_rank=self.config.rank, normalize_weights=self.config.normalize_weights, input_dependent=config.input_dependent, share_row_combining_weights=self.config.share_row_combining_weights, share_col_combining_weights=self.config.share_col_combining_weights) else: raise ValueError('Can not recognize layer %s type.' % layer_type) chin = chout self.layers_list.append(layer) self.pass_is_training_list.append(False) if self.config.batch_norm: layer = tf.keras.layers.BatchNormalization( trainable=True, momentum=MOMENTUM, epsilon=EPS) self.layers_list.append(layer) self.pass_is_training_list.append(True) layer = tf.keras.layers.ReLU() self.layers_list.append(layer) self.pass_is_training_list.append(False) if self.config.global_avg_pooling: self.layers_list.append(tf.keras.layers.GlobalAveragePooling2D()) else: self.layers_list.append(tf.keras.layers.Flatten()) self.pass_is_training_list.append(False) self.layers_list.append(tf.keras.layers.Dense( units=self.config.num_classes, activation=None, use_bias=True, name='logits')) self.pass_is_training_list.append(False) def __call__(self, images, is_training): endpoints = {} if self.config.coord_conv: # Append position channels. net = tf.concat([images, utils.position_channels(images)], axis=3) else: net = images for i, (pass_is_training, layer) in enumerate( zip(self.pass_is_training_list, self.layers_list)): net = layer(net, training=is_training) if pass_is_training else layer(net) endpoints['layer%d' % i] = net tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, layer.updates) self.add_update(layer.updates) logits = net return logits, endpoints
2.703125
3
adafruit_circuitpython_libs/adafruit-circuitpython-bundle-py-20210214/examples/icm20x_icm20948_gyro_data_rate_test.py
jacoblb64/pico_rgb_keypad_hid
47
2541
<reponame>jacoblb64/pico_rgb_keypad_hid
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT

import time
import board
import busio
from adafruit_icm20x import ICM20948

cycles = 200
i2c = busio.I2C(board.SCL, board.SDA)
icm = ICM20948(i2c)

# Cycle between two data rates
# Best viewed in the Mu serial plotter where you can see how
# the data rate affects the resolution of the data
while True:
    icm.gyro_data_rate_divisor = 0  # minimum
    print("Data Rate:", icm.gyro_data_rate)
    time.sleep(2)
    for i in range(cycles):
        print(icm.gyro)

    icm.gyro_data_rate_divisor = 255  # maximum
    print("Data Rate:", icm.gyro_data_rate)
    time.sleep(2)
    for i in range(cycles):
        print(icm.gyro)
3.015625
3
1186.py
TheLurkingCat/TIOJ
0
2542
<gh_stars>0
a = int(input())
while a:
    for x in range(a-1):
        out = '*' + ' ' * (a-x-2) + '*' + ' ' * (a-x-2) + '*'
        print(out.center(2*a-1))
    print('*' * (2 * a - 1))
    for x in range(a-1):
        out = '*' + ' ' * x + '*' + ' ' * x + '*'
        print(out.center(2*a-1))
    a = int(input())
3.546875
4
utils/common.py
initialed85/eds-cctv-system
0
2543
import datetime import json import os from pathlib import Path from types import SimpleNamespace from typing import List from typing import NamedTuple, Union, Optional, Callable from uuid import uuid3, NAMESPACE_DNS from dateutil.parser import parse _VIDEO_SUFFIXES = [".mkv", ".mp4"] _IMAGE_SUFFIXES = [".jpg"] _PERMITTED_EXTENSIONS = _VIDEO_SUFFIXES + _IMAGE_SUFFIXES class PathDetails(NamedTuple): path: Path event_id: Optional[int] camera_id: Optional[int] timestamp: datetime.datetime camera_name: str is_image: bool is_lowres: bool class Event(SimpleNamespace): event_id: str timestamp: Union[datetime.datetime, str] camera_name: str high_res_image_path: str low_res_image_path: str high_res_video_path: str low_res_video_path: str def get_sorted_paths(path: Path) -> List[Path]: return sorted(Path(path).iterdir(), key=os.path.getmtime) def format_timestamp_for_go(timestamp: Union[datetime.datetime, str]) -> str: if isinstance(timestamp, str): timestamp = parse(timestamp) us = timestamp.strftime("%f") tz_raw = timestamp.strftime("%z") tz = "{}:{}".format(tz_raw[0:3], tz_raw[3:]) return timestamp.strftime(f"%Y-%m-%dT%H:%M:%S.{us}00{tz}") def parse_paths(paths: List[Path], tzinfo: datetime.tzinfo, parse_method: Callable) -> List[PathDetails]: return [ y for y in [parse_method(path=x, tzinfo=tzinfo) for x in paths if x is not None] if y is not None ] def build_event_for_some_path_details(some_path_details: List[PathDetails], path: Path): if len(some_path_details) != 4: raise ValueError( f"expected some_path_details to be 4 long (and related); instead it was {len(some_path_details)} long" ) event_ids = list(set([x.event_id for x in some_path_details])) if len(event_ids) != 1: raise ValueError( f"expected all PathDetails to have a common event_id; instead they were {event_ids}" ) camera_ids = list(set([x.camera_id for x in some_path_details])) if len(camera_ids) != 1: raise ValueError( f"expected all PathDetails to have a common camera_id; instead they were {camera_ids}" ) camera_names = list(set([x.camera_name for x in some_path_details])) if len(camera_names) != 1: raise ValueError( f"expected all PathDetails to have a common camera_name; instead they were {camera_names}" ) high_res_image_paths = list( set([x.path for x in some_path_details if x.is_image and not x.is_lowres]) ) if len(high_res_image_paths) != 1: raise ValueError( f"expected to find 1 high_res_image_path from PathDetails; instead found {high_res_image_paths}" ) low_res_image_paths = list( set([x.path for x in some_path_details if x.is_image and x.is_lowres]) ) if len(low_res_image_paths) != 1: raise ValueError( f"expected to find 1 low_res_image_path from PathDetails; instead found {low_res_image_paths}" ) high_res_video_paths = list( set([x.path for x in some_path_details if not x.is_image and not x.is_lowres]) ) if len(high_res_video_paths) != 1: raise ValueError( f"expected to find 1 high_res_video_path from PathDetails; instead found {high_res_video_paths}" ) low_res_video_paths = list( set([x.path for x in some_path_details if not x.is_image and x.is_lowres]) ) if len(low_res_video_paths) != 1: raise ValueError( f"expected to find 1 low_res_video_path from PathDetails; instead found {low_res_video_paths}" ) timestamp = sorted([x.timestamp for x in some_path_details])[0] high_res_image_path = high_res_image_paths[0] low_res_image_path = low_res_image_paths[0] high_res_video_path = high_res_video_paths[0] low_res_video_path = low_res_video_paths[0] # in Go: # eventId := uuid.NewSHA1( # uuid.NameSpaceDNS, # 
[]byte(fmt.Sprintf("%v, %v, %v, %v, %v", timestamp, highResImagePath, lowResImagePath, highResVideoPath, lowResVideoPath)), # ) event_id = uuid3( NAMESPACE_DNS, f"{format_timestamp_for_go(timestamp)}, {high_res_image_path}, {low_res_image_path}, {high_res_video_path}, {low_res_video_path}", ) return Event( event_id=str(event_id), timestamp=timestamp, camera_name=camera_names[0], high_res_image_path=str(path / high_res_image_path), low_res_image_path=str(path / low_res_image_path), high_res_video_path=str(path / high_res_video_path), low_res_video_path=str(path / low_res_video_path), ) def relate_path_details( some_path_details: List[PathDetails], get_key_methods: List[Callable] ) -> List[List[PathDetails]]: some_path_details_by_key = {} for path_details in some_path_details: keys = [x(path_details) for x in get_key_methods] for key in keys: some_path_details_by_key.setdefault(key, []) some_path_details_by_key[key] += [path_details] viable_some_path_details_by_key = { k: v for k, v in some_path_details_by_key.items() if len(v) == 4 } deduplicated_path_details = [] for some_path_details in viable_some_path_details_by_key.values(): if some_path_details not in deduplicated_path_details: deduplicated_path_details += [some_path_details] return deduplicated_path_details def build_events_for_related_path_details( related_path_details: List[List[PathDetails]], path: Path ) -> List[Event]: events: List[Event] = [] for some_path_details in related_path_details: events += [ build_event_for_some_path_details( some_path_details=some_path_details, path=path ) ] sorted_events = sorted(events, key=lambda x: x.timestamp) for event in sorted_events: event.timestamp = format_timestamp_for_go(timestamp=event.timestamp) return sorted_events def build_json_lines_from_events(events: List[Event]) -> str: return "\n".join( [ json.dumps( { "event_id": x.event_id, "timestamp": x.timestamp, "camera_name": x.camera_name, "high_res_image_path": x.high_res_image_path, "low_res_image_path": x.low_res_image_path, "high_res_video_path": x.high_res_video_path, "low_res_video_path": x.low_res_video_path, } ) for x in events ] ) def write_to_file(path: Path, data: str): with open(str(path), "w") as f: f.write(data) def rebuild_event_store(root_path: Path, tzinfo: datetime.tzinfo, json_path: Path, parse_method: Callable, get_key_methods: List[Callable]): print(f"getting sorted paths from {root_path}...") sorted_paths = get_sorted_paths(path=root_path) print(f"got {len(sorted_paths)} sorted paths") print("parsing sorted paths...") some_path_details = parse_paths(paths=sorted_paths, tzinfo=tzinfo, parse_method=parse_method) print(f"got {len(some_path_details)} parsed paths") print("relating parsed paths...") related_path_details = relate_path_details(some_path_details=some_path_details, get_key_methods=get_key_methods) print(f"got {len(related_path_details)} related paths") print("building events...") events = build_events_for_related_path_details( related_path_details=related_path_details, path=root_path ) print(f"built {len(events)} events") print("building json lines...") json_lines = build_json_lines_from_events(events=events) print(f"built {len(json_lines)} bytes") print(f"writing to {json_path}") write_to_file(path=json_path, data=json_lines) print("done.")
2.609375
3
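A hedged sketch of how rebuild_event_store from the utils/common.py record above might be driven; the paths, timezone, and both callables are hypothetical stand-ins for the camera-specific ones the repo presumably supplies:

import datetime
from pathlib import Path

def parse_path(path, tzinfo):
    # Placeholder: a real implementation would decode the camera's file-naming
    # scheme into a PathDetails, or return None to skip the path.
    return None

def key_for_event(details):
    # Placeholder grouping key; real keys must relate the four files of one event.
    return (details.camera_name, details.timestamp.date())

rebuild_event_store(
    root_path=Path("/srv/cctv/events"),        # hypothetical
    tzinfo=datetime.timezone.utc,              # hypothetical
    json_path=Path("/srv/cctv/events.json"),   # hypothetical
    parse_method=parse_path,
    get_key_methods=[key_for_event],
)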
schoolio/migrations/0005_auto_20190927_1423.py
schoolio-co/schoolio_site
0
2544
# Generated by Django 2.2.1 on 2019-09-27 14:23

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('schoolio', '0004_auto_20190927_0405'),
    ]

    operations = [
        migrations.AlterField(
            model_name='student_assessment',
            name='assessment_mark',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='student_assessment',
            name='assessment_score',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='student_assessment',
            name='understanding_level',
            field=models.CharField(blank=True, max_length=150, null=True),
        ),
    ]
1.507813
2
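For readers less familiar with Django migrations, the three AlterField operations above correspond roughly to a model whose fields were made optional and nullable. A sketch of the matching model state follows; the field definitions are copied from the migration, while the class name is only inferred from model_name and the real model almost certainly has additional fields.

from django.db import models

class Student_Assessment(models.Model):
    # field states as declared by the AlterField operations above
    assessment_mark = models.IntegerField(blank=True, null=True)
    assessment_score = models.IntegerField(blank=True, null=True)
    understanding_level = models.CharField(blank=True, max_length=150, null=True)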
taattack/_datasets/dataset.py
linerxliner/ValCAT
0
2545
<gh_stars>0 class Dataset: _data = None _first_text_col = 'text' _second_text_col = None _label_col = 'label' def __init__(self): self._idx = 0 if self._data is None: raise Exception('Dataset is not loaded') def __iter__(self): return self def __next__(self): if self._idx >= len(self._data): raise StopIteration else: item = self._data.iloc[self._idx] self._idx += 1 if self._second_text_col: return item[self._first_text_col], item[self._second_text_col], int(item[self._label_col]) else: return item[self._first_text_col], int(item[self._label_col]) def __getitem__(self, item): if isinstance(item, int): item = self._data.iloc[item] if self._second_text_col: return item[self._first_text_col], item[self._second_text_col], int(item[self._label_col]) else: return item[self._first_text_col], int(item[self._label_col]) elif isinstance(item, slice): start = item.start if item.start else 0 stop = item.stop if item.stop else len(self._data) step = item.step if item.step else 1 items = self._data.iloc[start:stop:step] if self._second_text_col: return [(item[self._first_text_col], item[self._second_text_col], int(item[self._label_col])) for _, item in items.iterrows()] else: return [(item[self._first_text_col], int(item[self._label_col])) for _, item in items.iterrows()] else: raise KeyError def __str__(self): return str(self._data)
2.765625
3
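Since Dataset only works once _data is populated by a subclass, a minimal sketch of how a concrete dataset might be defined and consumed is shown below; the DataFrame contents are invented, and the column names simply match the _first_text_col / _label_col defaults above.

import pandas as pd
# assumes the Dataset class above is importable, e.g. from taattack._datasets.dataset import Dataset

class ToyDataset(Dataset):
    # two single-text examples; "text" and "label" match the default column names
    _data = pd.DataFrame({"text": ["good movie", "bad movie"], "label": [1, 0]})

ds = ToyDataset()
print(ds[0])       # ('good movie', 1)
print(ds[0:2])     # [('good movie', 1), ('bad movie', 0)]
for text, label in ds:   # __iter__/__next__ walk the DataFrame row by row
    print(text, label)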
scripts/extract_gs_citations.py
akhilpandey95/scholarlyimpact
0
2546
<filename>scripts/extract_gs_citations.py #!/usr/bin/env python3 # This Source Code Form is subject to the terms of the MIT # License. If a copy of the same was not distributed with this # file, You can obtain one at # https://github.com/akhilpandey95/scholarlyimpact/blob/master/LICENSE. import os import csv import glob import json import requests import subprocess import numpy as np import pandas as pd from tqdm import tqdm from ast import literal_eval from fp.fp import FreeProxy from torrequest import TorRequest from scholarly import scholarly from collections import Counter, OrderedDict from operator import attrgetter # class definition for Rate limiting class RateLimiter: """ Class object for putting a rate limit on the number of requests made Parameters ---------- No arguments Returns ------- Nothing """ def __init__(self, maxRate=5, timeUnit=1): self.timeUnit = timeUnit self.deque = deque(maxlen=maxRate) def __call__(self): if self.deque.maxlen == len(self.deque): cTime = time.time() if cTime - self.deque[0] > self.timeUnit: self.deque.append(cTime) return False else: return True self.deque.append(time.time()) return False # function for obtaining the citations using the dimensions web url def get_gs_citations_web(title): """ Use the google scholar web URL and requests API to obtain the citations for a given title of a scholarly article Parameters ---------- arg1 | title: str The title of a scholarly article Returns ------- Dictionary dict """ while True: try: # call the lumproxy object scholarly.use_lum_proxy() # make the query query = scholarly.search_pubs(title) # come out break except Exception as e: # come out and try again break # return the response dict return next(query) # function for assigning new IP address def assign_new_ip(text=False): """ Reset the identity using TorRequest Parameters ---------- arg1 [OPTIONAL]| text: bool A boolean flag to return the IP address tuple (old, morphed) Returns ------- boolean True/False """ try: # pass the hashed password req = TorRequest(password='<PASSWORD>') # return the ip address normal_identity = requests.get('http://ipecho.net/plain') # reset the identity using Tor req.reset_identity() # make a request now morphed_identity = req.get('http://ipecho.net/plain') # return the status depending on the flag if morphed_identity != normal_identity: if text == True: # return the ip address pairs as a tuple return (normal_identity.text, morphed_identity.text) else: return True else: # return just the status return False except: return False # function for assigning a new proxy def set_new_proxy(text=True): """ Reset the identity using FreeProxy Parameters ---------- arg1 [OPTIONAL]| text: bool A boolean flag to return the IP address tuple (old, morphed) Returns ------- Address fp.fp.FreeProxy """ while True: # call the freeproxy object proxy = FreeProxy(rand=True, timeout=1).get() # allocate the proxy address to scholarly proxy_works = scholarly.use_proxy(http=proxy, https=proxy) # check it the ip address works if proxy_works: # come out break # print the ip address depending on the text argument if text: # print the working ip print("Working proxy:", proxy) # return the proxy details return proxy # function for connecting tor to scholarly def scholarly_init_connection(): """ Bind TorRequest to Scholarly service Parameters ---------- No arguments Returns ------- Nothing """ while True: # assign new tor identity ips = assign_new_ip(text=True) # use the tor request for scholarly tor_req = scholarly.use_tor(tor_sock_port=9050, \ 
tor_control_port=9051, \ tor_pw="<PASSWORD>") if tor_req: # come out of the loop, when successful break # print the tor identity print("Working Tor identity:", ips[1]) # function for restarting the system tor service def restart_tor_system_service(text=False): """ Use the os module to restart the tor service Parameters ---------- arg1 [OPTIONAL]| text: bool A boolean flag to return the status of the command Returns ------- Boolean bool """ # subprocess command for stopping the tor service tor_stop = subprocess.Popen(['service', 'tor', 'stop']) # subprocess command for restarting the tor service tor_restart = subprocess.Popen(['service', 'tor', 'restart']) # subprocess command for restarting the tor service tor_status = subprocess.Popen(['service', 'tor', 'status'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) # if the label is set to true then print the output if text: for output in tor_status.stdout.readlines(): print(output.strip()) # pipe out the stdout, stderr for the subprocess stdout, stderr = tor_status.communicate() if len(stderr) > 0: # return False return False else: # return true if successful return True def get_articleInfo(title): """ Use the google scholar web URL and requests API to obtain the citations for a given title of a scholarly article Parameters ---------- arg1 | title: str The title of a scholarly article Returns ------- Dictionary dict """ while True: try: # init the connection with scholarly and tor scholarly_init_connection() # search for the query search_query = scholarly.search_pubs(title) # print success print("Got the results of the query") # come out of the loop break except Exception as e: # print error message print("Attempt Failed, patching new tor identity") # restart the system tor service restart_tor_system_service(text=False) # assign new connection again scholarly_init_connection() # obtain the bib entry of the scholarly article pub = next(search_query) # return the bib entry return pub if __name__ == '__main__': # iterate over the length length_of_file = len(open('paper_titles.txt').readlines()) # place the contents of the list into a file alt_list = open('paper_titles.txt').readlines() # iterate over the length of the file # write the results to a file for i in tqdm(range(length_of_file)): alt_info = open('paper_titles.txt', 'r+') cit_info = open('citations_gs.csv', 'a') cit_info.write(str(alt_list[i].strip( ).split('\t')[0]) + ',' + str(get_articleInfo(alt_list[i].strip().split('\t')[1]))) cit_info.write('\n') cit_info.close() alt_info.seek(0) alt_info.truncate() alt_info.writelines(alt_list[i+1:]) alt_info.close()
2.671875
3
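Note that the RateLimiter class above refers to deque and time without importing them and is never actually called in the script. A self-contained sketch of how such a sliding-window limiter could pace the Scholar lookups is given below; the snake_case names and the placeholder titles are mine, and the __call__ logic mirrors the class above.

import time
from collections import deque

class RateLimiter:
    """Sliding-window limiter: calling it returns True when the caller should wait."""
    def __init__(self, max_rate=5, time_unit=1):
        self.time_unit = time_unit
        self.deque = deque(maxlen=max_rate)

    def __call__(self):
        if len(self.deque) == self.deque.maxlen:
            now = time.time()
            if now - self.deque[0] > self.time_unit:
                self.deque.append(now)
                return False
            return True
        self.deque.append(time.time())
        return False

limiter = RateLimiter(max_rate=5, time_unit=1)
for title in ["Some paper title", "Another paper title"]:   # placeholders
    while limiter():          # back off while over the rate limit
        time.sleep(0.1)
    # get_articleInfo(title)  # the scholarly lookup defined above
    print("would query:", title)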
dist-packages/reportlab/pdfgen/pathobject.py
Jianwei-Wang/python2.7_lib
51
2547
<filename>dist-packages/reportlab/pdfgen/pathobject.py #Copyright ReportLab Europe Ltd. 2000-2012 #see license.txt for license details #history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/pdfgen/pathobject.py __version__=''' $Id$ ''' __doc__=""" PDFPathObject is an efficient way to draw paths on a Canvas. Do not instantiate directly, obtain one from the Canvas instead. Progress Reports: 8.83, 2000-01-13, gmcm: created from pdfgen.py """ from reportlab.pdfgen import pdfgeom from reportlab.lib.rl_accel import fp_str class PDFPathObject: """Represents a graphic path. There are certain 'modes' to PDF drawing, and making a separate object to expose Path operations ensures they are completed with no run-time overhead. Ask the Canvas for a PDFPath with getNewPathObject(); moveto/lineto/ curveto wherever you want; add whole shapes; and then add it back into the canvas with one of the relevant operators. Path objects are probably not long, so we pack onto one line the code argument allows a canvas to get the operatiosn appended directly so avoiding the final getCode """ def __init__(self,code=None): self._code = (code,[])[code is None] self._code_append = self._init_code_append def _init_code_append(self,c): assert c.endswith(' m') or c.endswith(' re'), 'path must start with a moveto or rect' code_append = self._code.append code_append('n') code_append(c) self._code_append = code_append def getCode(self): "pack onto one line; used internally" return ' '.join(self._code) def moveTo(self, x, y): self._code_append('%s m' % fp_str(x,y)) def lineTo(self, x, y): self._code_append('%s l' % fp_str(x,y)) def curveTo(self, x1, y1, x2, y2, x3, y3): self._code_append('%s c' % fp_str(x1, y1, x2, y2, x3, y3)) def arc(self, x1,y1, x2,y2, startAng=0, extent=90): """Contributed to piddlePDF by <NAME>, 28/7/99. Draw a partial ellipse inscribed within the rectangle x1,y1,x2,y2, starting at startAng degrees and covering extent degrees. Angles start with 0 to the right (+x) and increase counter-clockwise. These should have x1<x2 and y1<y2. The algorithm is an elliptical generalization of the formulae in <NAME>'s TeX tutorial <URL: http://www.tinaja.com/bezarc1.pdf>.""" self._curves(pdfgeom.bezierArc(x1,y1, x2,y2, startAng, extent)) def arcTo(self, x1,y1, x2,y2, startAng=0, extent=90): """Like arc, but draws a line from the current point to the start if the start is not the current point.""" self._curves(pdfgeom.bezierArc(x1,y1, x2,y2, startAng, extent),'lineTo') def rect(self, x, y, width, height): """Adds a rectangle to the path""" self._code_append('%s re' % fp_str((x, y, width, height))) def ellipse(self, x, y, width, height): """adds an ellipse to the path""" self._curves(pdfgeom.bezierArc(x, y, x + width,y + height, 0, 360)) def _curves(self,curves,initial='moveTo'): getattr(self,initial)(*curves[0][:2]) for curve in curves: self.curveTo(*curve[2:]) def circle(self, x_cen, y_cen, r): """adds a circle to the path""" x1 = x_cen - r y1 = y_cen - r width = height = 2*r self.ellipse(x1, y1, width, height) def roundRect(self, x, y, width, height, radius): """Draws a rectangle with rounded corners. The corners are approximately quadrants of a circle, with the given radius.""" #use a precomputed set of factors for the bezier approximation #to a circle. There are six relevant points on the x axis and y axis. #sketch them and it should all make sense! 
t = 0.4472 * radius x0 = x x1 = x0 + t x2 = x0 + radius x3 = x0 + width - radius x4 = x0 + width - t x5 = x0 + width y0 = y y1 = y0 + t y2 = y0 + radius y3 = y0 + height - radius y4 = y0 + height - t y5 = y0 + height self.moveTo(x2, y0) self.lineTo(x3, y0) #bottom row self.curveTo(x4, y0, x5, y1, x5, y2) #bottom right self.lineTo(x5, y3) #right edge self.curveTo(x5, y4, x4, y5, x3, y5) #top right self.lineTo(x2, y5) #top row self.curveTo(x1, y5, x0, y4, x0, y3) #top left self.lineTo(x0, y2) #left edge self.curveTo(x0, y1, x1, y0, x2, y0) #bottom left self.close() def close(self): "draws a line back to where it started" self._code_append('h')
2.703125
3
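The docstring explains that a PDFPathObject is normally obtained from a Canvas and then added back with a drawing operator; for a quick look at the operator stream it builds, the class can also be exercised directly. Every method used in this sketch is defined in the file above, so only the reportlab package itself is assumed.

from reportlab.pdfgen.pathobject import PDFPathObject

p = PDFPathObject()
p.moveTo(100, 100)                  # a path must start with a moveto or rect
p.lineTo(200, 100)
p.curveTo(220, 100, 240, 120, 240, 140)
p.close()
p.circle(150, 300, 40)
p.roundRect(300, 100, 120, 80, 10)
print(p.getCode())                  # raw PDF operators, e.g. 'n 100 100 m 200 100 l ...'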
client/modules/Wikipedia.py
devagul93/Jarvis-System
0
2548
import wikipedia import re import TCPclient as client WORDS = ["WIKIPEDIA","SEARCH","INFORMATION"] def handle(text,mic,profile): # SEARCH ON WIKIPEDIA # ny = wikipedia.summary("New York",sentences=3); # mic.say("%s"% ny) #mic.say("What you want to search about") #text = mic.activeListen() print "entering wiki term" text = client.grab_input() while text.upper()=="WIKIPEDIA": print "entering while" text = client.grab_input() print text answer = wikipedia.summary(text,sentences=3) answer +="\n" print answer client.send_out(answer) #mic.say(answer) def isValid(text): return bool(re.search(r'\bwikipedia\b',text, re.IGNORECASE))
3.640625
4
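The isValid() check above hinges on the \b word boundary, which is worth seeing in isolation. A small Python 3 sketch of the same test follows (the original module is Python 2, and the sample sentences are invented).

import re

def is_valid(text):
    # same pattern as isValid() above
    return bool(re.search(r'\bwikipedia\b', text, re.IGNORECASE))

print(is_valid("Jarvis, search Wikipedia for black holes"))  # True
print(is_valid("searchwikipediaplease"))                     # False: no word boundaries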
Madlibs/madlibs.py
nikhil-amin/python-mini-project
2
2549
import random print("Title : Eat, Drink, And Be Sick") noun = [] for i in range(4): n = input("Enter noun : ") noun.append(n) plural = [] for i in range(6): pn = input("Enter plural noun : ") plural.append(pn) adjective = [] for i in range(2): a = input("Enter adjective : ") adjective.append(a) adverb = input("Enter adverb : ") letter = input("Enter any letter : ") body_part = input("Enter any body part : ") print("An inspector from the Department of Health and ", random.choice(noun) , " Services paid a surprise visit to our " , random.choice(adjective) , " school cafeteria.") print("The lunch special, prepared by our " , random.choice(adjective) , "dietician, was spaghetti and " , random.choice(noun) , " balls with a choice of either a " , random.choice(noun) , " salad or French " , random.choice(plural) , ".") print("The inspector found the meat-" , random.choice(plural) , " to be overcooked and discovered a live " , random.choice(noun) , " in the fries,causing him to have a " + body_part + " ache.") print("In response, he threw up all over his " , random.choice(plural) , ".") print("In his report, the inspector " + adverb + " recommended that the school cafeteria serve only nutritious " , random.choice(plural) , " as well as low-calorie " , random.choice(plural) , " and that all of the saturated " , random.choice(plural) , " be eliminated.") print("He rated the cafeteria a " + letter + "-minus.")
3.9375
4
scripts/transpose.py
saikrishnarallabandi/python_connectionist
0
2550
import numpy g = open('/home/srallaba/mgc/transposed/arctic_a0404.mgc','w') x = numpy.loadtxt('/home/srallaba/mgc_spaces/arctic_a0404.mgc') numpy.savetxt(g, numpy.transpose(x)) g.close()
2.234375
2
tests/zone_api_test/core/zone_manager_test.py
yfaway/zone-apis
1
2551
<gh_stars>1-10 from zone_api.core.zone_manager import ZoneManager from zone_api import platform_encapsulator as pe from zone_api.core.zone import Zone from zone_api.core.zone_event import ZoneEvent from zone_api.core.devices.dimmer import Dimmer from zone_api.core.devices.switch import Fan, Light, Switch from zone_api.core.devices.illuminance_sensor import IlluminanceSensor from zone_api.core.devices.motion_sensor import MotionSensor from zone_api.core.actions.turn_on_switch import TurnOnSwitch from zone_api_test.core.device_test import DeviceTest ILLUMINANCE_THRESHOLD_IN_LUX = 8 INVALID_ITEM_NAME = 'invalid item name' class ZoneManagerTest(DeviceTest): """ Unit tests for zone_manager.py. """ def setUp(self): items = [pe.create_switch_item('TestLightName'), pe.create_switch_item('TestMotionSensorName'), pe.create_number_item('IlluminanceSensorName'), pe.create_string_item('AstroSensorName'), pe.create_dimmer_item('TestDimmerName'), pe.create_switch_item('TestFanName'), ] self.set_items(items) super(ZoneManagerTest, self).setUp() [self.lightItem, self.motionSensorItem, self.illuminanceSensorItem, self.astroSensorItem, self.dimmerItem, self.fanItem] = items self.illuminanceSensor = IlluminanceSensor(self.illuminanceSensorItem) self.light = Light(self.lightItem, 2, ILLUMINANCE_THRESHOLD_IN_LUX) self.motionSensor = MotionSensor(self.motionSensorItem) self.dimmer = Dimmer(self.dimmerItem, 2, 100, "0-23:59") self.fan = Fan(self.fanItem, 2) self.zm = ZoneManager() def tearDown(self): self.zm.stop_auto_report_watch_dog() self.fan._cancel_timer() self.dimmer._cancel_timer() self.light._cancel_timer() super(ZoneManagerTest, self).tearDown() def testAddZone_validZone_zoneAdded(self): zone1 = Zone('ff') self.zm.add_zone(zone1) self.assertEqual(1, len(self.zm.get_zones())) zone2 = Zone('2f') self.zm.add_zone(zone2) self.assertEqual(2, len(self.zm.get_zones())) def testGetZoneById_validZoneId_returnValidZone(self): zone1 = Zone('ff') self.zm.add_zone(zone1) zone2 = Zone('2f') self.zm.add_zone(zone2) self.assertEqual(zone1.get_name(), self.zm.get_zone_by_id(zone1.get_id()).get_name()) self.assertEqual(zone2.get_name(), self.zm.get_zone_by_id(zone2.get_id()).get_name()) def testGetZoneById_invalidZoneId_returnNone(self): self.assertTrue(self.zm.get_zone_by_id('invalid zone id') is None) def testRemoveZone_validZone_zoneRemoved(self): zone1 = Zone('ff') self.zm.add_zone(zone1) zone2 = Zone('2f') self.zm.add_zone(zone2) self.assertEqual(2, len(self.zm.get_zones())) self.zm.remove_zone(zone1) self.assertEqual(1, len(self.zm.get_zones())) self.zm.remove_zone(zone2) self.assertEqual(0, len(self.zm.get_zones())) def testContainingZone_validDevice_returnsCorrectZone(self): zone1 = Zone('ff').add_device(self.light) zone2 = Zone('sf').add_device(self.fan) self.zm.add_zone(zone1) self.zm.add_zone(zone2) self.assertEqual(zone1, self.zm.get_immutable_instance().get_containing_zone(self.light)) self.assertEqual(zone2, self.zm.get_immutable_instance().get_containing_zone(self.fan)) def testContainingZone_invalidDevice_returnsNone(self): zone1 = Zone('ff').add_device(self.light) self.zm.add_zone(zone1) self.assertEqual(None, self.zm.get_immutable_instance().get_containing_zone(self.fan)) def testGetDevicesByType_variousScenarios_returnsCorrectList(self): zone1 = Zone('ff').add_device(self.light) zone2 = Zone('sf').add_device(self.fan) self.zm.add_zone(zone1) self.zm.add_zone(zone2) self.assertEqual(2, len(self.zm.get_zones())) self.assertEqual(1, len(self.zm.get_devices_by_type(Light))) self.assertEqual(2, 
len(self.zm.get_devices_by_type(Switch))) self.assertEqual(0, len(self.zm.get_devices_by_type(Dimmer))) def testOnMotionSensorTurnedOn_noZone_returnsFalse(self): self.assertFalse(self.zm.get_immutable_instance().dispatch_event( ZoneEvent.MOTION, pe.get_event_dispatcher(), self.motionSensor, pe.create_string_item(INVALID_ITEM_NAME))) def testOnMotionSensorTurnedOn_withNonApplicableZone_returnsFalse(self): zone = Zone('ff', [self.light, self.motionSensor]) self.zm.add_zone(zone) self.assertFalse(self.zm.get_immutable_instance().dispatch_event( ZoneEvent.MOTION, pe.get_event_dispatcher(), self.motionSensor, pe.create_string_item(INVALID_ITEM_NAME))) def testOnMotionSensorTurnedOn_withApplicableZone_returnsTrue(self): self.assertFalse(self.light.is_on()) pe.set_number_value(self.illuminanceSensorItem, ILLUMINANCE_THRESHOLD_IN_LUX - 1) zone = Zone('ff', [self.light, self.motionSensor, self.illuminanceSensor]) zone = zone.add_action(TurnOnSwitch()) self.zm.add_zone(zone) self.assertTrue(self.zm.get_immutable_instance().dispatch_event( ZoneEvent.MOTION, pe.get_event_dispatcher(), self.motionSensor, self.motionSensor.get_item())) def testOnSwitchTurnedOn_noZone_returnsFalse(self): self.assertFalse(self.zm.get_immutable_instance().on_switch_turned_on( pe.get_event_dispatcher(), self.light, pe.create_string_item(INVALID_ITEM_NAME))) def testOnSwitchTurnedOn_withNonApplicableZone_returnsFalse(self): zone = Zone('ff', [self.light, self.motionSensor]) self.zm.add_zone(zone) self.assertFalse(self.zm.get_immutable_instance().on_switch_turned_on( pe.get_event_dispatcher(), self.light, pe.create_string_item(INVALID_ITEM_NAME))) def testOnSwitchTurnedOn_withApplicableZone_returnsTrue(self): zone = Zone('ff', [self.light, self.motionSensor]) self.zm.add_zone(zone) self.assertTrue(self.zm.get_immutable_instance().on_switch_turned_on( pe.get_event_dispatcher(), self.light, self.light.get_item())) def testOnSwitchTurnedOff_noZone_returnsFalse(self): self.assertFalse(self.zm.get_immutable_instance().on_switch_turned_off( pe.get_event_dispatcher(), self.light, pe.create_string_item(INVALID_ITEM_NAME))) def testOnSwitchTurnedOff_withNonApplicableZone_returnsFalse(self): zone = Zone('ff', [self.light, self.motionSensor]) self.zm.add_zone(zone) self.assertFalse(self.zm.get_immutable_instance().on_switch_turned_off( pe.get_event_dispatcher(), self.light, pe.create_string_item(INVALID_ITEM_NAME))) def testOnSwitchTurnedOff_withApplicableZone_returnsTrue(self): zone = Zone('ff', [self.light, self.motionSensor]) self.zm.add_zone(zone) self.assertTrue(self.zm.get_immutable_instance().on_switch_turned_off( pe.get_event_dispatcher(), self.light, self.light.get_item()))
2.234375
2
meiduo_mall/meiduo_mall/apps/orders/views.py
Zasling/meiduo_mall33
1
2552
from rest_framework.response import Response
from rest_framework.views import APIView
from django_redis import get_redis_connection
from goods.models import SKU
from decimal import Decimal
from rest_framework.generics import CreateAPIView,ListAPIView
from rest_framework.mixins import ListModelMixin
from orders.serializers import OrderShowSerializer, OrderSaveSerializer, OrderListSerializer, CommentSerializers, \
    CommentSaveSerializers, CommentShowSerializers
from users.models import User
from orders.models import OrderInfo,OrderGoods
from orders.utils import PageNum
from rest_framework.filters import OrderingFilter


# Display order information
class OrdersShowView(APIView):

    def get(self, request):
        # Get the user object
        user = request.user
        # Establish a Redis connection
        conn = get_redis_connection('cart')
        # Get the hash data: sku_id, count
        sku_id_count = conn.hgetall('cart_%s' %user.id) # {10:1}
        # Convert the byte data to integers
        cart = {}
        for sku_id, count in sku_id_count.items():
            cart[int(sku_id)] = int(count)
        # Get the set data
        sku_ids = conn.smembers('cart_selected_%s' %user.id)
        # Query all SKU objects in the selected state
        skus = SKU.objects.filter(id__in=sku_ids)
        # Add a count attribute to each SKU object (the SKU table has no count field, so it is set manually)
        for sku in skus:
            sku.count = cart[sku.id]
        # Generate the shipping fee
        freight = Decimal(10.00)
        # Serialize and return the SKU objects
        ser = OrderShowSerializer({'freight': freight, 'skus': skus})
        return Response(ser.data)


# Save order information
class OrderSaveView(ListModelMixin, CreateAPIView):
    serializer_class = OrderSaveSerializer


# Fetch order list data
class OrderListView(ListAPIView):
    pagination_class = PageNum
    serializer_class = OrderListSerializer

    def get_queryset(self):
        user = self.request.user
        order = OrderInfo.objects.filter(user = user)
        return order


# Comments: get the products to be commented on
class OrderComment(ListAPIView):
    serializer_class = CommentSerializers

    def get_queryset(self):
        order_id = self.kwargs['order_id']
        skus = OrderGoods.objects.filter(order_id = order_id, is_commented=False)
        return skus


# Save a comment
class SaveSkuComment(CreateAPIView):
    serializer_class = CommentSaveSerializers


# Display comments on the product detail page
class ShowComment(ListAPIView):
    serializer_class = CommentShowSerializers

    def get_queryset(self):
        # Get sku_id from kwargs
        sku_id = self.kwargs['sku_id']
        # Get the product's comment records
        orders = OrderGoods.objects.filter(sku_id=sku_id, is_commented = True)
        for sku in orders:
            skuinfo = OrderInfo.objects.get(order_id=sku.order_id)
            user = User.objects.get(id = skuinfo.user_id)
            # Get the username and check whether the comment is anonymous
            sku.username = user.username
            if sku.is_anonymous == True:
                sku.username = '****'
        return orders
1.984375
2
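OrdersShowView above reads two Redis structures per user: a hash cart_<user_id> mapping sku_id to count, and a set cart_selected_<user_id> of ticked sku_ids. The sketch below seeds and reads that layout with plain redis-py; the connection settings and user id are hypothetical, and the real project goes through django_redis's 'cart' alias instead of a direct client.

import redis

r = redis.Redis(host="localhost", port=6379, db=0)   # hypothetical connection settings
user_id = 42

# hash: sku_id -> count
r.hset(f"cart_{user_id}", mapping={"10": 2, "11": 1})
# set: sku_ids whose checkbox is ticked
r.sadd(f"cart_selected_{user_id}", "10")

# mirrors what the view does before serializing
counts = {int(k): int(v) for k, v in r.hgetall(f"cart_{user_id}").items()}
selected = {int(s) for s in r.smembers(f"cart_selected_{user_id}")}
print({sku: counts[sku] for sku in selected})   # {10: 2}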
src/streamlink_cli/main.py
melmorabity/streamlink
0
2553
import argparse import errno import logging import os import platform import signal import sys from collections import OrderedDict from contextlib import closing from distutils.version import StrictVersion from functools import partial from gettext import gettext from itertools import chain from pathlib import Path from time import sleep from typing import List import requests from socks import __version__ as socks_version from websocket import __version__ as websocket_version import streamlink.logger as logger from streamlink import NoPluginError, PluginError, StreamError, Streamlink, __version__ as streamlink_version from streamlink.cache import Cache from streamlink.exceptions import FatalPluginError from streamlink.plugin import Plugin, PluginOptions from streamlink.stream import StreamIO, StreamProcess from streamlink.utils.named_pipe import NamedPipe from streamlink_cli.argparser import build_parser from streamlink_cli.compat import DeprecatedPath, is_win32, stdout from streamlink_cli.console import ConsoleOutput, ConsoleUserInputRequester from streamlink_cli.constants import CONFIG_FILES, DEFAULT_STREAM_METADATA, LOG_DIR, PLUGIN_DIRS, STREAM_SYNONYMS from streamlink_cli.output import FileOutput, Output, PlayerOutput from streamlink_cli.utils import Formatter, HTTPServer, datetime, ignored, progress, stream_to_url ACCEPTABLE_ERRNO = (errno.EPIPE, errno.EINVAL, errno.ECONNRESET) try: ACCEPTABLE_ERRNO += (errno.WSAECONNABORTED,) except AttributeError: pass # Not windows QUIET_OPTIONS = ("json", "stream_url", "subprocess_cmdline", "quiet") args = None console: ConsoleOutput = None output: Output = None plugin: Plugin = None stream_fd: StreamIO = None streamlink: Streamlink = None log = logging.getLogger("streamlink.cli") def get_formatter(plugin: Plugin): return Formatter( { "url": lambda: args.url, "author": lambda: plugin.get_author(), "category": lambda: plugin.get_category(), "game": lambda: plugin.get_category(), "title": lambda: plugin.get_title(), "time": lambda: datetime.now() }, { "time": lambda dt, fmt: dt.strftime(fmt) } ) def check_file_output(filename, force): """Checks if file already exists and ask the user if it should be overwritten if it does.""" log.debug("Checking file output") if os.path.isfile(filename) and not force: if sys.stdin.isatty(): answer = console.ask(f"File {filename} already exists! Overwrite it? [y/N] ") if answer.lower() != "y": sys.exit() else: log.error(f"File {filename} already exists, use --force to overwrite it.") sys.exit() return FileOutput(filename) def create_output(formatter: Formatter): """Decides where to write the stream. Depending on arguments it can be one of these: - The stdout pipe - A subprocess' stdin pipe - A named pipe that the subprocess reads from - A regular file """ if (args.output or args.stdout) and (args.record or args.record_and_pipe): console.exit("Cannot use record options with other file output options.") if args.output: if args.output == "-": out = FileOutput(fd=stdout) else: out = check_file_output(formatter.filename(args.output, args.fs_safe_rules), args.force) elif args.stdout: out = FileOutput(fd=stdout) elif args.record_and_pipe: record = check_file_output(formatter.filename(args.record_and_pipe, args.fs_safe_rules), args.force) out = FileOutput(fd=stdout, record=record) else: http = namedpipe = record = None if not args.player: console.exit("The default player (VLC) does not seem to be " "installed. 
You must specify the path to a player " "executable with --player.") if args.player_fifo: try: namedpipe = NamedPipe() except OSError as err: console.exit(f"Failed to create pipe: {err}") elif args.player_http: http = create_http_server() if args.record: record = check_file_output(formatter.filename(args.record, args.fs_safe_rules), args.force) log.info(f"Starting player: {args.player}") out = PlayerOutput( args.player, args=args.player_args, quiet=not args.verbose_player, kill=not args.player_no_close, namedpipe=namedpipe, http=http, record=record, title=formatter.title(args.title, defaults=DEFAULT_STREAM_METADATA) if args.title else args.url ) return out def create_http_server(*_args, **_kwargs): """Creates a HTTP server listening on a given host and port. If host is empty, listen on all available interfaces, and if port is 0, listen on a random high port. """ try: http = HTTPServer() http.bind(*_args, **_kwargs) except OSError as err: console.exit(f"Failed to create HTTP server: {err}") return http def iter_http_requests(server, player): """Repeatedly accept HTTP connections on a server. Forever if the serving externally, or while a player is running if it is not empty. """ while not player or player.running: try: yield server.open(timeout=2.5) except OSError: continue def output_stream_http(plugin, initial_streams, formatter: Formatter, external=False, port=0): """Continuously output the stream over HTTP.""" global output if not external: if not args.player: console.exit("The default player (VLC) does not seem to be " "installed. You must specify the path to a player " "executable with --player.") server = create_http_server() player = output = PlayerOutput( args.player, args=args.player_args, filename=server.url, quiet=not args.verbose_player, title=formatter.title(args.title, defaults=DEFAULT_STREAM_METADATA) if args.title else args.url ) try: log.info(f"Starting player: {args.player}") if player: player.open() except OSError as err: console.exit(f"Failed to start player: {args.player} ({err})") else: server = create_http_server(host=None, port=port) player = None log.info("Starting server, access with one of:") for url in server.urls: log.info(" " + url) for req in iter_http_requests(server, player): user_agent = req.headers.get("User-Agent") or "unknown player" log.info(f"Got HTTP request from {user_agent}") stream_fd = prebuffer = None while not stream_fd and (not player or player.running): try: streams = initial_streams or fetch_streams(plugin) initial_streams = None for stream_name in (resolve_stream_name(streams, s) for s in args.stream): if stream_name in streams: stream = streams[stream_name] break else: log.info("Stream not available, will re-fetch streams in 10 sec") sleep(10) continue except PluginError as err: log.error(f"Unable to fetch new streams: {err}") continue try: log.info(f"Opening stream: {stream_name} ({type(stream).shortname()})") stream_fd, prebuffer = open_stream(stream) except StreamError as err: log.error(err) if stream_fd and prebuffer: log.debug("Writing stream to player") read_stream(stream_fd, server, prebuffer, formatter) server.close(True) player.close() server.close() def output_stream_passthrough(stream, formatter: Formatter): """Prepares a filename to be passed to the player.""" global output filename = f'"{stream_to_url(stream)}"' output = PlayerOutput( args.player, args=args.player_args, filename=filename, call=True, quiet=not args.verbose_player, title=formatter.title(args.title, defaults=DEFAULT_STREAM_METADATA) if args.title else args.url ) 
try: log.info(f"Starting player: {args.player}") output.open() except OSError as err: console.exit(f"Failed to start player: {args.player} ({err})") return False return True def open_stream(stream): """Opens a stream and reads 8192 bytes from it. This is useful to check if a stream actually has data before opening the output. """ global stream_fd # Attempts to open the stream try: stream_fd = stream.open() except StreamError as err: raise StreamError(f"Could not open stream: {err}") # Read 8192 bytes before proceeding to check for errors. # This is to avoid opening the output unnecessarily. try: log.debug("Pre-buffering 8192 bytes") prebuffer = stream_fd.read(8192) except OSError as err: stream_fd.close() raise StreamError(f"Failed to read data from stream: {err}") if not prebuffer: stream_fd.close() raise StreamError("No data returned from stream") return stream_fd, prebuffer def output_stream(stream, formatter: Formatter): """Open stream, create output and finally write the stream to output.""" global output success_open = False for i in range(args.retry_open): try: stream_fd, prebuffer = open_stream(stream) success_open = True break except StreamError as err: log.error(f"Try {i + 1}/{args.retry_open}: Could not open stream {stream} ({err})") if not success_open: console.exit(f"Could not open stream {stream}, tried {args.retry_open} times, exiting") output = create_output(formatter) try: output.open() except OSError as err: if isinstance(output, PlayerOutput): console.exit(f"Failed to start player: {args.player} ({err})") else: console.exit(f"Failed to open output: {output.filename} ({err})") with closing(output): log.debug("Writing stream to output") read_stream(stream_fd, output, prebuffer, formatter) return True def read_stream(stream, output, prebuffer, formatter: Formatter, chunk_size=8192): """Reads data from stream and then writes it to the output.""" is_player = isinstance(output, PlayerOutput) is_http = isinstance(output, HTTPServer) is_fifo = is_player and output.namedpipe show_progress = ( isinstance(output, FileOutput) and output.fd is not stdout and (sys.stdout.isatty() or args.force_progress) ) show_record_progress = ( hasattr(output, "record") and isinstance(output.record, FileOutput) and output.record.fd is not stdout and (sys.stdout.isatty() or args.force_progress) ) stream_iterator = chain( [prebuffer], iter(partial(stream.read, chunk_size), b"") ) if show_progress: stream_iterator = progress( stream_iterator, prefix=os.path.basename(output.filename) ) elif show_record_progress: stream_iterator = progress( stream_iterator, prefix=os.path.basename(output.record.filename) ) try: for data in stream_iterator: # We need to check if the player process still exists when # using named pipes on Windows since the named pipe is not # automatically closed by the player. if is_win32 and is_fifo: output.player.poll() if output.player.returncode is not None: log.info("Player closed") break try: output.write(data) except OSError as err: if is_player and err.errno in ACCEPTABLE_ERRNO: log.info("Player closed") elif is_http and err.errno in ACCEPTABLE_ERRNO: log.info("HTTP connection closed") else: console.exit(f"Error when writing to output: {err}, exiting") break except OSError as err: console.exit(f"Error when reading from stream: {err}, exiting") finally: stream.close() log.info("Stream ended") def handle_stream(plugin, streams, stream_name): """Decides what to do with the selected stream. 
Depending on arguments it can be one of these: - Output internal command-line - Output JSON represenation - Continuously output the stream over HTTP - Output stream data to selected output """ stream_name = resolve_stream_name(streams, stream_name) stream = streams[stream_name] # Print internal command-line if this stream # uses a subprocess. if args.subprocess_cmdline: if isinstance(stream, StreamProcess): try: cmdline = stream.cmdline() except StreamError as err: console.exit(err) console.msg(cmdline) else: console.exit("The stream specified cannot be translated to a command") # Print JSON representation of the stream elif args.json: console.msg_json( stream, metadata=plugin.get_metadata() ) elif args.stream_url: try: console.msg(stream.to_url()) except TypeError: console.exit("The stream specified cannot be translated to a URL") # Output the stream else: # Find any streams with a '_alt' suffix and attempt # to use these in case the main stream is not usable. alt_streams = list(filter(lambda k: stream_name + "_alt" in k, sorted(streams.keys()))) file_output = args.output or args.stdout formatter = get_formatter(plugin) for stream_name in [stream_name] + alt_streams: stream = streams[stream_name] stream_type = type(stream).shortname() if stream_type in args.player_passthrough and not file_output: log.info(f"Opening stream: {stream_name} ({stream_type})") success = output_stream_passthrough(stream, formatter) elif args.player_external_http: return output_stream_http(plugin, streams, formatter, external=True, port=args.player_external_http_port) elif args.player_continuous_http and not file_output: return output_stream_http(plugin, streams, formatter) else: log.info(f"Opening stream: {stream_name} ({stream_type})") success = output_stream(stream, formatter) if success: break def fetch_streams(plugin): """Fetches streams using correct parameters.""" return plugin.streams(stream_types=args.stream_types, sorting_excludes=args.stream_sorting_excludes) def fetch_streams_with_retry(plugin, interval, count): """Attempts to fetch streams repeatedly until some are returned or limit hit.""" try: streams = fetch_streams(plugin) except PluginError as err: log.error(err) streams = None if not streams: log.info(f"Waiting for streams, retrying every {interval} second(s)") attempts = 0 while not streams: sleep(interval) try: streams = fetch_streams(plugin) except FatalPluginError: raise except PluginError as err: log.error(err) if count > 0: attempts += 1 if attempts >= count: break return streams def resolve_stream_name(streams, stream_name): """Returns the real stream name of a synonym.""" if stream_name in STREAM_SYNONYMS and stream_name in streams: for name, stream in streams.items(): if stream is streams[stream_name] and name not in STREAM_SYNONYMS: return name return stream_name def format_valid_streams(plugin, streams): """Formats a dict of streams. Filters out synonyms and displays them next to the stream they point to. Streams are sorted according to their quality (based on plugin.stream_weight). """ delimiter = ", " validstreams = [] for name, stream in sorted(streams.items(), key=lambda stream: plugin.stream_weight(stream[0])): if name in STREAM_SYNONYMS: continue def synonymfilter(n): return stream is streams[n] and n is not name synonyms = list(filter(synonymfilter, streams.keys())) if len(synonyms) > 0: joined = delimiter.join(synonyms) name = f"{name} ({joined})" validstreams.append(name) return delimiter.join(validstreams) def handle_url(): """The URL handler. 
Attempts to resolve the URL to a plugin and then attempts to fetch a list of available streams. Proceeds to handle stream if user specified a valid one, otherwise output list of valid streams. """ try: plugin = streamlink.resolve_url(args.url) setup_plugin_options(streamlink, plugin) log.info(f"Found matching plugin {plugin.module} for URL {args.url}") if args.retry_max or args.retry_streams: retry_streams = 1 retry_max = 0 if args.retry_streams: retry_streams = args.retry_streams if args.retry_max: retry_max = args.retry_max streams = fetch_streams_with_retry(plugin, retry_streams, retry_max) else: streams = fetch_streams(plugin) except NoPluginError: console.exit(f"No plugin can handle URL: {args.url}") except PluginError as err: console.exit(err) if not streams: console.exit(f"No playable streams found on this URL: {args.url}") if args.default_stream and not args.stream and not args.json: args.stream = args.default_stream if args.stream: validstreams = format_valid_streams(plugin, streams) for stream_name in args.stream: if stream_name in streams: log.info(f"Available streams: {validstreams}") handle_stream(plugin, streams, stream_name) return err = f"The specified stream(s) '{', '.join(args.stream)}' could not be found" if args.json: console.msg_json( plugin=plugin.module, metadata=plugin.get_metadata(), streams=streams, error=err ) else: console.exit(f"{err}.\n Available streams: {validstreams}") elif args.json: console.msg_json( plugin=plugin.module, metadata=plugin.get_metadata(), streams=streams ) elif args.stream_url: try: console.msg(streams[list(streams)[-1]].to_manifest_url()) except TypeError: console.exit("The stream specified cannot be translated to a URL") else: validstreams = format_valid_streams(plugin, streams) console.msg(f"Available streams: {validstreams}") def print_plugins(): """Outputs a list of all plugins Streamlink has loaded.""" pluginlist = list(streamlink.get_plugins().keys()) pluginlist_formatted = ", ".join(sorted(pluginlist)) if args.json: console.msg_json(pluginlist) else: console.msg(f"Loaded plugins: {pluginlist_formatted}") def load_plugins(dirs: List[Path], showwarning: bool = True): """Attempts to load plugins from a list of directories.""" for directory in dirs: if directory.is_dir(): success = streamlink.load_plugins(str(directory)) if success and type(directory) is DeprecatedPath: log.info(f"Loaded plugins from deprecated path, see CLI docs for how to migrate: {directory}") elif showwarning: log.warning(f"Plugin path {directory} does not exist or is not a directory!") def setup_args(parser: argparse.ArgumentParser, config_files: List[Path] = None, ignore_unknown: bool = False): """Parses arguments.""" global args arglist = sys.argv[1:] # Load arguments from config files configs = [f"@{config_file}" for config_file in config_files or []] args, unknown = parser.parse_known_args(configs + arglist) if unknown and not ignore_unknown: msg = gettext("unrecognized arguments: %s") parser.error(msg % " ".join(unknown)) # Force lowercase to allow case-insensitive lookup if args.stream: args.stream = [stream.lower() for stream in args.stream] if not args.url and args.url_param: args.url = args.url_param def setup_config_args(parser, ignore_unknown=False): config_files = [] if args.config: # We want the config specified last to get highest priority for config_file in map(lambda path: Path(path).expanduser(), reversed(args.config)): if config_file.is_file(): config_files.append(config_file) else: # Only load first available default config for config_file in 
filter(lambda path: path.is_file(), CONFIG_FILES): if type(config_file) is DeprecatedPath: log.info(f"Loaded config from deprecated path, see CLI docs for how to migrate: {config_file}") config_files.append(config_file) break if streamlink and args.url: # Only load first available plugin config with ignored(NoPluginError): plugin = streamlink.resolve_url(args.url) for config_file in CONFIG_FILES: config_file = config_file.with_name(f"{config_file.name}.{plugin.module}") if not config_file.is_file(): continue if type(config_file) is DeprecatedPath: log.info(f"Loaded plugin config from deprecated path, see CLI docs for how to migrate: {config_file}") config_files.append(config_file) break if config_files: setup_args(parser, config_files, ignore_unknown=ignore_unknown) def setup_signals(): # Handle SIGTERM just like SIGINT signal.signal(signal.SIGTERM, signal.default_int_handler) def setup_http_session(): """Sets the global HTTP settings, such as proxy and headers.""" if args.http_proxy: streamlink.set_option("http-proxy", args.http_proxy) if args.https_proxy: streamlink.set_option("https-proxy", args.https_proxy) if args.http_cookie: streamlink.set_option("http-cookies", dict(args.http_cookie)) if args.http_header: streamlink.set_option("http-headers", dict(args.http_header)) if args.http_query_param: streamlink.set_option("http-query-params", dict(args.http_query_param)) if args.http_ignore_env: streamlink.set_option("http-trust-env", False) if args.http_no_ssl_verify: streamlink.set_option("http-ssl-verify", False) if args.http_disable_dh: streamlink.set_option("http-disable-dh", True) if args.http_ssl_cert: streamlink.set_option("http-ssl-cert", args.http_ssl_cert) if args.http_ssl_cert_crt_key: streamlink.set_option("http-ssl-cert", tuple(args.http_ssl_cert_crt_key)) if args.http_timeout: streamlink.set_option("http-timeout", args.http_timeout) def setup_plugins(extra_plugin_dir=None): """Loads any additional plugins.""" load_plugins(PLUGIN_DIRS, showwarning=False) if extra_plugin_dir: load_plugins([Path(path).expanduser() for path in extra_plugin_dir]) def setup_streamlink(): """Creates the Streamlink session.""" global streamlink streamlink = Streamlink({"user-input-requester": ConsoleUserInputRequester(console)}) def setup_options(): """Sets Streamlink options.""" if args.interface: streamlink.set_option("interface", args.interface) if args.ipv4: streamlink.set_option("ipv4", args.ipv4) if args.ipv6: streamlink.set_option("ipv6", args.ipv6) if args.ringbuffer_size: streamlink.set_option("ringbuffer-size", args.ringbuffer_size) if args.mux_subtitles: streamlink.set_option("mux-subtitles", args.mux_subtitles) if args.hds_live_edge: streamlink.set_option("hds-live-edge", args.hds_live_edge) if args.hls_live_edge: streamlink.set_option("hls-live-edge", args.hls_live_edge) if args.hls_playlist_reload_attempts: streamlink.set_option("hls-playlist-reload-attempts", args.hls_playlist_reload_attempts) if args.hls_playlist_reload_time: streamlink.set_option("hls-playlist-reload-time", args.hls_playlist_reload_time) if args.hls_segment_ignore_names: streamlink.set_option("hls-segment-ignore-names", args.hls_segment_ignore_names) if args.hls_segment_key_uri: streamlink.set_option("hls-segment-key-uri", args.hls_segment_key_uri) if args.hls_audio_select: streamlink.set_option("hls-audio-select", args.hls_audio_select) if args.hls_start_offset: streamlink.set_option("hls-start-offset", args.hls_start_offset) if args.hls_duration: streamlink.set_option("hls-duration", args.hls_duration) if 
args.hls_live_restart: streamlink.set_option("hls-live-restart", args.hls_live_restart) if args.rtmp_rtmpdump: streamlink.set_option("rtmp-rtmpdump", args.rtmp_rtmpdump) elif args.rtmpdump: streamlink.set_option("rtmp-rtmpdump", args.rtmpdump) if args.rtmp_proxy: streamlink.set_option("rtmp-proxy", args.rtmp_proxy) # deprecated if args.hds_segment_attempts: streamlink.set_option("hds-segment-attempts", args.hds_segment_attempts) if args.hds_segment_threads: streamlink.set_option("hds-segment-threads", args.hds_segment_threads) if args.hds_segment_timeout: streamlink.set_option("hds-segment-timeout", args.hds_segment_timeout) if args.hds_timeout: streamlink.set_option("hds-timeout", args.hds_timeout) if args.hls_segment_attempts: streamlink.set_option("hls-segment-attempts", args.hls_segment_attempts) if args.hls_segment_threads: streamlink.set_option("hls-segment-threads", args.hls_segment_threads) if args.hls_segment_timeout: streamlink.set_option("hls-segment-timeout", args.hls_segment_timeout) if args.hls_timeout: streamlink.set_option("hls-timeout", args.hls_timeout) if args.http_stream_timeout: streamlink.set_option("http-stream-timeout", args.http_stream_timeout) if args.rtmp_timeout: streamlink.set_option("rtmp-timeout", args.rtmp_timeout) # generic stream- arguments take precedence over deprecated stream-type arguments if args.stream_segment_attempts: streamlink.set_option("stream-segment-attempts", args.stream_segment_attempts) if args.stream_segment_threads: streamlink.set_option("stream-segment-threads", args.stream_segment_threads) if args.stream_segment_timeout: streamlink.set_option("stream-segment-timeout", args.stream_segment_timeout) if args.stream_timeout: streamlink.set_option("stream-timeout", args.stream_timeout) if args.ffmpeg_ffmpeg: streamlink.set_option("ffmpeg-ffmpeg", args.ffmpeg_ffmpeg) if args.ffmpeg_verbose: streamlink.set_option("ffmpeg-verbose", args.ffmpeg_verbose) if args.ffmpeg_verbose_path: streamlink.set_option("ffmpeg-verbose-path", args.ffmpeg_verbose_path) if args.ffmpeg_fout: streamlink.set_option("ffmpeg-fout", args.ffmpeg_fout) if args.ffmpeg_video_transcode: streamlink.set_option("ffmpeg-video-transcode", args.ffmpeg_video_transcode) if args.ffmpeg_audio_transcode: streamlink.set_option("ffmpeg-audio-transcode", args.ffmpeg_audio_transcode) if args.ffmpeg_copyts: streamlink.set_option("ffmpeg-copyts", args.ffmpeg_copyts) if args.ffmpeg_start_at_zero: streamlink.set_option("ffmpeg-start-at-zero", args.ffmpeg_start_at_zero) streamlink.set_option("subprocess-errorlog", args.subprocess_errorlog) streamlink.set_option("subprocess-errorlog-path", args.subprocess_errorlog_path) streamlink.set_option("locale", args.locale) def setup_plugin_args(session, parser): """Sets Streamlink plugin options.""" plugin_args = parser.add_argument_group("Plugin options") for pname, plugin in session.plugins.items(): defaults = {} group = plugin_args.add_argument_group(pname.capitalize()) for parg in plugin.arguments: if not parg.is_global: group.add_argument(parg.argument_name(pname), **parg.options) defaults[parg.dest] = parg.default else: pargdest = parg.dest for action in parser._actions: # find matching global argument if pargdest != action.dest: continue defaults[pargdest] = action.default # add plugin to global argument plugins = getattr(action, "plugins", []) plugins.append(pname) setattr(action, "plugins", plugins) plugin.options = PluginOptions(defaults) def setup_plugin_options(session, plugin): """Sets Streamlink plugin options.""" pname = plugin.module 
required = OrderedDict({}) for parg in plugin.arguments: if parg.options.get("help") == argparse.SUPPRESS: continue value = getattr(args, parg.dest if parg.is_global else parg.namespace_dest(pname)) session.set_plugin_option(pname, parg.dest, value) if not parg.is_global: if parg.required: required[parg.name] = parg # if the value is set, check to see if any of the required arguments are not set if parg.required or value: try: for rparg in plugin.arguments.requires(parg.name): required[rparg.name] = rparg except RuntimeError: log.error(f"{pname} plugin has a configuration error and the arguments cannot be parsed") break if required: for req in required.values(): if not session.get_plugin_option(pname, req.dest): prompt = f"{req.prompt or f'Enter {pname} {req.name}'}: " session.set_plugin_option( pname, req.dest, console.askpass(prompt) if req.sensitive else console.ask(prompt) ) def log_root_warning(): if hasattr(os, "getuid"): if os.geteuid() == 0: log.info("streamlink is running as root! Be careful!") def log_current_versions(): """Show current installed versions""" if not logger.root.isEnabledFor(logging.DEBUG): return # macOS if sys.platform == "darwin": os_version = f"macOS {platform.mac_ver()[0]}" # Windows elif sys.platform == "win32": os_version = f"{platform.system()} {platform.release()}" # Linux / other else: os_version = platform.platform() log.debug(f"OS: {os_version}") log.debug(f"Python: {platform.python_version()}") log.debug(f"Streamlink: {streamlink_version}") log.debug(f"Requests({requests.__version__}), " f"Socks({socks_version}), " f"Websocket({websocket_version})") def log_current_arguments(session, parser): global args if not logger.root.isEnabledFor(logging.DEBUG): return sensitive = set() for pname, plugin in session.plugins.items(): for parg in plugin.arguments: if parg.sensitive: sensitive.add(parg.argument_name(pname)) log.debug("Arguments:") for action in parser._actions: if not hasattr(args, action.dest): continue value = getattr(args, action.dest) if action.default != value: name = next( # pragma: no branch (option for option in action.option_strings if option.startswith("--")), action.option_strings[0] ) if action.option_strings else action.dest log.debug(f" {name}={value if name not in sensitive else '*' * 8}") def check_version(force=False): cache = Cache(filename="cli.json") latest_version = cache.get("latest_version") if force or not latest_version: res = requests.get("https://pypi.python.org/pypi/streamlink/json") data = res.json() latest_version = data.get("info").get("version") cache.set("latest_version", latest_version, (60 * 60 * 24)) version_info_printed = cache.get("version_info_printed") if not force and version_info_printed: return installed_version = StrictVersion(streamlink.version) latest_version = StrictVersion(latest_version) if latest_version > installed_version: log.info(f"A new version of Streamlink ({latest_version}) is available!") cache.set("version_info_printed", True, (60 * 60 * 6)) elif force: log.info(f"Your Streamlink version ({installed_version}) is up to date!") if force: sys.exit() def setup_logger_and_console(stream=sys.stdout, filename=None, level="info", json=False): global console if filename == "-": filename = LOG_DIR / f"{datetime.now()}.log" elif filename: filename = Path(filename).expanduser().resolve() if filename: filename.parent.mkdir(parents=True, exist_ok=True) streamhandler = logger.basicConfig( stream=stream, filename=filename, level=level, style="{", format=("[{asctime}]" if level == "trace" else "") + 
"[{name}][{levelname}] {message}", datefmt="%H:%M:%S" + (".%f" if level == "trace" else "") ) console = ConsoleOutput(streamhandler.stream, json) def main(): error_code = 0 parser = build_parser() setup_args(parser, ignore_unknown=True) # call argument set up as early as possible to load args from config files setup_config_args(parser, ignore_unknown=True) # Console output should be on stderr if we are outputting # a stream to stdout. if args.stdout or args.output == "-" or args.record_and_pipe: console_out = sys.stderr else: console_out = sys.stdout # We don't want log output when we are printing JSON or a command-line. silent_log = any(getattr(args, attr) for attr in QUIET_OPTIONS) log_level = args.loglevel if not silent_log else "none" log_file = args.logfile if log_level != "none" else None setup_logger_and_console(console_out, log_file, log_level, args.json) setup_signals() setup_streamlink() # load additional plugins setup_plugins(args.plugin_dirs) setup_plugin_args(streamlink, parser) # call setup args again once the plugin specific args have been added setup_args(parser) setup_config_args(parser) # update the logging level if changed by a plugin specific config log_level = args.loglevel if not silent_log else "none" logger.root.setLevel(log_level) setup_http_session() log_root_warning() log_current_versions() log_current_arguments(streamlink, parser) if args.version_check or args.auto_version_check: with ignored(Exception): check_version(force=args.version_check) if args.plugins: print_plugins() elif args.can_handle_url: try: streamlink.resolve_url(args.can_handle_url) except NoPluginError: error_code = 1 except KeyboardInterrupt: error_code = 130 elif args.can_handle_url_no_redirect: try: streamlink.resolve_url_no_redirect(args.can_handle_url_no_redirect) except NoPluginError: error_code = 1 except KeyboardInterrupt: error_code = 130 elif args.url: try: setup_options() handle_url() except KeyboardInterrupt: # Close output if output: output.close() console.msg("Interrupted! Exiting...") error_code = 130 finally: if stream_fd: try: log.info("Closing currently open stream...") stream_fd.close() except KeyboardInterrupt: error_code = 130 elif args.help: parser.print_help() else: usage = parser.format_usage() console.msg( f"{usage}\n" f"Use -h/--help to see the available options or read the manual at https://streamlink.github.io" ) sys.exit(error_code) def parser_helper(): session = Streamlink() parser = build_parser() setup_plugin_args(session, parser) return parser
1.859375
2
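The CLI plumbing above ultimately boils down to resolving a URL to streams, opening one, and pre-buffering 8192 bytes before committing to an output. A minimal library-level sketch of that flow follows; the URL is a placeholder and this assumes the public Streamlink session API of roughly the same version.

from streamlink import Streamlink

session = Streamlink()
streams = session.streams("https://example.com/live")   # placeholder URL
best = streams.get("best") if streams else None
if best:
    fd = best.open()
    try:
        prebuffer = fd.read(8192)   # same pre-buffer check open_stream() performs
        print(f"got {len(prebuffer)} bytes, stream looks alive")
    finally:
        fd.close()
else:
    print("no playable streams found")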
dbaccesslibUserMailInfo.py
Koushik-ks/FlaskAPP
0
2554
from io import BytesIO
from io import StringIO
import json
from bson.dbref import DBRef
import datetime
from bson import json_util
import logging
import base64

jsonCode = {
    "building": {
        "Essae Vaishnavi Solitaire": {
            "id": "B1",
            "division": {
                "SS": {
                    "id": "D1",
                    "dept": {
                        "Semicon": {"id": "DEP1", "floor": {"0": "0", "1": "1", "2": "2", "3": "3", "4": "4", "5": "5", "6": "6"}},
                        "RND": {"id": "DEP2", "floor": {"0": "0", "1": "1", "2": "2", "3": "3", "4": "4", "5": "5", "6": "6"}},
                        "Mobile": {"id": "DEP3", "floor": {"0": "0", "1": "1", "2": "2", "3": "3", "4": "4", "5": "5", "6": "6"}}
                    }
                },
                "TTEC": {
                    "id": "D2",
                    "dept": {
                        "TTEC-AL": {"id": "DEP1", "floor": {"0": "0", "1": "1", "2": "2", "3": "3", "4": "4", "5": "5", "6": "6"}},
                        "TTEC-SL": {"id": "DEP2", "floor": {"0": "0", "1": "1", "2": "2", "3": "3", "4": "4", "5": "5", "6": "6"}},
                        "TTEC-DL": {"id": "DEP3", "floor": {"0": "0", "1": "1", "2": "2", "3": "3", "4": "4", "5": "5", "6": "6"}},
                        "TTEC-CI": {"id": "DEP4", "floor": {"0": "0", "1": "1", "2": "2", "3": "3", "4": "4", "5": "5", "6": "6"}}
                    }
                }
            }
        },
        "Fortune Summit": {
            "id": "B2",
            "division": {
                "TMSC": {
                    "id": "D1",
                    "dept": {
                        "Medical": {"id": "DEP1", "floor": {"0": "0", "1": "1", "2": "2", "3": "3", "4": "4", "5": "5", "6": "6"}},
                        "RND": {"id": "DEP2", "floor": {"0": "0", "1": "1", "2": "2", "3": "3", "4": "4", "5": "5", "6": "6"}},
                        "Imaging": {"id": "DEP3", "floor": {"0": "0", "1": "1", "2": "2", "3": "3", "4": "4", "5": "5", "6": "6"}}
                    }
                },
                "tmc": {
                    "id": "D2",
                    "dept": {
                        "tmc-1": {"id": "DEP1", "floor": {"0": "0", "1": "1", "2": "2", "3": "3", "4": "4", "5": "5", "6": "6"}},
                        "tmc-2": {"id": "DEP2", "floor": {"0": "0", "1": "1", "2": "2", "3": "3", "4": "4", "5": "5", "6": "6"}},
                        "tmc-3": {"id": "DEP3", "floor": {"0": "0", "1": "1", "2": "2", "3": "3", "4": "4", "5": "5", "6": "6"}}
                    }
                }
            }
        }
    }
}

#Create and configure logger
logging.basicConfig(filename="server.log", format='%(asctime)s %(message)s', filemode='a')

#Creating an object
logger = logging.getLogger()

#Setting the threshold of logger to DEBUG
logger.setLevel(logging.DEBUG)

import pymongo

uri = "mongodb://218ffa09-0ee0-4-231-b9ee:zTV4cwDG0vM49J2GFsw72JzwOD79Bv3dPU8fbVLb5pbh3p0CmTBYcvhrFKTjtl1s7hgYSfRbMOrsVve6hfvhag==@218ffa09-0ee0-4-231-b9ee.documents.azure.com:10255/?ssl=true&replicaSet=globaldb"
client = pymongo.MongoClient(uri)
print("Obtained the client")
mydb = client.test


def sortingReq(item):
    new_thrash_date = datetime.datetime.strptime(item["scan_date"], '%d-%m-%Y').date()
    return new_thrash_date


def checkIfAutoThrashed(jsonData, tags):
    if(len(tags) < 3):
        return False
    a = mydb.userInfo.find_one({"name":jsonData["name"]})
    newDbref = DBRef("mydb.userInfo", a["_id"])
    foundMails = mydb.mltable.find({"otherdbref":newDbref,"status":"trash"})
    foundMailsList = list(mydb.mltable.find({"otherdbref":newDbref,"status":"trash"}))
    if(len(foundMailsList) < 10):
        return False
    tagcount = 0
    thrashcount = 0
    for item in foundMails:
        for tag in tags:
            if(tag in item["tags"]):
                tagcount += 1
        if(tagcount >= 3):
            thrashcount += 1
    if(thrashcount >= 10):
        return True
    return False


def generateqrcode(jsonData, filenameJPG, tags, fromMFP):
    logger.debug("Received data for generating color code = ")
    logger.debug(jsonData)
    ilocation = 1
    today = datetime.datetime.now()
    date = str(today.day)
    time = str(today.hour) + ":" + str(today.minute) + ":" + str(today.second) + ":" + str(today.microsecond)
    dateTimeNow = date + ':' + time
    logger.debug("Current Datetime - " + dateTimeNow)
    dateTimeNow = str(today.day) + str(today.hour) + str(today.minute) + str(today.second) + (str(today.microsecond)[:2])
    logger.debug("Unique Code - " + dateTimeNow)
    if(int(jsonData["cubicle"]) > 25 and int(jsonData["cubicle"]) <= 50):
        ilocation = 2
    elif(int(jsonData["cubicle"]) > 50 and int(jsonData["cubicle"]) <= 75):
        ilocation = 3
    else:
        ilocation = 4
    logger.debug(jsonData["building"])
    colorCode = jsonCode["building"][jsonData["building"]]["id"] + ':' + jsonCode["building"][jsonData["building"]]["division"][jsonData["division"]]["id"] + ':' + dateTimeNow
    logger.debug("ColorCode - " + colorCode)
    logger.debug("generateColorCode:: ColorCode value =" + colorCode)
    import qrcode
    img = qrcode.make(colorCode)
    logger.debug(type(img))
    autoThrashed = checkIfAutoThrashed(jsonData, tags)
    logger.debug("Auto thrashed value is %d" % autoThrashed)
    logger.debug("Tags are %s" % tags)
    import sendEmail as se
    se.execute(str(jsonData["email"]), filenameJPG, str(colorCode), img, autoThrashed, fromMFP)
    #img = qrcode.make(colorCode)
    #img.save(colorCode+".png")
    newjsonData = {"name":jsonData["name"],"code":colorCode,"email":jsonData["email"],"division":jsonData["division"],"department":jsonData["department"],"floor":jsonData["floor"],"cubicle":jsonData["cubicle"],"building":jsonData["building"]}
    if(fromMFP):
        newjsonData["source"] = "MFP"
    else:
        newjsonData["source"] = "Mobile"
    return addEntry(newjsonData, tags, autoThrashed);


def addEntry(jsonData, tags, autoThrashed):
    a = mydb.userInfo.find_one({"name":jsonData["name"]})
    newDbref = DBRef("mydb.userInfo", a["_id"])
    scan_date = datetime.datetime.today()
    scan_date = scan_date + datetime.timedelta(hours=9)
    end_date = scan_date + datetime.timedelta(days=10)
    scan_date = str(scan_date.day) + "-" + str(scan_date.month) + "-" + str(scan_date.year)
    end_date = str(end_date.day) + "-" + str(end_date.month) + "-" + str(end_date.year)
    if(autoThrashed):
        end_date = scan_date
    if(not autoThrashed and len(tags) >= 3):
        #mydb.mltable.insert({"code":jsonData["code"],"tags": tags,"status":"Keep","user_id":1,"otherdbref":newDbref}) Actual Code
        mydb.mltable.insert({"code":jsonData["code"],"tags": tags,"status":"Keep","user_id":1,"otherdbref":newDbref})  #Test code to be removed
        #end_date = scan_date
    mydb.userMailInfo.insert({"code":jsonData["code"],"scan_date":scan_date,"end_date":end_date,"otherdbref":newDbref,"userDeleted":False,"user_id":1,"source":jsonData["source"]})
    jsonData["autoThrashed"] = autoThrashed
    return json.dumps(jsonData)


def read_fromDB():
    new_list = list()
    for item in mydb.userMailInfo.find({}, {"_id":0,"user_id":0}):
        print(item)
        otherdbref = item["otherdbref"]
        newjson = mydb.userInfo.find_one({"_id":otherdbref.id}, {"_id":0,"user_id":0})
        dall = {}
        item.pop("otherdbref")
        dall.update(item)
        dall.update(newjson)
        print(dall)
        new_list.append(dall)
    new_list.reverse()
    return json.dumps(new_list, default=json_util.default)


def getspecificDate(jsonData):
    logger.debug(jsonData)
    num = int(jsonData['page'])
    skips = 10 * (num - 1)
    if(jsonData["action"] == "all"):
        all_list = list(mydb.userMailInfo.find({"userDeleted":False}, {'_id':0,'user_id':0}))
        all_list.reverse()
        totalsize = len(all_list)
        all_list = all_list[skips:]
        all_list = all_list[:10]
        new_list_new = list()
        for item in all_list:
            otherdbref = item["otherdbref"]
            newjson = mydb.userInfo.find_one({"_id":otherdbref.id}, {"_id":0,"user_id":0})
            dall = {}
            item.pop("otherdbref")
            dall.update(item)
            dall.update(newjson)
            print(dall)
            new_list_new.append(dall)
        new_list_new.append({"totalsize":totalsize})
        logger.debug(new_list_new)
        #new_list_new.sort(key = lambda x : x["name"])
        return json.dumps(new_list_new, default=json_util.default)
    elif(jsonData["action"] == "today"):
        all_list = list(mydb.userMailInfo.find({"userDeleted":False}, {'_id':0,'user_id':0}))
        thrash_date = datetime.datetime.today()
        thrash_date = thrash_date + datetime.timedelta(hours=9)
        thrash_date = str(thrash_date.day) + "-" + str(thrash_date.month) + "-" + str(thrash_date.year)
        thrash_date = datetime.datetime.strptime(thrash_date, '%d-%m-%Y').date()
        new_list = list()
        for item in all_list:
            if(item['end_date'] == "DONT TRASH"):
                continue
            db_date = datetime.datetime.strptime(item['end_date'], '%d-%m-%Y').date()
            if(db_date <= thrash_date):
                new_list.append(item)
        new_list.reverse()
        totalsize = len(new_list)
        new_list = new_list[skips:]
        new_list = new_list[:10]
        new_list_new = list()
        for item in new_list:
            otherdbref = item["otherdbref"]
            newjson = mydb.userInfo.find_one({"_id":otherdbref.id}, {"_id":0,"user_id":0})
            dall = {}
            item.pop("otherdbref")
            dall.update(item)
            dall.update(newjson)
            print(dall)
            new_list_new.append(dall)
        new_list_new.append({"totalsize":totalsize})
        logger.debug(new_list_new)
        #new_list_new.sort(key = lambda x : x["name"])
        return json.dumps(new_list_new, default=json_util.default)
    else:
        all_list = list(mydb.userMailInfo.find({"userDeleted":False}, {'_id':0,'user_id':0}))
        thrash_date = datetime.datetime.today()
        thrash_date = thrash_date + datetime.timedelta(hours=9)
        thrash_date = str(thrash_date.day) + "-" + str(thrash_date.month) + "-" + str(thrash_date.year)
        thrash_date = datetime.datetime.strptime(thrash_date, '%d-%m-%Y').date()
        new_list = list()
        for item in all_list:
            db_date = datetime.datetime.strptime(item['scan_date'], '%d-%m-%Y').date()
            if(db_date == thrash_date):
                new_list.append(item)
        new_list.reverse()
        totalsize = len(new_list)
        new_list = new_list[skips:]
        new_list = new_list[:10]
        new_list_new = list()
        for item in new_list:
            otherdbref = item["otherdbref"]
            newjson = mydb.userInfo.find_one({"_id":otherdbref.id}, {"_id":0,"user_id":0})
            dall = {}
            item.pop("otherdbref")
            dall.update(item)
            dall.update(newjson)
            print(dall)
            new_list_new.append(dall)
        new_list_new.append({"totalsize":totalsize})
        logger.debug(new_list_new)
        return json.dumps(new_list_new, default=json_util.default)


def update_DB(jsonData):
    logger.debug("DBUMI::Update_db() entry")
    logger.debug(jsonData["code"])
    logger.debug(jsonData["end_date"])
    foundmail = mydb.userMailInfo.find_one({"code":jsonData["code"]}, {"_id":1})
    logger.debug(foundmail)
    foundMl = mydb.mltable.find_one({"code":jsonData["code"]}, {"_id":1})
    logger.debug(foundMl)
    mydb.userMailInfo.update_many({"_id":foundmail["_id"],"user_id":1}, {"$set":{'end_date':str(jsonData['end_date'])}})
    if(not jsonData['end_date'] == "DONT TRASH"):
        mydb.mltable.update_many({"_id":foundMl["_id"],"user_id":1}, {"$set":{"status":"trash"}})
    return json.dumps({"status": "Success", "statusreason": "updateSucess"})


#Clear DB only for testing
def delete_entry(jsonData):
    logger.debug("DBUMI::delete_entry() entry")
    logger.debug(jsonData["code"])
    mydb.userMailInfo.delete_one({"code":jsonData["code"],"user_id":1})
    return json.dumps({"status": "Success", "statusreason": "updateSucess"})


def clear_db():
    mydb.userMailInfo.remove({})
2.296875
2
src/test/dags/bq_to_cm_dag_test.py
google/cc4d
0
2555
# python3
# coding=utf-8
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for dags.bq_to_cm_dag."""

import unittest

from airflow.contrib.hooks import bigquery_hook
from airflow.models import baseoperator
from airflow.models import dag
from airflow.models import variable
import mock

from gps_building_blocks.cloud.utils import cloud_auth

from dags import bq_to_cm_dag
from plugins.pipeline_plugins.hooks import monitoring_hook

_DAG_NAME = bq_to_cm_dag._DAG_NAME

AIRFLOW_VARIABLES = {
    'dag_name': _DAG_NAME,
    f'{_DAG_NAME}_schedule': '@once',
    f'{_DAG_NAME}_retries': 0,
    f'{_DAG_NAME}_retry_delay': 3,
    f'{_DAG_NAME}_is_retry': True,
    f'{_DAG_NAME}_is_run': True,
    f'{_DAG_NAME}_enable_run_report': False,
    f'{_DAG_NAME}_enable_monitoring': True,
    f'{_DAG_NAME}_enable_monitoring_cleanup': False,
    'monitoring_data_days_to_live': 50,
    'monitoring_dataset': 'test_monitoring_dataset',
    'monitoring_table': 'test_monitoring_table',
    'monitoring_bq_conn_id': 'test_monitoring_conn',
    'bq_dataset_id': 'test_dataset',
    'bq_table_id': 'test_table',
    'cm_profile_id': 'cm_profile_id',
    'cm_service_account': 'cm_service_account'
}


class BQToCMDAGTest(unittest.TestCase):

    def setUp(self):
        super(BQToCMDAGTest, self).setUp()
        self.addCleanup(mock.patch.stopall)

        self.build_impersonated_client_mock = mock.patch.object(
            cloud_auth, 'build_impersonated_client', autospec=True)
        self.build_impersonated_client_mock.return_value = mock.Mock()
        self.build_impersonated_client_mock.start()

        self.mock_variable = mock.patch.object(
            variable, 'Variable', autospec=True).start()
        # `side_effect` is assigned to `lambda` to dynamically return values
        # each time when self.mock_variable is called.
        self.mock_variable.get.side_effect = (
            lambda key, value: AIRFLOW_VARIABLES[key])

        self.original_bigquery_hook_init = bigquery_hook.BigQueryHook.__init__
        bigquery_hook.BigQueryHook.__init__ = mock.MagicMock()

        self.original_monitoring_hook = monitoring_hook.MonitoringHook
        monitoring_hook.MonitoringHook = mock.MagicMock()

    def tearDown(self):
        super().tearDown()
        bigquery_hook.BigQueryHook.__init__ = self.original_bigquery_hook_init
        monitoring_hook.MonitoringHook = self.original_monitoring_hook

    def test_create_dag(self):
        """Tests that returned DAG contains correct DAG and tasks."""
        expected_task_ids = ['bq_to_cm_retry_task', 'bq_to_cm_task']

        test_dag = bq_to_cm_dag.BigQueryToCMDag(
            AIRFLOW_VARIABLES['dag_name']).create_dag()

        self.assertIsInstance(test_dag, dag.DAG)
        self.assertEqual(len(test_dag.tasks), len(expected_task_ids))
        for task in test_dag.tasks:
            self.assertIsInstance(task, baseoperator.BaseOperator)
        actual_task_ids = [t.task_id for t in test_dag.tasks]
        self.assertListEqual(actual_task_ids, expected_task_ids)


if __name__ == '__main__':
    unittest.main()
1.851563
2
talentmap_api/common/management/commands/load_xml.py
burgwyn/State-TalentMAP-API
5
2556
from django.core.management.base import BaseCommand import logging import re from talentmap_api.common.xml_helpers import XMLloader, strip_extra_spaces, parse_boolean, parse_date, get_nested_tag from talentmap_api.language.models import Language, Proficiency from talentmap_api.position.models import Grade, Skill, Position, CapsuleDescription, SkillCone from talentmap_api.organization.models import Organization, Post, TourOfDuty, Location, Country class Command(BaseCommand): help = 'Loads an XML into a supported file' logger = logging.getLogger(__name__) def __init__(self, *args, **kwargs): super(Command, self).__init__(*args, **kwargs) self.modes = { 'languages': mode_languages, 'proficiencies': mode_proficiencies, 'grades': mode_grades, 'skills': mode_skills, 'organizations': mode_organizations, 'positions': mode_positions, 'tours_of_duty': mode_tour_of_duty, 'posts': mode_post, 'countries': mode_country, 'locations': mode_location, 'capsule_descriptions': mode_capsule_description, 'skill_cone': mode_skill_cone } def add_arguments(self, parser): parser.add_argument('file', nargs=1, type=str, help="The XML file to load") parser.add_argument('type', nargs=1, type=str, choices=self.modes.keys(), help="The type of data in the XML") parser.add_argument('--delete', dest='delete', action='store_true', help='Delete collisions') parser.add_argument('--update', dest='update', action='store_true', help='Update collisions') parser.add_argument('--skippost', dest='skip_post', action='store_true', help='Skip post load functions') def handle(self, *args, **options): model, instance_tag, tag_map, collision_field, post_load_function = self.modes[options['type'][0]]() # Set / update the collision behavior collision_behavior = None if options['delete']: collision_behavior = "delete" elif options['update']: collision_behavior = "update" else: collision_behavior = "skip" loader = XMLloader(model, instance_tag, tag_map, collision_behavior, collision_field) new_ids, updated_ids = loader.create_models_from_xml(options['file'][0]) # Run the post load function, if it exists if callable(post_load_function) and not options['skip_post']: post_load_function(new_ids, updated_ids) self.logger.info(f"XML Load Report\n\tNew: {len(new_ids)}\n\tUpdated: {len(updated_ids)}\t\t") def mode_languages(): model = Language instance_tag = "LANGUAGES:LANGUAGE" collision_field = "code" tag_map = { "LANGUAGES:LANG_CODE": "code", "LANGUAGES:LANG_LONG_DESC": "long_description", "LANGUAGES:LANG_SHORT_DESC": "short_description", "LANGUAGES:LANG_EFFECTIVE_DATE": parse_date("effective_date") } return (model, instance_tag, tag_map, collision_field, None) def mode_proficiencies(): model = Proficiency instance_tag = "LANGUAGE_PROFICIENCY:LANGUAGE_PROFICIENCY" collision_field = "code" tag_map = { "LANGUAGE_PROFICIENCY:LP_CODE": "code", "LANGUAGE_PROFICIENCY:LP_DESC": "description" } return (model, instance_tag, tag_map, collision_field, None) def mode_grades(): model = Grade instance_tag = "GRADES:GRADE" collision_field = "code" tag_map = { "GRADES:GRD_GRADE_CODE": "code" } def post_load_function(new_ids, updated_ids): for pos in Grade.objects.filter(id__in=new_ids + updated_ids): pos.update_relationships() return (model, instance_tag, tag_map, collision_field, post_load_function) def mode_skills(): model = Skill instance_tag = "SKILLS:SKILL" collision_field = "code" tag_map = { "SKILLS:SKILL_CODE": "code", "SKILLS:SKILL_DESCRIPTION": "description" } return (model, instance_tag, tag_map, collision_field, None) def mode_organizations(): model = 
Organization instance_tag = "DATA_RECORD" collision_field = "code" tag_map = { "ORG_CODE": "code", "ORG_SHORT_DESC": "short_description", "ORG_LONG_DESC": strip_extra_spaces("long_description"), "ORG_PARENT_ORG_CODE": "_parent_organization_code", "ORG_BUREAU_ORG_CODE": "_parent_bureau_code", "ORG_LOCATION_CODE": "_location_code" } # Update relationships def post_load_function(new_ids, updated_ids): for org in Organization.objects.filter(id__in=new_ids + updated_ids): org.update_relationships() # Regional code setting is done automatically by DOS Webservices, so # we now only need this logic when loading from our sample XML files # Array of regional codes regional_codes = [ "110000", "120000", "130000", "140000", "146000", "150000", "160000" ] if org.code in regional_codes: org.is_regional = True else: org.is_regional = False if org.code == org._parent_bureau_code: org.is_bureau = True org.save() return (model, instance_tag, tag_map, collision_field, post_load_function) def mode_positions(): model = Position instance_tag = "POSITIONS:POSITION" collision_field = "_seq_num" tag_map = { "POSITIONS:POS_SEQ_NUM": "_seq_num", "POSITIONS:POS_NUM_TEXT": "position_number", "POSITIONS:POS_TITLE_CODE": "_title_code", "POSITIONS:POS_TITLE_DESC": "title", "POSITIONS:POS_ORG_CODE": "_org_code", "POSITIONS:POS_BUREAU_CODE": "_bureau_code", "POSITIONS:POS_SKILL_CODE": "_skill_code", "POSITIONS:POS_STAFF_PTRN_SKILL_CODE": "_staff_ptrn_skill_code", "POSITIONS:POS_OVERSEAS_IND": parse_boolean("is_overseas", ['O']), "POSITIONS:POS_PAY_PLAN_CODE": "_pay_plan_code", "POSITIONS:POS_STATUS_CODE": "_status_code", "POSITIONS:POS_SERVICE_TYPE_CODE": "_service_type_code", "POSITIONS:POS_GRADE_CODE": "_grade_code", "POSITIONS:POS_POST_CODE": "_post_code", "POSITIONS:POS_LANGUAGE_1_CODE": "_language_1_code", "POSITIONS:POS_LANGUAGE_2_CODE": "_language_2_code", "POSITIONS:POS_LOCATION_CODE": "_location_code", "POSITIONS:POS_LANG_REQ_1_CODE": "_language_req_1_code", "POSITIONS:POS_LANG_REQ_2_CODE": "_language_req_2_code", "POSITIONS:POS_SPEAK_PROFICIENCY_1_CODE": "_language_1_spoken_proficiency_code", "POSITIONS:POS_READ_PROFICIENCY_1_CODE": "_language_1_reading_proficiency_code", "POSITIONS:POS_SPEAK_PROFICIENCY_2_CODE": "_language_2_spoken_proficiency_code", "POSITIONS:POS_READ_PROFICIENCY_2_CODE": "_language_2_reading_proficiency_code", "POSITIONS:POS_CREATE_ID": "_create_id", "POSITIONS:POS_CREATE_DATE": parse_date("create_date"), "POSITIONS:POS_UPDATE_ID": "_update_id", "POSITIONS:POS_UPDATE_DATE": parse_date("update_date"), "POSITIONS:POS_EFFECTIVE_DATE": parse_date("effective_date"), "POSITIONS:POS_JOBCODE_CODE": "_jobcode_code", "POSITIONS:POS_OCC_SERIES_CODE": "_occ_series_code", } def post_load_function(new_ids, updated_ids): for pos in Position.objects.filter(id__in=new_ids + updated_ids): pos.update_relationships() return (model, instance_tag, tag_map, collision_field, post_load_function) def mode_tour_of_duty(): model = TourOfDuty instance_tag = "TOUR_OF_DUTIES:TOUR_OF_DUTY" collision_field = "code" tag_map = { "TOUR_OF_DUTIES:TOD_CODE": "code", "TOUR_OF_DUTIES:TOD_SHORT_DESC": "short_description", "TOUR_OF_DUTIES:TOD_DESC_TEXT": lambda instance, item: setattr(instance, "long_description", re.sub('&amp;', '&', item.text).strip()), "TOUR_OF_DUTIES:TOD_MONTHS_NUM": "months" } return (model, instance_tag, tag_map, collision_field, None) def mode_post(): model = Post instance_tag = "BIDPOSTS:BIDDING_TOOL" collision_field = "_location_code" tag_map = { "BIDPOSTS:DSC_CD": "_location_code", "BIDPOSTS:TOD_CODE": 
"_tod_code", "BIDPOSTS:BT_COST_OF_LIVING_ADJUST_NUM": "cost_of_living_adjustment", "BIDPOSTS:BT_DIFFERENTIAL_RATE_NUM": "differential_rate", "BIDPOSTS:BT_REST_RELAXATION_POINT_TEXT": strip_extra_spaces("rest_relaxation_point"), "BIDPOSTS:BT_DANGER_PAY_NUM": "danger_pay", "BIDPOSTS:BT_CONSUMABLE_ALLOWANCE_FLG": parse_boolean("has_consumable_allowance"), "BIDPOSTS:BT_SERVICE_NEEDS_DIFF_FLG": parse_boolean("has_service_needs_differential"), } def post_load_function(new_ids, updated_ids): for loc in Post.objects.filter(id__in=new_ids + updated_ids): loc.update_relationships() return (model, instance_tag, tag_map, collision_field, post_load_function) def mode_country(): model = Country instance_tag = "DATA_RECORD" collision_field = "code" tag_map = { "COUNTRY_CODE": "code", "FULL_NAME": "name", "SHORT_NAME": "short_name", "COUNTRY_CODE_2": "short_code", "LOCATION_PREFIX": "location_prefix" } return (model, instance_tag, tag_map, collision_field, None) def mode_location(): model = Location instance_tag = "location" collision_field = "code" tag_map = { "code": "code", "city": strip_extra_spaces("city"), "state": strip_extra_spaces("state"), "country": "_country" } def post_load_function(new_ids, updated_ids): # Connect new locations to applicable posts for loc in Location.objects.filter(id__in=new_ids + updated_ids): Post.objects.filter(_location_code=loc.code).update(location=loc) return (model, instance_tag, tag_map, collision_field, post_load_function) def mode_capsule_description(): model = CapsuleDescription instance_tag = "position" collision_field = "_pos_seq_num" tag_map = { "POS_SEQ_NUM": "_pos_seq_num", "capsuleDescription": "content", } return (model, instance_tag, tag_map, collision_field, None) def mode_skill_cone(): model = SkillCone instance_tag = "jobCategorySkill" collision_field = None tag_map = { "id": "_id", "name": strip_extra_spaces("name"), "skill": get_nested_tag("_skill_codes", "code"), } return (model, instance_tag, tag_map, collision_field, None)
1.9375
2
gluon/tests/test_recfile.py
oscarfonts/web2py
0
2557
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
    Unit tests for gluon.recfile
"""

import unittest
import os
import shutil
import uuid

from .fix_path import fix_sys_path

fix_sys_path(__file__)

from gluon import recfile


class TestRecfile(unittest.TestCase):

    def setUp(self):
        os.mkdir('tests')

    def tearDown(self):
        shutil.rmtree('tests')

    def test_generation(self):
        for k in range(10):
            teststring = 'test%s' % k
            filename = os.path.join('tests', str(uuid.uuid4()) + '.test')
            with recfile.open(filename, "w") as g:
                g.write(teststring)
            self.assertEqual(recfile.open(filename, "r").read(), teststring)
            is_there = recfile.exists(filename)
            self.assertTrue(is_there)
            recfile.remove(filename)
            is_there = recfile.exists(filename)
            self.assertFalse(is_there)
        for k in range(10):
            teststring = 'test%s' % k
            filename = str(uuid.uuid4()) + '.test'
            with recfile.open(filename, "w", path='tests') as g:
                g.write(teststring)
            self.assertEqual(recfile.open(filename, "r", path='tests').read(), teststring)
            is_there = recfile.exists(filename, path='tests')
            self.assertTrue(is_there)
            recfile.remove(filename, path='tests')
            is_there = recfile.exists(filename, path='tests')
            self.assertFalse(is_there)
        for k in range(10):
            teststring = 'test%s' % k
            filename = os.path.join('tests', str(uuid.uuid4()), str(uuid.uuid4()) + '.test')
            with recfile.open(filename, "w") as g:
                g.write(teststring)
            self.assertEqual(recfile.open(filename, "r").read(), teststring)
            is_there = recfile.exists(filename)
            self.assertTrue(is_there)
            recfile.remove(filename)
            is_there = recfile.exists(filename)
            self.assertFalse(is_there)

    def test_existing(self):
        filename = os.path.join('tests', str(uuid.uuid4()) + '.test')
        with open(filename, 'w') as g:
            g.write('this file exists')
        self.assertTrue(recfile.exists(filename))
        self.assertTrue(hasattr(recfile.open(filename, "r"), 'read'))
        recfile.remove(filename, path='tests')
        self.assertFalse(recfile.exists(filename))
        self.assertRaises(IOError, recfile.remove, filename)
        self.assertRaises(IOError, recfile.open, filename, "r")


if __name__ == '__main__':
    unittest.main()
2.5625
3
configLambdas.py
cfrome77/liquid-stats
4
2558
<filename>configLambdas.py
import json
import os
import subprocess

from dotenv import load_dotenv
from subprocess import check_output, Popen, PIPE

load_dotenv()

# Accessing variables.
CLIENT_ID = os.environ.get('CLIENT_ID')
CLIENT_SECRET = os.environ.get('CLIENT_SECRET')
USERNAME = os.environ.get('USERNAME')
BUCKET_NAME = os.environ.get('BUCKET_NAME')


def get_lambda_functions():
    function_dict = {}
    res = subprocess.Popen(
        ["aws", "lambda", "list-functions"],
        stdout=subprocess.PIPE
    )
    output = res.communicate()
    function_dict.update(json.loads(output[0]))
    return function_dict['Functions']


lambda_functions = get_lambda_functions()
for lambda_function in lambda_functions:
    function_name = lambda_function['FunctionName']
    subprocess.run([
        "aws", "lambda", "update-function-configuration",
        "--function-name", f"{function_name}",
        "--environment",
        f"Variables={{CLIENT_ID={CLIENT_ID},CLIENT_SECRET={CLIENT_SECRET},USERNAME={USERNAME},BUCKET_NAME={BUCKET_NAME}}}"
    ])
2.171875
2
microbitAnim.py
SaitoYutaka/microbitAnim
0
2559
<reponame>SaitoYutaka/microbitAnim<filename>microbitAnim.py # -*- coding: utf-8 -*- ########################################################################### ## Python code generated with wxFormBuilder (version Aug 8 2018) ## http://www.wxformbuilder.org/ ## ## PLEASE DO *NOT* EDIT THIS FILE! ########################################################################### import wx import wx.xrc ########################################################################### ## Class MyFrame1 ########################################################################### class MyFrame1 ( wx.Frame ): def __init__( self, parent ): wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = wx.EmptyString, pos = wx.Point( 0,0 ), size = wx.Size( 767,507 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL ) self.SetSizeHints( wx.DefaultSize, wx.DefaultSize ) gbSizer1 = wx.GridBagSizer( 0, 0 ) gbSizer1.SetFlexibleDirection( wx.BOTH ) gbSizer1.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED ) self.m_button00 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 ) self.m_button00.SetBackgroundColour( wx.Colour( 255, 0, 0 ) ) gbSizer1.Add( self.m_button00, wx.GBPosition( 0, 0 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 ) self.m_button01 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 ) self.m_button01.SetBackgroundColour( wx.Colour( 255, 0, 0 ) ) gbSizer1.Add( self.m_button01, wx.GBPosition( 0, 1 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 ) self.m_button02 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 ) self.m_button02.SetBackgroundColour( wx.Colour( 255, 0, 0 ) ) gbSizer1.Add( self.m_button02, wx.GBPosition( 0, 2 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 ) self.m_button03 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 ) self.m_button03.SetBackgroundColour( wx.Colour( 255, 0, 0 ) ) gbSizer1.Add( self.m_button03, wx.GBPosition( 0, 3 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 ) self.m_button04 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 ) self.m_button04.SetBackgroundColour( wx.Colour( 255, 0, 0 ) ) gbSizer1.Add( self.m_button04, wx.GBPosition( 0, 4 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 ) self.m_button10 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 ) self.m_button10.SetBackgroundColour( wx.Colour( 255, 0, 0 ) ) gbSizer1.Add( self.m_button10, wx.GBPosition( 1, 0 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 ) self.m_button11 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 ) self.m_button11.SetBackgroundColour( wx.Colour( 255, 0, 0 ) ) gbSizer1.Add( self.m_button11, wx.GBPosition( 1, 1 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 ) self.m_button12 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 ) self.m_button12.SetBackgroundColour( wx.Colour( 255, 0, 0 ) ) gbSizer1.Add( self.m_button12, wx.GBPosition( 1, 2 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 ) self.m_button13 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 ) self.m_button13.SetBackgroundColour( wx.Colour( 255, 0, 0 ) ) gbSizer1.Add( self.m_button13, wx.GBPosition( 1, 3 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 ) self.m_button14 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 ) self.m_button14.SetBackgroundColour( wx.Colour( 255, 0, 0 ) ) gbSizer1.Add( self.m_button14, wx.GBPosition( 1, 4 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 ) self.m_button20 = wx.Button( self, wx.ID_ANY, 
wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 ) self.m_button20.SetBackgroundColour( wx.Colour( 255, 0, 0 ) ) gbSizer1.Add( self.m_button20, wx.GBPosition( 2, 0 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 ) self.m_button21 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 ) self.m_button21.SetBackgroundColour( wx.Colour( 255, 0, 0 ) ) gbSizer1.Add( self.m_button21, wx.GBPosition( 2, 1 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 ) self.m_button22 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 ) self.m_button22.SetBackgroundColour( wx.Colour( 255, 0, 0 ) ) gbSizer1.Add( self.m_button22, wx.GBPosition( 2, 2 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 ) self.m_button23 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 ) self.m_button23.SetBackgroundColour( wx.Colour( 255, 0, 0 ) ) gbSizer1.Add( self.m_button23, wx.GBPosition( 2, 3 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 ) self.m_button24 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 ) self.m_button24.SetBackgroundColour( wx.Colour( 255, 0, 0 ) ) gbSizer1.Add( self.m_button24, wx.GBPosition( 2, 4 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 ) self.m_button30 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 ) self.m_button30.SetBackgroundColour( wx.Colour( 255, 0, 0 ) ) gbSizer1.Add( self.m_button30, wx.GBPosition( 3, 0 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 ) self.m_button31 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 ) self.m_button31.SetBackgroundColour( wx.Colour( 255, 0, 0 ) ) gbSizer1.Add( self.m_button31, wx.GBPosition( 3, 1 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 ) self.m_button32 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 ) self.m_button32.SetBackgroundColour( wx.Colour( 255, 0, 0 ) ) gbSizer1.Add( self.m_button32, wx.GBPosition( 3, 2 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 ) self.m_button33 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 ) self.m_button33.SetBackgroundColour( wx.Colour( 255, 0, 0 ) ) gbSizer1.Add( self.m_button33, wx.GBPosition( 3, 3 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 ) self.m_button34 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 ) self.m_button34.SetBackgroundColour( wx.Colour( 255, 0, 0 ) ) gbSizer1.Add( self.m_button34, wx.GBPosition( 3, 4 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 ) self.m_button40 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 ) self.m_button40.SetBackgroundColour( wx.Colour( 255, 0, 0 ) ) gbSizer1.Add( self.m_button40, wx.GBPosition( 4, 0 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 ) self.m_button41 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 ) self.m_button41.SetBackgroundColour( wx.Colour( 255, 0, 0 ) ) gbSizer1.Add( self.m_button41, wx.GBPosition( 4, 1 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 ) self.m_button42 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 ) self.m_button42.SetBackgroundColour( wx.Colour( 255, 0, 0 ) ) gbSizer1.Add( self.m_button42, wx.GBPosition( 4, 2 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 ) self.m_button43 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,50 ), 0 ) self.m_button43.SetBackgroundColour( wx.Colour( 255, 0, 0 ) ) gbSizer1.Add( self.m_button43, wx.GBPosition( 4, 3 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 ) self.m_button44 = wx.Button( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, 
wx.Size( 50,50 ), 0 ) self.m_button44.SetBackgroundColour( wx.Colour( 255, 0, 0 ) ) gbSizer1.Add( self.m_button44, wx.GBPosition( 4, 4 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 ) self.SetSizer( gbSizer1 ) self.Layout() self.m_menubar1 = wx.MenuBar( 0 ) self.m_menu1 = wx.Menu() self.m_menuItem3 = wx.MenuItem( self.m_menu1, wx.ID_ANY, u"Open", wx.EmptyString, wx.ITEM_NORMAL ) self.m_menu1.Append( self.m_menuItem3 ) self.m_menuItem1 = wx.MenuItem( self.m_menu1, wx.ID_ANY, u"Save", wx.EmptyString, wx.ITEM_NORMAL ) self.m_menu1.Append( self.m_menuItem1 ) self.m_menuItem2 = wx.MenuItem( self.m_menu1, wx.ID_ANY, u"quit", wx.EmptyString, wx.ITEM_NORMAL ) self.m_menu1.Append( self.m_menuItem2 ) self.m_menubar1.Append( self.m_menu1, u"File" ) self.m_menu2 = wx.Menu() self.m_menuItem4 = wx.MenuItem( self.m_menu2, wx.ID_ANY, u"python", wx.EmptyString, wx.ITEM_NORMAL ) self.m_menu2.Append( self.m_menuItem4 ) self.m_menubar1.Append( self.m_menu2, u"export" ) self.SetMenuBar( self.m_menubar1 ) self.Centre( wx.BOTH ) # Connect Events self.m_button00.Bind( wx.EVT_BUTTON, self.onButton00Click ) self.m_button01.Bind( wx.EVT_BUTTON, self.onButton01Click ) self.m_button02.Bind( wx.EVT_BUTTON, self.onButton02Click ) self.m_button03.Bind( wx.EVT_BUTTON, self.onButton03Click ) self.m_button04.Bind( wx.EVT_BUTTON, self.onButton04Click ) self.m_button10.Bind( wx.EVT_BUTTON, self.onButton10Click ) self.m_button11.Bind( wx.EVT_BUTTON, self.onButton11Click ) self.m_button12.Bind( wx.EVT_BUTTON, self.onButton12Click ) self.m_button13.Bind( wx.EVT_BUTTON, self.onButton13Click ) self.m_button14.Bind( wx.EVT_BUTTON, self.onButton14Click ) self.m_button20.Bind( wx.EVT_BUTTON, self.onButton20Click ) self.m_button21.Bind( wx.EVT_BUTTON, self.onButton21Click ) self.m_button22.Bind( wx.EVT_BUTTON, self.onButton22Click ) self.m_button23.Bind( wx.EVT_BUTTON, self.onButton23Click ) self.m_button24.Bind( wx.EVT_BUTTON, self.onButton24Click ) self.m_button30.Bind( wx.EVT_BUTTON, self.onButton30Click ) self.m_button31.Bind( wx.EVT_BUTTON, self.onButton31Click ) self.m_button32.Bind( wx.EVT_BUTTON, self.onButton32Click ) self.m_button33.Bind( wx.EVT_BUTTON, self.onButton33Click ) self.m_button34.Bind( wx.EVT_BUTTON, self.onButton34Click ) self.m_button40.Bind( wx.EVT_BUTTON, self.onButton40Click ) self.m_button41.Bind( wx.EVT_BUTTON, self.onButton41Click ) self.m_button42.Bind( wx.EVT_BUTTON, self.onButton42Click ) self.m_button43.Bind( wx.EVT_BUTTON, self.onButton43Click ) self.m_button44.Bind( wx.EVT_BUTTON, self.onButton44Click ) self.Bind( wx.EVT_MENU, self.OnMenuOpenSelect, id = self.m_menuItem3.GetId() ) self.Bind( wx.EVT_MENU, self.OnMenuSaveSelect, id = self.m_menuItem1.GetId() ) self.Bind( wx.EVT_MENU, self.OnMenuQuitSelect, id = self.m_menuItem2.GetId() ) self.Bind( wx.EVT_MENU, self.OnExportPythonSelect, id = self.m_menuItem4.GetId() ) def __del__( self ): pass # Virtual event handlers, overide them in your derived class def onButton00Click( self, event ): event.Skip() def onButton01Click( self, event ): event.Skip() def onButton02Click( self, event ): event.Skip() def onButton03Click( self, event ): event.Skip() def onButton04Click( self, event ): event.Skip() def onButton10Click( self, event ): event.Skip() def onButton11Click( self, event ): event.Skip() def onButton12Click( self, event ): event.Skip() def onButton13Click( self, event ): event.Skip() def onButton14Click( self, event ): event.Skip() def onButton20Click( self, event ): event.Skip() def onButton21Click( self, event ): event.Skip() def onButton22Click( self, event ): 
event.Skip() def onButton23Click( self, event ): event.Skip() def onButton24Click( self, event ): event.Skip() def onButton30Click( self, event ): event.Skip() def onButton31Click( self, event ): event.Skip() def onButton32Click( self, event ): event.Skip() def onButton33Click( self, event ): event.Skip() def onButton34Click( self, event ): event.Skip() def onButton40Click( self, event ): event.Skip() def onButton41Click( self, event ): event.Skip() def onButton42Click( self, event ): event.Skip() def onButton43Click( self, event ): event.Skip() def onButton44Click( self, event ): event.Skip() def OnMenuOpenSelect( self, event ): event.Skip() def OnMenuSaveSelect( self, event ): event.Skip() def OnMenuQuitSelect( self, event ): event.Skip() def OnExportPythonSelect( self, event ): event.Skip()
1.8125
2
src/dependencies/contrib/celery.py
nicoddemus/dependencies
0
2560
<reponame>nicoddemus/dependencies
"""
dependencies.contrib.celery
---------------------------

This module implements injectable Celery task.

:copyright: (c) 2016-2020 by dry-python team.
:license: BSD, see LICENSE for more details.
"""

from _dependencies.contrib.celery import shared_task
from _dependencies.contrib.celery import task

__all__ = ["shared_task", "task"]
1.320313
1
yolo3/utils.py
gaxu/keras-yolo3
0
2561
"""Miscellaneous utility functions.""" from functools import reduce from PIL import Image import numpy as np from matplotlib.colors import rgb_to_hsv, hsv_to_rgb def compose(*funcs): """Compose arbitrarily many functions, evaluated left to right. Reference: https://mathieularose.com/function-composition-in-python/ """ # return lambda x: reduce(lambda v, f: f(v), funcs, x) if funcs: return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs) else: raise ValueError('Composition of empty sequence not supported.') def letterbox_image(image, size): '''resize image with unchanged aspect ratio using padding''' iw, ih = image.size w, h = size scale = min(w/iw, h/ih) nw = int(iw*scale) nh = int(ih*scale) image = image.resize((nw,nh), Image.BICUBIC) new_image = Image.new('RGB', size, (128,128,128)) new_image.paste(image, ((w-nw)//2, (h-nh)//2)) return new_image def rand(a=0, b=1): return np.random.rand()*(b-a) + a def get_random_data(annotation_line, input_shape, random=True, max_boxes=20, jitter=.3, hue=.1, sat=1.5, val=1.5, proc_img=True): '''random preprocessing for real-time data augmentation''' line = annotation_line.split() image = Image.open(line[0]) iw, ih = image.size h, w = input_shape box = np.array([np.array(list(map(int,box.split(',')))) for box in line[1:]]) if not random: # resize image scale = min(w/iw, h/ih) nw = int(iw*scale) nh = int(ih*scale) dx = (w-nw)//2 dy = (h-nh)//2 image_data=0 if proc_img: image = image.resize((nw,nh), Image.BICUBIC) new_image = Image.new('RGB', (w,h), (128,128,128)) new_image.paste(image, (dx, dy)) image_data = np.array(new_image)/255. # correct boxes box_data = np.zeros((max_boxes,5)) if len(box)>0: np.random.shuffle(box) if len(box)>max_boxes: box = box[:max_boxes] box[:, [0,2]] = box[:, [0,2]]*scale + dx box[:, [1,3]] = box[:, [1,3]]*scale + dy box_data[:len(box)] = box return image_data, box_data # resize image new_ar = w/h * rand(1-jitter,1+jitter)/rand(1-jitter,1+jitter) scale = rand(.25, 2) if new_ar < 1: nh = int(scale*h) nw = int(nh*new_ar) else: nw = int(scale*w) nh = int(nw/new_ar) image = image.resize((nw,nh), Image.BICUBIC) # place image dx = int(rand(0, w-nw)) dy = int(rand(0, h-nh)) new_image = Image.new('RGB', (w,h), (128,128,128)) new_image.paste(image, (dx, dy)) image = new_image # flip image or not flip = rand()<.5 if flip: image = image.transpose(Image.FLIP_LEFT_RIGHT) # distort image hue = rand(-hue, hue) sat = rand(1, sat) if rand()<.5 else 1/rand(1, sat) val = rand(1, val) if rand()<.5 else 1/rand(1, val) x = rgb_to_hsv(np.array(image)/255.) 
x[..., 0] += hue x[..., 0][x[..., 0]>1] -= 1 x[..., 0][x[..., 0]<0] += 1 x[..., 1] *= sat x[..., 2] *= val x[x>1] = 1 x[x<0] = 0 image_data = hsv_to_rgb(x) # numpy array, 0 to 1 # correct boxes box_data = np.zeros((max_boxes,5)) if len(box)>0: np.random.shuffle(box) box[:, [0,2]] = box[:, [0,2]]*nw/iw + dx box[:, [1,3]] = box[:, [1,3]]*nh/ih + dy if flip: box[:, [0,2]] = w - box[:, [2,0]] box[:, 0:2][box[:, 0:2]<0] = 0 box[:, 2][box[:, 2]>w] = w box[:, 3][box[:, 3]>h] = h box_w = box[:, 2] - box[:, 0] box_h = box[:, 3] - box[:, 1] box = box[np.logical_and(box_w>1, box_h>1)] # discard invalid box if len(box)>max_boxes: box = box[:max_boxes] box_data[:len(box)] = box return image_data, box_data def get_random_data2(annotation_line, input_shape, random=True, max_boxes=20, jitter=.3, hue=.1, sat=1.5, val=1.5, proc_img=True): '''random preprocessing for real-time data augmentation''' line = annotation_line.split() image = Image.open(line[0]) w, h = image.size #13 14 dx, dy = input_shape box = np.array([np.array(list(map(int,box.split(',')))) for box in line[1:]]) x_min = w x_max = 0 y_min = h y_max = 0 for bbox in box: x_min = min(x_min, bbox[0]) y_min = min(y_min, bbox[1]) x_max = max(x_max, bbox[2]) y_max = max(y_max, bbox[3]) name = bbox[4] # 包含所有目标框的最小框到各个边的距离 d_to_left = x_min d_to_right = w - x_max d_to_top = y_min d_to_bottom = h - y_max # 随机扩展这个最小范围 crop_x_min = int(x_min - rand(0, d_to_left)) crop_y_min = int(y_min - rand(0, d_to_top)) crop_x_max = int(x_max + rand(0, d_to_right)) crop_y_max = int(y_max + rand(0, d_to_bottom)) # 确保不出界 crop_x_min = max(0, crop_x_min) crop_y_min = max(0, crop_y_min) crop_x_max = min(w, crop_x_max) crop_y_max = min(h, crop_y_max) cropped = image.crop((crop_x_min, crop_y_min, crop_x_max, crop_y_max)) # (left, upper, right, lower) new_image = Image.new('RGB', (w,h), (128,128,128)) new_image.paste(cropped, (dx, dy)) image_data = np.array(new_image)/255. 
box_data = np.zeros((max_boxes,5)) if len(box)>0: np.random.shuffle(box) if len(box)>max_boxes: box = box[:max_boxes] box[:,0] = box[:,0]-crop_y_min box[:,1] = box[:,1]-crop_y_min box[:,2] = box[:,2]-crop_x_min box[:,3] = box[:,3]-crop_y_min box_data[:len(box)] = box return image_data, box_data def get_random_data2(annotation_line, input_shape, max_boxes=20, jitter=.3, hue=.1, sat=1.5, val=1.5, proc_img=True): line = annotation_line.split() img = cv2.imread(line[0]) h_img, w_img, _ = img.shape w, h = input_shape box = np.array([np.array(list(map(int,box.split(',')))) for box in line[1:]]) max_bbox = np.concatenate([np.min(box[:, 0:2], axis=0), np.max(box[:, 2:4], axis=0)], axis=-1)# 取得所有bbox中的最大bbox #包含所有目標框的最大框到各個邊的距離 max_l_trans = max_bbox[0] max_u_trans = max_bbox[1] max_r_trans = w_img - max_bbox[2] max_d_trans = h_img - max_bbox[3] #隨機擴展框最大範圍 crop_xmin = max(0, int(max_bbox[0] - random.uniform(0, max_l_trans)*2)) crop_ymin = max(0, int(max_bbox[1] - random.uniform(0, max_u_trans)*2)) crop_xmax = max(w_img, int(max_bbox[2] + random.uniform(0, max_r_trans)*2)) crop_ymax = max(h_img, int(max_bbox[3] + random.uniform(0, max_d_trans)*2)) img = img[crop_ymin : crop_ymax, crop_xmin : crop_xmax] #進行裁剪 image = Image.fromarray(cv2.cvtColor(img,cv2.COLOR_BGR2RGB)) #因為目前圖片格式是cv2,因此要轉換為PIL格式做貼上的語法 new_image = Image.new('RGB', (w,h), (128,128,128)) #產出一個(416,416)的灰色圖片 new_image.paste(image, (0, 0)) #將轉為PIL格式的圖片 貼到灰色圖片中 img2 = cv2.cvtColor(np.asarray(new_image),cv2.COLOR_RGB2BGR) #再將格式轉回cv2 box_data = np.zeros((max_boxes,5)) #box最多有max_boxes個,即shap->(20,5) #將剪裁後位移的框與原始框進行相減,避免變換之後的值過大或過小,並去除異常的box if len(box)>0: np.random.shuffle(box) if len(box)>max_boxes: box = box[:max_boxes] box[:, [0, 2]] = box[:, [0, 2]] - crop_xmin box[:, [1, 3]] = box[:, [1, 3]] - crop_ymin box[:, 2][box[:, 2]>w] = w box[:, 3][box[:, 3]>h] = h box_w = box[:, 2] - box[:, 0] box_h = box[:, 3] - box[:, 1] box = box[np.logical_and(box_w>1, box_h>1)] # discard invalid box if len(box)>max_boxes: box = box[:max_boxes] box_data[:len(box)] = box #標框線 # light_blue = (255,200,100) # for boxs in box: # cv2.rectangle(img2,(boxs[0],boxs[1]),(boxs[2],boxs[3]),light_blue,2) # writename=os.path.basename(line[0]) #取檔名 # cv2.imshow('My Image', img2) # cv2.waitKey(0) return img2, box_data
3.078125
3
text_classification/config.py
MardanovTimur/kaggle
0
2562
<gh_stars>0
import logging
import pathlib

logging.basicConfig(level=logging.INFO)

# Dirs
ROOT_DIR = pathlib.Path(__file__).parent.absolute()
DUMP_DIR = ROOT_DIR / 'dumps'
1.625
2
sportsreference/ncaaf/rankings.py
JosephDErwin/sportsreference
0
2563
import re

from pyquery import PyQuery as pq

from .. import utils
from .constants import RANKINGS_SCHEME, RANKINGS_URL
from six.moves.urllib.error import HTTPError


class Rankings:
    """
    Get all Associated Press (AP) rankings on a week-by-week basis.

    Grab a list of the rankings published by the Associated Press to easily
    query the hierarchy of teams each week. The results expose the current
    and previous rankings as well as the movement for each team in the list.

    Parameters
    ----------
    year : string (optional)
        A string of the requested year to pull rankings from. Defaults to the
        most recent season.
    """
    def __init__(self, year=None):
        self._rankings = {}

        self._find_rankings(year)

    def _pull_rankings_page(self, year):
        """
        Download the rankings page.

        Download the rankings page for the requested year and create a
        PyQuery object.

        Parameters
        ----------
        year : string
            A string of the requested year to pull rankings from.

        Returns
        -------
        PyQuery object
            Returns a PyQuery object of the rankings HTML page.
        """
        try:
            return pq(RANKINGS_URL % year)
        except HTTPError:
            return None

    def _get_team(self, team):
        """
        Retrieve team's name and abbreviation.

        The team's name and abbreviation are embedded within the
        'school_name' tag and, in the case of the abbreviation, require
        special parsing as it is located in the middle of a URI. The name and
        abbreviation are returned for the requested school.

        Parameters
        ----------
        team : PyQuery object
            A PyQuery object representing a single row in a table on the
            rankings page.

        Returns
        -------
        tuple (string, string)
            Returns a tuple of two strings where the first string is the
            team's abbreviation, such as 'PURDUE' and the second string is
            the team's name, such as 'Purdue'.
        """
        name_tag = team('td[data-stat="school_name"]')
        abbreviation = re.sub(r'.*/cfb/schools/', '', str(name_tag('a')))
        abbreviation = re.sub(r'/.*', '', abbreviation)
        name = team('td[data-stat="school_name"] a').text()
        return abbreviation, name

    def _find_rankings(self, year):
        """
        Retrieve the rankings for each week.

        Find and retrieve all AP rankings for the requested year and combine
        them on a per-week basis. Each week contains information about the
        name, abbreviation, rank, movement, and previous rank for each team
        as well as the date and week number the results were published on.

        Parameters
        ----------
        year : string
            A string of the requested year to pull rankings from.
        """
        if not year:
            year = utils._find_year_for_season('ncaaf')
        page = self._pull_rankings_page(year)
        if not page:
            output = ("Can't pull rankings page. Ensure the following URL "
                      "exists: %s" % RANKINGS_URL)
            raise ValueError(output)
        rankings = page('table#ap tbody tr').items()
        weekly_rankings = []
        week = 0
        for team in rankings:
            if 'class="thead"' in str(team):
                self._rankings[int(week)] = weekly_rankings
                weekly_rankings = []
                continue
            abbreviation, name = self._get_team(team)
            rank = utils._parse_field(RANKINGS_SCHEME, team, 'rank')
            week = utils._parse_field(RANKINGS_SCHEME, team, 'week')
            date = utils._parse_field(RANKINGS_SCHEME, team, 'date')
            previous = utils._parse_field(RANKINGS_SCHEME, team, 'previous')
            change = utils._parse_field(RANKINGS_SCHEME, team, 'change')
            if 'decrease' in str(team(RANKINGS_SCHEME['change'])):
                change = int(change) * -1
            elif 'increase' in str(team(RANKINGS_SCHEME['change'])):
                try:
                    change = int(change)
                except ValueError:
                    change = 0
            else:
                change = 0
            rank_details = {
                'abbreviation': abbreviation,
                'name': name,
                'rank': int(rank),
                'week': int(week),
                'date': date,
                'previous': previous,
                'change': change
            }
            weekly_rankings.append(rank_details)
        # Add the final rankings which is not terminated with another header
        # row and hence will not hit the first if statement in the loop above.
        self._rankings[int(week)] = weekly_rankings

    @property
    def current_extended(self):
        """
        Returns a ``list`` of ``dictionaries`` of the most recent AP rankings.
        The list is ordered in terms of the ranking so the #1 team will be in
        the first element and the #25 team will be the last element. Each
        dictionary has the following structure::

            {
                'abbreviation': Team's abbreviation, such as 'PURDUE' (str),
                'name': Team's full name, such as 'Purdue' (str),
                'rank': Team's rank for the current week (int),
                'week': Week number for the results, such as 19 (int),
                'date': Date the rankings were released, such as '2017-03-01'.
                        Can also be 'Final' for the final rankings or
                        'Preseason' for preseason rankings (str),
                'previous': The team's previous rank, if applicable (str),
                'change': The amount the team moved up or down the rankings.
                          Moves up the ladder have a positive number while
                          drops yield a negative number and teams that didn't
                          move have 0 (int)
            }
        """
        latest_week = max(self._rankings.keys())
        ordered_dict = sorted(self._rankings[latest_week],
                              key=lambda k: k['rank'])
        return ordered_dict

    @property
    def current(self):
        """
        Returns a ``dictionary`` of the most recent rankings from the
        Associated Press where each key is a ``string`` of the team's
        abbreviation and each value is an ``int`` of the team's rank for the
        current week.
        """
        rankings_dict = {}
        for team in self.current_extended:
            rankings_dict[team['abbreviation']] = team['rank']
        return rankings_dict

    @property
    def complete(self):
        """
        Returns a ``dictionary`` where each key is a week number as an
        ``int`` and each value is a ``list`` of ``dictionaries`` containing
        the AP rankings for each week. Within each list is a dictionary of
        team information such as name, abbreviation, rank, and more. Note
        that the list might not necessarily be in the same order as the
        rankings.

        The overall dictionary has the following structure::

            {
                week number, ie 16 (int): [
                    {
                        'abbreviation': Team's abbreviation, such as
                                        'PURDUE' (str),
                        'name': Team's full name, such as 'Purdue' (str),
                        'rank': Team's rank for the current week (int),
                        'week': Week number for the results, such as 16
                                (int),
                        'date': Date the rankings were released, such as
                                '2017-12-03'. Can also be 'Final' for the
                                final rankings or 'Preseason' for preseason
                                rankings (str),
                        'previous': The team's previous rank, if applicable
                                    (str),
                        'change': The amount the team moved up or down the
                                  rankings. Moves up the ladder have a
                                  positive number while drops yield a
                                  negative number and teams that didn't move
                                  have 0 (int)
                    },
                    ...
                ],
                ...
            }
        """
        return self._rankings
3.234375
3
ding/hpc_rl/wrapper.py
davide97l/DI-engine
1
2564
<filename>ding/hpc_rl/wrapper.py
import importlib
from ditk import logging
from collections import OrderedDict
from functools import wraps
import ding

'''
Overview:
    `hpc_wrapper` is the wrapper for functions which are supported by hpc. If a function is wrapped by it, we will
    search for its hpc type and return the function implemented by hpc.
    We will use the following code as a sample to introduce `hpc_wrapper`:
    ```
    @hpc_wrapper(shape_fn=shape_fn_dntd, namedtuple_data=True, include_args=[0,1,2,3],
                 include_kwargs=['data', 'gamma', 'v_min', 'v_max'], is_cls_method=False)
    def dist_nstep_td_error(
            data: namedtuple,
            gamma: float,
            v_min: float,
            v_max: float,
            n_atom: int,
            nstep: int = 1,
    ) -> torch.Tensor:
        ...
    ```
Parameters:
    - shape_fn (:obj:`function`): a function which return the shape needed by hpc function. In fact, it returns
        all args that the hpc function needs.
    - nametuple_data (:obj:`bool`): If True, when hpc function is called, it will be called as hpc_function(*nametuple).
        If False, nametuple data will remain its `nametuple` type.
    - include_args (:obj:`list`): a list of index of the args need to be set in hpc function. As shown in the sample,
        include_args=[0,1,2,3], which means `data`, `gamma`, `v_min` and `v_max` will be set in hpc function.
    - include_kwargs (:obj:`list`): a list of key of the kwargs need to be set in hpc function. As shown in the sample,
        include_kwargs=['data', 'gamma', 'v_min', 'v_max'], which means `data`, `gamma`, `v_min` and `v_max` will be
        set in hpc function.
    - is_cls_method (:obj:`bool`): If True, it means the function we wrap is a method of a class. `self` will be put
        into args. We will get rid of `self` in args. Besides, we will use its classname as its fn_name.
        If False, it means the function is a simple method.
Q&A:
    - Q: Is `include_args` and `include_kwargs` need to be set at the same time?
    - A: Yes. `include_args` and `include_kwargs` can deal with all type of input, such as
        (data, gamma, v_min=v_min, v_max=v_max) and (data, gamma, v_min, v_max).
    - Q: What is `hpc_fns`?
    - A: Here we show a normal `hpc_fns`:
        ```
        hpc_fns = {
            'fn_name1': {
                'runtime_name1': hpc_fn1,
                'runtime_name2': hpc_fn2,
                ...
            },
            ...
        }
        ```
        Besides, `per_fn_limit` means the max length of `hpc_fns[fn_name]`. When new function comes, the oldest
        function will be popped from `hpc_fns[fn_name]`.
'''

hpc_fns = {}
per_fn_limit = 3


def register_runtime_fn(fn_name, runtime_name, shape):
    fn_name_mapping = {
        'gae': ['hpc_rll.rl_utils.gae', 'GAE'],
        'dist_nstep_td_error': ['hpc_rll.rl_utils.td', 'DistNStepTD'],
        'LSTM': ['hpc_rll.torch_utils.network.rnn', 'LSTM'],
        'ppo_error': ['hpc_rll.rl_utils.ppo', 'PPO'],
        'q_nstep_td_error': ['hpc_rll.rl_utils.td', 'QNStepTD'],
        'q_nstep_td_error_with_rescale': ['hpc_rll.rl_utils.td', 'QNStepTDRescale'],
        'ScatterConnection': ['hpc_rll.torch_utils.network.scatter_connection', 'ScatterConnection'],
        'td_lambda_error': ['hpc_rll.rl_utils.td', 'TDLambda'],
        'upgo_loss': ['hpc_rll.rl_utils.upgo', 'UPGO'],
        'vtrace_error': ['hpc_rll.rl_utils.vtrace', 'VTrace'],
    }
    fn_str = fn_name_mapping[fn_name]
    cls = getattr(importlib.import_module(fn_str[0]), fn_str[1])
    hpc_fn = cls(*shape).cuda()
    if fn_name not in hpc_fns:
        hpc_fns[fn_name] = OrderedDict()
    hpc_fns[fn_name][runtime_name] = hpc_fn
    while len(hpc_fns[fn_name]) > per_fn_limit:
        hpc_fns[fn_name].popitem(last=False)
    # print(hpc_fns)
    return hpc_fn


def hpc_wrapper(shape_fn=None, namedtuple_data=False, include_args=[], include_kwargs=[], is_cls_method=False):

    def decorate(fn):

        @wraps(fn)
        def wrapper(*args, **kwargs):
            if ding.enable_hpc_rl:
                shape = shape_fn(args, kwargs)
                if is_cls_method:
                    fn_name = args[0].__class__.__name__
                else:
                    fn_name = fn.__name__
                runtime_name = '_'.join([fn_name] + [str(s) for s in shape])
                if fn_name not in hpc_fns or runtime_name not in hpc_fns[fn_name]:
                    hpc_fn = register_runtime_fn(fn_name, runtime_name, shape)
                else:
                    hpc_fn = hpc_fns[fn_name][runtime_name]
                if is_cls_method:
                    args = args[1:]
                clean_args = []
                for i in include_args:
                    if i < len(args):
                        clean_args.append(args[i])
                nouse_args = list(set(list(range(len(args)))).difference(set(include_args)))
                clean_kwargs = {}
                for k, v in kwargs.items():
                    if k in include_kwargs:
                        if k == 'lambda_':
                            k = 'lambda'
                        clean_kwargs[k] = v
                nouse_kwargs = list(set(kwargs.keys()).difference(set(include_kwargs)))
                if len(nouse_args) > 0 or len(nouse_kwargs) > 0:
                    logging.warn(
                        'in {}, index {} of args are dropped, and keys {} of kwargs are dropped.'.format(
                            runtime_name, nouse_args, nouse_kwargs
                        )
                    )
                if namedtuple_data:
                    data = args[0]  # args[0] is a namedtuple
                    return hpc_fn(*data, *clean_args[1:], **clean_kwargs)
                else:
                    return hpc_fn(*clean_args, **clean_kwargs)
            else:
                return fn(*args, **kwargs)

        return wrapper

    return decorate
2.4375
2
CodeWars/2016/NumberOfOccurrences-7k.py
JLJTECH/TutorialTesting
0
2565
#Return the count of int(s) in passed array.
def number_of_occurrences(s, xs):
    return xs.count(s)
2.796875
3
telethon_generator/parsers/tlobject.py
islam-200555/Telethon
2
2566
import re from zlib import crc32 from ..utils import snake_to_camel_case CORE_TYPES = ( 0xbc799737, # boolFalse#bc799737 = Bool; 0x997275b5, # boolTrue#997275b5 = Bool; 0x3fedd339, # true#3fedd339 = True; 0x1cb5c415, # vector#1cb5c415 {t:Type} # [ t ] = Vector t; ) # https://github.com/telegramdesktop/tdesktop/blob/4bf66cb6e93f3965b40084771b595e93d0b11bcd/Telegram/SourceFiles/codegen/scheme/codegen_scheme.py#L57-L62 WHITELISTED_MISMATCHING_IDS = { # 0 represents any layer 0: {'ipPortSecret', 'accessPointRule', 'help.configSimple'}, 77: {'channel'}, 78: {'channel'} } class TLObject: def __init__(self, fullname, object_id, args, result, is_function, layer): """ Initializes a new TLObject, given its properties. :param fullname: The fullname of the TL object (namespace.name) The namespace can be omitted. :param object_id: The hexadecimal string representing the object ID :param args: The arguments, if any, of the TL object :param result: The result type of the TL object :param is_function: Is the object a function or a type? :param layer: The layer this TLObject belongs to. """ # The name can or not have a namespace self.fullname = fullname if '.' in fullname: self.namespace, self.name = fullname.split('.', maxsplit=1) else: self.namespace, self.name = None, fullname self.args = args self.result = result self.is_function = is_function self.id = None if object_id is None: self.id = self.infer_id() else: self.id = int(object_id, base=16) whitelist = WHITELISTED_MISMATCHING_IDS[0] |\ WHITELISTED_MISMATCHING_IDS.get(layer, set()) if self.fullname not in whitelist: assert self.id == self.infer_id(),\ 'Invalid inferred ID for ' + repr(self) self.class_name = snake_to_camel_case( self.name, suffix='Request' if self.is_function else '') self.real_args = list(a for a in self.sorted_args() if not (a.flag_indicator or a.generic_definition)) def sorted_args(self): """Returns the arguments properly sorted and ready to plug-in into a Python's method header (i.e., flags and those which can be inferred will go last so they can default =None) """ return sorted(self.args, key=lambda x: x.is_flag or x.can_be_inferred) def __repr__(self, ignore_id=False): if self.id is None or ignore_id: hex_id = '' else: hex_id = '#{:08x}'.format(self.id) if self.args: args = ' ' + ' '.join([repr(arg) for arg in self.args]) else: args = '' return '{}{}{} = {}'.format(self.fullname, hex_id, args, self.result) def infer_id(self): representation = self.__repr__(ignore_id=True) representation = representation\ .replace(':bytes ', ':string ')\ .replace('?bytes ', '?string ')\ .replace('<', ' ').replace('>', '')\ .replace('{', '').replace('}', '') representation = re.sub( r' \w+:flags\.\d+\?true', r'', representation ) return crc32(representation.encode('ascii')) class TLArg: def __init__(self, name, arg_type, generic_definition): """ Initializes a new .tl argument :param name: The name of the .tl argument :param arg_type: The type of the .tl argument :param generic_definition: Is the argument a generic definition? (i.e. {X:Type}) """ self.name = 'is_self' if name == 'self' else name # Default values self.is_vector = False self.is_flag = False self.skip_constructor_id = False self.flag_index = -1 # Special case: some types can be inferred, which makes it # less annoying to type. 
Currently the only type that can # be inferred is if the name is 'random_id', to which a # random ID will be assigned if left as None (the default) self.can_be_inferred = name == 'random_id' # The type can be an indicator that other arguments will be flags if arg_type == '#': self.flag_indicator = True self.type = None self.is_generic = False else: self.flag_indicator = False self.is_generic = arg_type.startswith('!') # Strip the exclamation mark always to have only the name self.type = arg_type.lstrip('!') # The type may be a flag (flags.IDX?REAL_TYPE) # Note that 'flags' is NOT the flags name; this # is determined by a previous argument # However, we assume that the argument will always be called 'flags' flag_match = re.match(r'flags.(\d+)\?([\w<>.]+)', self.type) if flag_match: self.is_flag = True self.flag_index = int(flag_match.group(1)) # Update the type to match the exact type, not the "flagged" one self.type = flag_match.group(2) # Then check if the type is a Vector<REAL_TYPE> vector_match = re.match(r'[Vv]ector<([\w\d.]+)>', self.type) if vector_match: self.is_vector = True # If the type's first letter is not uppercase, then # it is a constructor and we use (read/write) its ID # as pinpointed on issue #81. self.use_vector_id = self.type[0] == 'V' # Update the type to match the one inside the vector self.type = vector_match.group(1) # See use_vector_id. An example of such case is ipPort in # help.configSpecial if self.type.split('.')[-1][0].islower(): self.skip_constructor_id = True # The name may contain "date" in it, if this is the case and the type is "int", # we can safely assume that this should be treated as a "date" object. # Note that this is not a valid Telegram object, but it's easier to work with if self.type == 'int' and ( re.search(r'(\b|_)date\b', name) or name in ('expires', 'expires_at', 'was_online')): self.type = 'date' self.generic_definition = generic_definition def type_hint(self): type = self.type if '.' in type: type = type.split('.')[1] result = { 'int': 'int', 'long': 'int', 'int128': 'int', 'int256': 'int', 'string': 'str', 'date': 'Optional[datetime]', # None date = 0 timestamp 'bytes': 'bytes', 'true': 'bool', }.get(type, "Type{}".format(type)) if self.is_vector: result = 'List[{}]'.format(result) if self.is_flag and type != 'date': result = 'Optional[{}]'.format(result) return result def __str__(self): # Find the real type representation by updating it as required real_type = self.type if self.flag_indicator: real_type = '#' if self.is_vector: if self.use_vector_id: real_type = 'Vector<{}>'.format(real_type) else: real_type = 'vector<{}>'.format(real_type) if self.is_generic: real_type = '!{}'.format(real_type) if self.is_flag: real_type = 'flags.{}?{}'.format(self.flag_index, real_type) if self.generic_definition: return '{{{}:{}}}'.format(self.name, real_type) else: return '{}:{}'.format(self.name, real_type) def __repr__(self): return str(self).replace(':date', ':int').replace('?date', '?int') def _from_line(line, is_function, layer): match = re.match( r'^([\w.]+)' # 'name' r'(?:#([0-9a-fA-F]+))?' # '#optionalcode' r'(?:\s{?\w+:[\w\d<>#.?!]+}?)*' # '{args:.0?type}' r'\s=\s' # ' = ' r'([\w\d<>#.?]+);$', # '<result.type>;' line ) if match is None: # Probably "vector#1cb5c415 {t:Type} # [ t ] = Vector t;" raise ValueError('Cannot parse TLObject {}'.format(line)) args_match = re.findall( r'({)?' 
r'(\w+)' r':' r'([\w\d<>#.?!]+)' r'}?', line ) return TLObject( fullname=match.group(1), object_id=match.group(2), result=match.group(3), is_function=is_function, layer=layer, args=[TLArg(name, arg_type, brace != '') for brace, name, arg_type in args_match] ) def parse_tl(file_path, layer, ignore_core=False): """This method yields TLObjects from a given .tl file.""" with open(file_path, encoding='utf-8') as file: is_function = False for line in file: comment_index = line.find('//') if comment_index != -1: line = line[:comment_index] line = line.strip() if not line: continue match = re.match('---(\w+)---', line) if match: following_types = match.group(1) is_function = following_types == 'functions' continue try: result = _from_line(line, is_function, layer=layer) if not ignore_core or result.id not in CORE_TYPES: yield result except ValueError as e: if 'vector#1cb5c415' not in str(e): raise def find_layer(file_path): """Finds the layer used on the specified scheme.tl file.""" layer_regex = re.compile(r'^//\s*LAYER\s*(\d+)$') with open(file_path, encoding='utf-8') as file: for line in file: match = layer_regex.match(line) if match: return int(match.group(1))
2.296875
2
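A minimal usage sketch for the TL parser in the record above (the scheme file path here is hypothetical; everything else is defined in that record):

    layer = find_layer('scheme.tl')
    for obj in parse_tl('scheme.tl', layer, ignore_core=True):
        print(obj.class_name, hex(obj.id), [arg.name for arg in obj.real_args])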
dotnet/private/actions/resx_core.bzl
purkhusid/rules_dotnet
143
2567
<reponame>purkhusid/rules_dotnet
"Actions for compiling resx files"

load(
    "@io_bazel_rules_dotnet//dotnet/private:providers.bzl",
    "DotnetResourceInfo",
)

def _make_runner_arglist(dotnet, source, output, resgen):
    args = dotnet.actions.args()

    if type(source) == "Target":
        args.add_all(source.files)
    else:
        args.add(source)

    args.add(output)

    return args

def emit_resx_core(
        dotnet,
        name = "",
        src = None,
        identifier = None,
        out = None,
        customresgen = None):
    """The function adds an action that compiles a single .resx file into .resources file.

    Returns [DotnetResourceInfo](api.md#dotnetresourceinfo).

    Args:
      dotnet: [DotnetContextInfo](api.md#dotnetcontextinfo).
      name: name of the file to generate.
      src: The .resx source file that is transformed into .resources file. Only `.resx` files are permitted.
      identifier: The logical name for the resource; the name that is used to load the resource. The default is the basename of the file name (no subfolder).
      out: An alternative name of the output file (if name should not be used).
      customresgen: custom resgen program to use.

    Returns:
      DotnetResourceInfo: [DotnetResourceInfo](api.md#dotnetresourceinfo).
    """
    if name == "" and out == None:
        fail("either name or out must be set")

    if not out:
        result = dotnet.actions.declare_file(name + ".resources")
    else:
        result = dotnet.actions.declare_file(out)

    args = _make_runner_arglist(dotnet, src, result, customresgen.files_to_run.executable.path)

    # We use the command to extract the shell path and force runfiles creation
    resolve = dotnet._ctx.resolve_tools(tools = [customresgen])
    inputs = src.files.to_list() if type(src) == "Target" else [src]

    dotnet.actions.run(
        inputs = inputs + resolve[0].to_list(),
        tools = customresgen.default_runfiles.files,
        outputs = [result],
        executable = customresgen.files_to_run,
        arguments = [args],
        env = {"RUNFILES_MANIFEST_FILE": customresgen.files_to_run.runfiles_manifest.path},
        mnemonic = "CoreResxCompile",
        input_manifests = resolve[1],
        progress_message = (
            "Compiling resources " + dotnet.label.package + ":" + dotnet.label.name
        ),
    )

    return DotnetResourceInfo(
        name = name,
        result = result,
        identifier = identifier,
    )
2.34375
2
test/jit/test_modules.py
xiaohanhuang/pytorch
183
2568
# Owner(s): ["oncall: jit"] import torch import os import sys from torch.testing._internal.jit_utils import JitTestCase # Make the helper files in test/ importable pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) sys.path.append(pytorch_test_dir) if __name__ == '__main__': raise RuntimeError("This test file is not meant to be run directly, use:\n\n" "\tpython test/test_jit.py TESTNAME\n\n" "instead.") class TestModules(JitTestCase): def test_script_module_with_constants_list(self): """ Test that a module that has __constants__ set to something that is not a set can be scripted. """ # torch.nn.Linear has a __constants__ attribute defined # and intialized to a list. class Net(torch.nn.Linear): x: torch.jit.Final[int] def __init__(self): super().__init__(5, 10) self.x = 0 self.checkModule(Net(), (torch.randn(5),))
2.21875
2
pixelate_task_1.py
Swayamshu/Pixelate_Sample_Arena
0
2569
<filename>pixelate_task_1.py
import gym
import pix_sample_arena
import time
import pybullet as p
import pybullet_data
import cv2

if __name__ == "__main__":
    env = gym.make("pix_sample_arena-v0")
    x = 0
    while True:
        p.stepSimulation()
        time.sleep(100)
1.953125
2
tests/test_timeparser.py
vgoehler/python-i3-battery-block
0
2570
<gh_stars>0
from datetime import time

import pytest

from i3_battery_block_vgg.timeparser import __parse_time_manually
from i3_battery_block_vgg.timeparser import parse_time


@pytest.mark.parametrize(
    "time_input, expected",
    [
        ("12:13", time(hour=12, minute=13)),
        ("12:13:14", time(hour=12, minute=13, second=14)),
        ('00:54:00', time(hour=0, minute=54, second=0))
    ]
)
def test_manually_time_parsing(time_input: str, expected: time):
    assert __parse_time_manually(time_input) == expected, "manual time parsing has gone wrong"


@pytest.mark.parametrize(
    "time_input, expected",
    [
        ("12:13", time(hour=12, minute=13)),
        ("12:13:14", time(hour=12, minute=13, second=14)),
        ('00:54:00', time(hour=0, minute=54, second=0))
    ]
)
def test_time_parsing(time_input: str, expected: time):
    assert parse_time(time_input) == expected, "time parsing has gone wrong"
2.75
3
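A small usage sketch for the parse_time helper exercised by the tests above (the expected value follows the parametrized cases):

    from i3_battery_block_vgg.timeparser import parse_time

    remaining = parse_time("12:13")   # -> datetime.time(12, 13)
    print(remaining.hour, remaining.minute)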
frontends/PyCDE/test/polynomial.py
fyquah/circt
0
2571
<filename>frontends/PyCDE/test/polynomial.py # RUN: %PYTHON% %s 2>&1 | FileCheck %s from __future__ import annotations import mlir import pycde from pycde import (Input, Output, Parameter, module, externmodule, generator, types, dim) from circt.dialects import comb, hw @module def PolynomialCompute(coefficients: Coefficients): class PolynomialCompute: """Module to compute ax^3 + bx^2 + cx + d for design-time coefficients""" # Evaluate polynomial for 'x'. x = Input(types.i32) y = Output(types.int(8 * 4)) unused_parameter = Parameter(True) def __init__(self, name: str): """coefficients is in 'd' -> 'a' order.""" self.instanceName = name @staticmethod def get_module_name(): return "PolyComputeForCoeff_" + '_'.join( [str(x) for x in coefficients.coeff]) @generator def construct(mod): """Implement this module for input 'x'.""" x = mod.x taps = list() for power, coeff in enumerate(coefficients.coeff): coeffVal = hw.ConstantOp.create(types.i32, coeff) if power == 0: newPartialSum = coeffVal.result else: partialSum = taps[-1] if power == 1: currPow = x else: x_power = [x for i in range(power)] currPow = comb.MulOp.create(*x_power) newPartialSum = comb.AddOp.create( partialSum, comb.MulOp.create(coeffVal, currPow)) taps.append(newPartialSum) # Final output return {"y": taps[-1]} return PolynomialCompute @externmodule("supercooldevice") class CoolPolynomialCompute: x = Input(types.i32) y = Output(types.i32) def __init__(self, coefficients): self.coefficients = coefficients class Coefficients: def __init__(self, coeff): self.coeff = coeff class Polynomial(pycde.System): inputs = [] outputs = [('y', types.i32)] def build(self, top): i32 = types.i32 x = hw.ConstantOp.create(i32, 23) poly = PolynomialCompute(Coefficients([62, 42, 6]))("example", x=x) PolynomialCompute(coefficients=Coefficients([62, 42, 6]))("example2", x=poly.y) PolynomialCompute(Coefficients([1, 2, 3, 4, 5]))("example2", x=poly.y) CoolPolynomialCompute([4, 42], x=x) return {"y": poly.y} poly = Polynomial() poly.graph() # CHECK-LABEL: digraph "top" # CHECK: label="top"; # CHECK: [shape=record,label="{hw.constant\ni32\n\nvalue: 23 : i32}"]; poly.print() # CHECK-LABEL: hw.module @top() -> (%y: i32) # CHECK: [[REG0:%.+]] = "pycde.PolynomialCompute"(%c23_i32) {instanceName = "example", opNames = ["x"], parameters = {coefficients = {coeff = [62, 42, 6]}, module_name = "PolyComputeForCoeff_62_42_6", unused_parameter = true}, resultNames = ["y"]} : (i32) -> i32 # CHECK: [[REG1:%.+]] = "pycde.PolynomialCompute"([[REG0]]) {instanceName = "example2", opNames = ["x"], parameters = {coefficients = {coeff = [62, 42, 6]}, module_name = "PolyComputeForCoeff_62_42_6", unused_parameter = true}, resultNames = ["y"]} : (i32) -> i32 # CHECK: [[REG2:%.+]] = "pycde.PolynomialCompute"([[REG0]]) {instanceName = "example2", opNames = ["x"], parameters = {coefficients = {coeff = [1, 2, 3, 4, 5]}, module_name = "PolyComputeForCoeff_1_2_3_4_5", unused_parameter = true}, resultNames = ["y"]} : (i32) -> i32 # CHECK: [[REG3:%.+]] = "pycde.CoolPolynomialCompute"(%c23_i32) {coefficients = [4, 42], opNames = ["x"], parameters = {}, resultNames = ["y"]} : (i32) -> i32 # CHECK: hw.output [[REG0]] : i32 poly.generate() poly.print() # CHECK-LABEL: hw.module @top # CHECK: %example.y = hw.instance "example" @PolyComputeForCoeff_62_42_6(%c23_i32) {parameters = {}} : (i32) -> i32 # CHECK: %example2.y = hw.instance "example2" @PolyComputeForCoeff_62_42_6(%example.y) {parameters = {}} : (i32) -> i32 # CHECK: %example2.y_0 = hw.instance "example2" 
@PolyComputeForCoeff_1_2_3_4_5(%example.y) {parameters = {}} : (i32) -> i32 # CHECK: %pycde.CoolPolynomialCompute.y = hw.instance "pycde.CoolPolynomialCompute" @supercooldevice(%c23_i32) {coefficients = [4, 42], parameters = {}} : (i32) -> i32 # CHECK-LABEL: hw.module @PolyComputeForCoeff_62_42_6(%x: i32) -> (%y: i32) # CHECK: hw.constant 62 # CHECK: hw.constant 42 # CHECK: hw.constant 6 # CHECK-LABEL: hw.module @PolyComputeForCoeff_1_2_3_4_5(%x: i32) -> (%y: i32) # CHECK: hw.constant 1 # CHECK: hw.constant 2 # CHECK: hw.constant 3 # CHECK: hw.constant 4 # CHECK: hw.constant 5 # CHECK-NOT: hw.module @pycde.PolynomialCompute print("\n\n=== Verilog ===") # CHECK-LABEL: === Verilog === poly.print_verilog() # CHECK-LABEL: module PolyComputeForCoeff_62_42_6( # CHECK: input [31:0] x, # CHECK: output [31:0] y);
2.703125
3
python/examples/service_discovery.py
davidgcameron/arc
0
2572
#! /usr/bin/env python

import arc
import sys
import os

def retrieve(uc, endpoints):
    # The ComputingServiceRetriever needs the UserConfig to know which credentials
    # to use in case of HTTPS connections
    retriever = arc.ComputingServiceRetriever(uc, endpoints)
    # the constructor of the ComputingServiceRetriever returns immediately
    sys.stdout.write('\n')
    sys.stdout.write("ComputingServiceRetriever created with the following endpoints:\n")
    for endpoint in endpoints:
        sys.stdout.write("- %s\n" % endpoint.str())
    # here we want to wait until all the results arrive
    sys.stdout.write("Waiting for the results...\n")
    retriever.wait()
    return retriever

def example():
    # Creating a UserConfig object with the user's proxy
    # and the path of the trusted CA certificates
    uc = arc.UserConfig()
    uc.ProxyPath("/tmp/x509up_u%s" % os.getuid())
    uc.CACertificatesDirectory("/etc/grid-security/certificates")

    # Query two registries (index servers) for Computing Services
    registries = [
        # for the index1, we specify that it is an EGIIS service
        arc.Endpoint("index1.nordugrid.org:2135/Mds-Vo-name=NorduGrid,o=grid", arc.Endpoint.REGISTRY, "org.nordugrid.ldapegiis"),
        # for the arc-emi.grid.upjs.sk, we don't specify the type (the InterfaceName)
        # we let the system try all possibilities
        arc.Endpoint("arc-emi.grid.upjs.sk/O=Grid/Mds-Vo-Name=ARC-EMI", arc.Endpoint.REGISTRY)
    ]

    retriever = retrieve(uc, registries)

    # The retriever acts as a list containing all the discovered ComputingServices:
    sys.stdout.write("Discovered ComputingServices: %s\n" % (", ".join([service.Name for service in retriever])))

    # Get all the ExecutionTargets on these ComputingServices
    targets = retriever.GetExecutionTargets()
    sys.stdout.write("Number of ExecutionTargets on these ComputingServices: %d\n" % len(targets))

    # Query the local infosys (COMPUTINGINFO) of computing elements
    computing_elements = [
        # for piff, we specify that we want to query the LDAP GLUE2 tree
        arc.Endpoint("piff.hep.lu.se", arc.Endpoint.COMPUTINGINFO, "org.nordugrid.ldapglue2"),
        # for pgs03, we don't specify the interface, we let the system try all possibilities
        arc.Endpoint("pgs03.grid.upjs.sk", arc.Endpoint.COMPUTINGINFO)
    ]

    retriever2 = retrieve(uc, computing_elements)

    # Get all the ExecutionTargets on these ComputingServices
    targets2 = retriever2.GetExecutionTargets()

    sys.stdout.write("The discovered ExecutionTargets:\n")
    for target in targets2:
        sys.stdout.write("%s\n" % str(target))

    # Query both registries and computing elements at the same time:
    endpoints = [
        arc.Endpoint("arc-emi.grid.upjs.sk/O=Grid/Mds-Vo-Name=ARC-EMI", arc.Endpoint.REGISTRY),
        arc.Endpoint("piff.hep.lu.se", arc.Endpoint.COMPUTINGINFO, "org.nordugrid.ldapglue2")
    ]

    retriever3 = retrieve(uc, endpoints)

    sys.stdout.write("Discovered ComputingServices: %s\n" % (", ".join([service.Name for service in retriever3])))

# wait for all the background threads to finish before we destroy the objects they may use
import atexit
@atexit.register
def wait_exit():
    arc.ThreadInitializer().waitExit()

# arc.Logger.getRootLogger().addDestination(arc.LogStream(sys.stderr))
# arc.Logger.getRootLogger().setThreshold(arc.DEBUG)

# run the example
example()
2.515625
3
core/domain/rights_manager.py
netajik/oppia
0
2573
<reponame>netajik/oppia<gh_stars>0 # coding: utf-8 # # Copyright 2014 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Domain objects and functions that manage rights for various user actions.""" import logging from constants import constants from core.domain import activity_services from core.domain import role_services from core.domain import subscription_services from core.domain import user_services from core.platform import models import feconf import utils current_user_services = models.Registry.import_current_user_services() (collection_models, exp_models,) = models.Registry.import_models([ models.NAMES.collection, models.NAMES.exploration ]) # IMPORTANT: Ensure that all changes to how these cmds are interpreted preserve # backward-compatibility with previous exploration snapshots in the datastore. # Do not modify the definitions of CMD keys that already exist. CMD_CREATE_NEW = 'create_new' CMD_CHANGE_ROLE = 'change_role' CMD_CHANGE_EXPLORATION_STATUS = 'change_exploration_status' CMD_CHANGE_COLLECTION_STATUS = 'change_collection_status' CMD_CHANGE_PRIVATE_VIEWABILITY = 'change_private_viewability' CMD_RELEASE_OWNERSHIP = 'release_ownership' CMD_UPDATE_FIRST_PUBLISHED_MSEC = 'update_first_published_msec' ACTIVITY_STATUS_PRIVATE = constants.ACTIVITY_STATUS_PRIVATE ACTIVITY_STATUS_PUBLIC = constants.ACTIVITY_STATUS_PUBLIC ROLE_OWNER = 'owner' ROLE_EDITOR = 'editor' ROLE_TRANSLATOR = 'translator' ROLE_VIEWER = 'viewer' ROLE_NONE = 'none' ROLE_ADMIN = 'admin' ROLE_MODERATOR = 'moderator' class ActivityRights(object): """Domain object for the rights/publication status of an activity (an exploration or a collection). """ def __init__( self, exploration_id, owner_ids, editor_ids, translator_ids, viewer_ids, community_owned=False, cloned_from=None, status=ACTIVITY_STATUS_PRIVATE, viewable_if_private=False, first_published_msec=None): self.id = exploration_id self.owner_ids = owner_ids self.editor_ids = editor_ids self.translator_ids = translator_ids self.viewer_ids = viewer_ids self.community_owned = community_owned self.cloned_from = cloned_from self.status = status self.viewable_if_private = viewable_if_private self.first_published_msec = first_published_msec def validate(self): """Validates an ActivityRights object. Raises: utils.ValidationError: if any of the owners, editors, translators and viewers lists overlap, or if a community-owned exploration has owners, editors, translators or viewers specified. 
""" if self.community_owned: if (self.owner_ids or self.editor_ids or self.translator_ids or self.viewer_ids): raise utils.ValidationError( 'Community-owned explorations should have no owners, ' 'editors, translators or viewers specified.') if self.community_owned and self.status == ACTIVITY_STATUS_PRIVATE: raise utils.ValidationError( 'Community-owned explorations cannot be private.') if self.status != ACTIVITY_STATUS_PRIVATE and self.viewer_ids: raise utils.ValidationError( 'Public explorations should have no viewers specified.') owner_editor = set(self.owner_ids).intersection(set(self.editor_ids)) owner_translator = set(self.owner_ids).intersection( set(self.translator_ids)) owner_viewer = set(self.owner_ids).intersection(set(self.viewer_ids)) editor_translator = set(self.editor_ids).intersection( set(self.translator_ids)) editor_viewer = set(self.editor_ids).intersection(set(self.viewer_ids)) translator_viewer = set(self.editor_ids).intersection( set(self.viewer_ids)) if owner_editor: raise utils.ValidationError( 'A user cannot be both an owner and an editor: %s' % owner_editor) if owner_translator: raise utils.ValidationError( 'A user cannot be both an owner and a translator: %s' % owner_translator) if owner_viewer: raise utils.ValidationError( 'A user cannot be both an owner and a viewer: %s' % owner_viewer) if editor_translator: raise utils.ValidationError( 'A user cannot be both an editor and a translator: %s' % editor_translator) if editor_viewer: raise utils.ValidationError( 'A user cannot be both an editor and a viewer: %s' % editor_viewer) if translator_viewer: raise utils.ValidationError( 'A user cannot be both a translator and a viewer: %s' % translator_viewer) def to_dict(self): """Returns a dict suitable for use by the frontend. Returns: dict. A dict version of ActivityRights suitable for use by the frontend. """ if self.community_owned: return { 'cloned_from': self.cloned_from, 'status': self.status, 'community_owned': True, 'owner_names': [], 'editor_names': [], 'translator_names': [], 'viewer_names': [], 'viewable_if_private': self.viewable_if_private, } else: return { 'cloned_from': self.cloned_from, 'status': self.status, 'community_owned': False, 'owner_names': user_services.get_human_readable_user_ids( self.owner_ids), 'editor_names': user_services.get_human_readable_user_ids( self.editor_ids), 'translator_names': user_services.get_human_readable_user_ids( self.translator_ids), 'viewer_names': user_services.get_human_readable_user_ids( self.viewer_ids), 'viewable_if_private': self.viewable_if_private, } def is_owner(self, user_id): """Checks whether given user is owner of activity. Args: user_id: str or None. Id of the user. Returns: bool. Whether user is an activity owner. """ return bool(user_id in self.owner_ids) def is_editor(self, user_id): """Checks whether given user is editor of activity. Args: user_id: str or None. Id of the user. Returns: bool. Whether user is an activity editor. """ return bool(user_id in self.editor_ids) def is_translator(self, user_id): """Checks whether given user is translator of activity. Args: user_id: str or None. Id of the user. Returns: bool. Whether user is an activity translator. """ return bool(user_id in self.translator_ids) def is_viewer(self, user_id): """Checks whether given user is viewer of activity. Args: user_id: str or None. Id of the user. Returns: bool. Whether user is an activity viewer. """ return bool(user_id in self.viewer_ids) def is_published(self): """Checks whether activity is published. Returns: bool. 
Whether activity is published. """ return bool(self.status == ACTIVITY_STATUS_PUBLIC) def is_private(self): """Checks whether activity is private. Returns: bool. Whether activity is private. """ return bool(self.status == ACTIVITY_STATUS_PRIVATE) def get_activity_rights_from_model(activity_rights_model, activity_type): """Constructs an ActivityRights object from the given activity rights model. Args: activity_rights_model: ActivityRightsModel. Activity rights from the datastore. activity_type: str. The type of activity. Possible values: constants.ACTIVITY_TYPE_EXPLORATION constants.ACTIVITY_TYPE_COLLECTION Returns: ActivityRights. The rights object created from the model. """ return ActivityRights( activity_rights_model.id, activity_rights_model.owner_ids, activity_rights_model.editor_ids, activity_rights_model.translator_ids, activity_rights_model.viewer_ids, community_owned=activity_rights_model.community_owned, cloned_from=( activity_rights_model.cloned_from if activity_type == constants.ACTIVITY_TYPE_EXPLORATION else None), status=activity_rights_model.status, viewable_if_private=activity_rights_model.viewable_if_private, first_published_msec=activity_rights_model.first_published_msec ) def _save_activity_rights( committer_id, activity_rights, activity_type, commit_message, commit_cmds): """Saves an ExplorationRights or CollectionRights domain object to the datastore. Args: committer_id: str. ID of the committer. activity_rights: ActivityRights. The rights object for the given activity. activity_type: str. The type of activity. Possible values: constants.ACTIVITY_TYPE_EXPLORATION constants.ACTIVITY_TYPE_COLLECTION commit_message: str. Descriptive message for the commit. commit_cmds: list(dict). A list of commands describing what kind of commit was done. """ activity_rights.validate() if activity_type == constants.ACTIVITY_TYPE_EXPLORATION: model_cls = exp_models.ExplorationRightsModel elif activity_type == constants.ACTIVITY_TYPE_COLLECTION: model_cls = collection_models.CollectionRightsModel model = model_cls.get(activity_rights.id, strict=False) model.owner_ids = activity_rights.owner_ids model.editor_ids = activity_rights.editor_ids model.viewer_ids = activity_rights.viewer_ids model.translator_ids = activity_rights.translator_ids model.community_owned = activity_rights.community_owned model.status = activity_rights.status model.viewable_if_private = activity_rights.viewable_if_private model.first_published_msec = activity_rights.first_published_msec model.commit(committer_id, commit_message, commit_cmds) def _update_exploration_summary(activity_rights): """Updates the exploration summary for the activity associated with the given rights object. The ID of rights object is the same as the ID of associated activity. Args: activity_rights: ActivityRights. The rights object for the given activity. """ # TODO(msl): get rid of inline imports by refactoring code. from core.domain import exp_services exp_services.update_exploration_summary( activity_rights.id, None) def _update_collection_summary(activity_rights): """Updates the collection summary for the given activity associated with the given rights object. The ID of rights object is the same as the ID of associated activity. Args: activity_rights: ActivityRights. The rights object for the given activity. 
""" from core.domain import collection_services collection_services.update_collection_summary( activity_rights.id, None) def _update_activity_summary(activity_type, activity_rights): """Updates the activity summary for the given activity associated with the given rights object. The ID of rights object is the same as the ID of associated activity. Args: activity_type: str. The type of activity. Possible values: constants.ACTIVITY_TYPE_EXPLORATION constants.ACTIVITY_TYPE_COLLECTION activity_rights: ActivityRights. The rights object for the given activity. """ if activity_type == constants.ACTIVITY_TYPE_EXPLORATION: _update_exploration_summary(activity_rights) elif activity_type == constants.ACTIVITY_TYPE_COLLECTION: _update_collection_summary(activity_rights) def update_activity_first_published_msec( activity_type, activity_id, first_published_msec): """Updates the first_published_msec field for the given activity. The caller is responsible for ensuring that this value is not already set before updating it. Args: activity_type: str. The type of activity. Possible values: constants.ACTIVITY_TYPE_EXPLORATION constants.ACTIVITY_TYPE_COLLECTION activity_id: str. ID of the activity. first_published_msec: float. First publication time in milliseconds since the Epoch. """ activity_rights = _get_activity_rights(activity_type, activity_id) commit_cmds = [{ 'cmd': CMD_UPDATE_FIRST_PUBLISHED_MSEC, 'old_first_published_msec': activity_rights.first_published_msec, 'new_first_published_msec': first_published_msec }] activity_rights.first_published_msec = first_published_msec _save_activity_rights( feconf.SYSTEM_COMMITTER_ID, activity_rights, activity_type, 'set first published time in msec', commit_cmds) def create_new_exploration_rights(exploration_id, committer_id): """Creates a new exploration rights object and saves it to the datastore. Subscribes the committer to the new exploration. Args: exploration_id: str. ID of the exploration. committer_id: str. ID of the committer. """ exploration_rights = ActivityRights( exploration_id, [committer_id], [], [], []) commit_cmds = [{'cmd': CMD_CREATE_NEW}] exp_models.ExplorationRightsModel( id=exploration_rights.id, owner_ids=exploration_rights.owner_ids, editor_ids=exploration_rights.editor_ids, translator_ids=exploration_rights.translator_ids, viewer_ids=exploration_rights.viewer_ids, community_owned=exploration_rights.community_owned, status=exploration_rights.status, viewable_if_private=exploration_rights.viewable_if_private, first_published_msec=exploration_rights.first_published_msec, ).commit(committer_id, 'Created new exploration', commit_cmds) subscription_services.subscribe_to_exploration( committer_id, exploration_id) def get_exploration_rights(exploration_id, strict=True): """Retrieves the rights for this exploration from the datastore. Args: exploration_id: str. ID of the exploration. strict: bool. Whether to raise an error if there is no exploration matching the given ID. Returns: ActivityRights. The rights object for the given exploration. Raises: EntityNotFoundError. The exploration with ID exploration_id was not found in the datastore. """ model = exp_models.ExplorationRightsModel.get( exploration_id, strict=strict) if model is None: return None return get_activity_rights_from_model( model, constants.ACTIVITY_TYPE_EXPLORATION) def get_multiple_exploration_rights_by_ids(exp_ids): """Returns a list of ActivityRights objects for given exploration ids. Args: exp_ids: list(str). List of exploration ids. Returns: list(ActivityRights or None). 
List of rights object containing ActivityRights object for existing exploration or None. """ exp_rights_models = exp_models.ExplorationRightsModel.get_multi( exp_ids) exp_models_list = [] for model in exp_rights_models: if model is None: exp_models_list.append(None) else: exp_models_list.append( get_activity_rights_from_model( model, constants.ACTIVITY_TYPE_EXPLORATION)) return exp_models_list def is_exploration_private(exploration_id): """Returns whether exploration is private. Args: exploration_id: str. ID of the exploration. Returns: bool. Whether the exploration is private or not. """ exploration_rights = get_exploration_rights(exploration_id) return exploration_rights.status == ACTIVITY_STATUS_PRIVATE def is_exploration_public(exploration_id): """Returns whether exploration is public. Args: exploration_id: str. ID of the exploration. Returns: bool. Whether the exploration is public. """ exploration_rights = get_exploration_rights(exploration_id) return exploration_rights.status == ACTIVITY_STATUS_PUBLIC def is_exploration_cloned(exploration_id): """Returns whether the exploration is a clone of another exploration. Args: exploration_id: str. ID of the exploration. Returns: bool. Whether the exploration is a clone of another exploration. """ exploration_rights = get_exploration_rights(exploration_id) return bool(exploration_rights.cloned_from) def create_new_collection_rights(collection_id, committer_id): """Creates a new collection rights object and saves it to the datastore. Subscribes the committer to the new collection. Args: collection_id: str. ID of the collection. committer_id: str. ID of the committer. """ collection_rights = ActivityRights( collection_id, [committer_id], [], [], []) commit_cmds = [{'cmd': CMD_CREATE_NEW}] collection_models.CollectionRightsModel( id=collection_rights.id, owner_ids=collection_rights.owner_ids, editor_ids=collection_rights.editor_ids, translator_ids=collection_rights.translator_ids, viewer_ids=collection_rights.viewer_ids, community_owned=collection_rights.community_owned, status=collection_rights.status, viewable_if_private=collection_rights.viewable_if_private, first_published_msec=collection_rights.first_published_msec ).commit(committer_id, 'Created new collection', commit_cmds) subscription_services.subscribe_to_collection(committer_id, collection_id) def get_collection_rights(collection_id, strict=True): """Retrieves the rights for this collection from the datastore. Args: collection_id: str. ID of the collection. strict: bool. Whether to raise an error if ID is not found. Returns: ActivityRights. The rights object for the collection. Raises: EntityNotFoundError. The collection with ID collection_id is not found in the datastore. """ model = collection_models.CollectionRightsModel.get( collection_id, strict=strict) if model is None: return None return get_activity_rights_from_model( model, constants.ACTIVITY_TYPE_COLLECTION) def get_collection_owner_names(collection_id): """Retrieves the owners for this collection from the datastore. Args: collection_id: str. ID of the collection. Returns: list(str). Human-readable usernames (or truncated email addresses) of owners for this collection. """ collection_rights = get_collection_rights(collection_id) return user_services.get_human_readable_user_ids( collection_rights.owner_ids) def is_collection_private(collection_id): """Returns whether the collection is private. Args: collection_id: str. ID of the collection. Returns: bool. Whether the collection is private. 
""" collection_rights = get_collection_rights(collection_id) return collection_rights.status == ACTIVITY_STATUS_PRIVATE def is_collection_public(collection_id): """Returns whether the collection is public. Args: collection_id: str. ID of the collection. Returns: bool. Whether the collection is public. """ collection_rights = get_collection_rights(collection_id) return collection_rights.status == ACTIVITY_STATUS_PUBLIC def _get_activity_rights(activity_type, activity_id): """Retrieves the rights object for the given activity based on its type. Args: activity_type: str. The type of activity. Possible values: constants.ACTIVITY_TYPE_EXPLORATION constants.ACTIVITY_TYPE_COLLECTION activity_id: str. ID of the activity. Returns: ActivityRights. The rights object associated with the given activity. Raises: Exception. activity_type provided is unknown. """ if activity_type == constants.ACTIVITY_TYPE_EXPLORATION: return get_exploration_rights(activity_id, strict=False) elif activity_type == constants.ACTIVITY_TYPE_COLLECTION: return get_collection_rights(activity_id, strict=False) else: raise Exception( 'Cannot get activity rights for unknown activity type: %s' % ( activity_type)) def check_can_access_activity(user, activity_rights): """Checks whether the user can access given activity. Args: user: UserActionsInfo. Object having user_id, role and actions for given user. activity_rights: AcitivityRights or None. Rights object for the given activity. Returns: bool. Whether the given activity can be accessed by the given user. """ if activity_rights is None: return False elif activity_rights.is_published(): return bool( role_services.ACTION_PLAY_ANY_PUBLIC_ACTIVITY in user.actions) elif activity_rights.is_private(): return bool( (role_services.ACTION_PLAY_ANY_PRIVATE_ACTIVITY in user.actions) or activity_rights.is_viewer(user.user_id) or activity_rights.is_owner(user.user_id) or activity_rights.is_editor(user.user_id) or activity_rights.is_translator(user.user_id) or activity_rights.viewable_if_private) def check_can_edit_activity(user, activity_rights): """Checks whether the user can edit given activity. Args: user: UserActionsInfo. Object having user_id, role and actions for given user. activity_rights: ActivityRights or None. Rights object for the given activity. Returns: bool. Whether the given user can edit this activity. """ if activity_rights is None: return False if role_services.ACTION_EDIT_OWNED_ACTIVITY not in user.actions: return False if (activity_rights.is_owner(user.user_id) or activity_rights.is_editor(user.user_id)): return True if (activity_rights.community_owned or (role_services.ACTION_EDIT_ANY_ACTIVITY in user.actions)): return True if (activity_rights.is_published() and (role_services.ACTION_EDIT_ANY_PUBLIC_ACTIVITY in user.actions)): return True return False def check_can_translate_activity(user, activity_rights): """Checks whether the user can translate given activity. Args: user: UserActionsInfo. Object having user_id, role and actions for given user. activity_rights: ActivityRights or None. Rights object for the given activity. Returns: bool. Whether the given user can translate this activity. 
""" if activity_rights is None: return False if role_services.ACTION_EDIT_OWNED_ACTIVITY not in user.actions: return False if (activity_rights.is_owner(user.user_id) or activity_rights.is_editor(user.user_id) or activity_rights.is_translator(user.user_id)): return True if (activity_rights.community_owned or (role_services.ACTION_EDIT_ANY_ACTIVITY in user.actions)): return True if (activity_rights.is_published() and (role_services.ACTION_EDIT_ANY_PUBLIC_ACTIVITY in user.actions)): return True return False def check_can_delete_activity(user, activity_rights): """Checks whether the user can delete given activity. Args: user: UserActionsInfo. Object having user_id, role and actions for given user. activity_rights: ActivityRights or None. Rights object for the given activity. Returns: bool. Whether the user can delete given activity. """ if activity_rights is None: return False if role_services.ACTION_DELETE_ANY_ACTIVITY in user.actions: return True elif (activity_rights.is_private() and (role_services.ACTION_DELETE_OWNED_PRIVATE_ACTIVITY in user.actions) and activity_rights.is_owner(user.user_id)): return True elif (activity_rights.is_published() and (role_services.ACTION_DELETE_ANY_PUBLIC_ACTIVITY in user.actions)): return True return False def check_can_modify_activity_roles(user, activity_rights): """Checks whether the user can modify roles for given activity. Args: user: UserActionsInfo. Object having user_id, role and actions for given user. activity_rights: ActivityRights or None. Rights object for the given activity. Returns: bool. Whether the user can modify roles for given activity. """ if activity_rights is None: return False if (activity_rights.community_owned or activity_rights.cloned_from): return False if (role_services.ACTION_MODIFY_ROLES_FOR_ANY_ACTIVITY in user.actions): return True if (role_services.ACTION_MODIFY_ROLES_FOR_OWNED_ACTIVITY in user.actions): if activity_rights.is_owner(user.user_id): return True return False def check_can_release_ownership(user, activity_rights): """Checks whether the user can release ownership for given activity. Args: user: UserActionsInfo. Object having user_id, role and actions for given user. activity_rights: ActivityRights or None. Rights object for the given activity. Returns: bool. Whether the user can release ownership for given activity. """ if activity_rights is None: return False if activity_rights.is_private(): return False return check_can_modify_activity_roles( user, activity_rights) def check_can_publish_activity(user, activity_rights): """Checks whether the user can publish given activity. Args: user: UserActionsInfo. Object having user_id, role and actions for given user. activity_rights: ActivityRights or None. Rights object for the given activity. Returns: bool. Whether the user can publish given activity. """ if activity_rights is None: return False if activity_rights.cloned_from: return False if activity_rights.is_published(): return False if role_services.ACTION_PUBLISH_ANY_ACTIVITY in user.actions: return True if role_services.ACTION_PUBLISH_OWNED_ACTIVITY in user.actions: if activity_rights.is_owner(user.user_id): return True return False def check_can_unpublish_activity(user, activity_rights): """Checks whether the user can unpublish given activity. Args: user: UserActionsInfo. Object having user_id, role and actions for given user. activity_rights: ActivityRights or None. Rights object for the given activity. Returns: bool. Whether the user can unpublish given activity. 
""" if activity_rights is None: return False if activity_rights.community_owned: return False if activity_rights.is_published(): if role_services.ACTION_UNPUBLISH_ANY_PUBLIC_ACTIVITY in user.actions: return True return False def _assign_role( committer, assignee_id, new_role, activity_id, activity_type): """Assigns a new role to the user. Args: committer: UserActionsInfo. UserActionInfo object for the user who is performing the action. assignee_id: str. ID of the user whose role is being changed. new_role: str. The name of the new role: One of ROLE_OWNER ROLE_EDITOR ROLE_TRANSLATOR ROLE_VIEWER activity_id: str. ID of the activity. activity_type: str. The type of activity. Possible values: constants.ACTIVITY_TYPE_EXPLORATION constants.ACTIVITY_TYPE_COLLECTION Raises: Exception. The committer does not have rights to modify a role. Exception. The user already owns the activity. Exception. The user can already edit the activity. Exception. The user can already translate the activity. Exception. The activity is already publicly editable. Exception. The activity is already publicly translatable. Exception. The user can already view the activity. Exception. The activity is already publicly viewable. Exception. The role is invalid. """ committer_id = committer.user_id activity_rights = _get_activity_rights(activity_type, activity_id) if not check_can_modify_activity_roles(committer, activity_rights): logging.error( 'User %s tried to allow user %s to be a(n) %s of activity %s ' 'but was refused permission.' % ( committer_id, assignee_id, new_role, activity_id)) raise Exception( 'UnauthorizedUserException: Could not assign new role.') assignee_username = user_services.get_username(assignee_id) old_role = ROLE_NONE if new_role == ROLE_OWNER: if activity_rights.is_owner(assignee_id): raise Exception('This user already owns this %s.' % activity_type) activity_rights.owner_ids.append(assignee_id) if assignee_id in activity_rights.viewer_ids: activity_rights.viewer_ids.remove(assignee_id) old_role = ROLE_VIEWER if assignee_id in activity_rights.editor_ids: activity_rights.editor_ids.remove(assignee_id) old_role = ROLE_EDITOR if assignee_id in activity_rights.translator_ids: activity_rights.translator_ids.remove(assignee_id) old_role = ROLE_TRANSLATOR elif new_role == ROLE_EDITOR: if (activity_rights.is_editor(assignee_id) or activity_rights.is_owner(assignee_id)): raise Exception( 'This user already can edit this %s.' % activity_type) if activity_rights.community_owned: raise Exception( 'Community-owned %ss can be edited by anyone.' % activity_type) activity_rights.editor_ids.append(assignee_id) if assignee_id in activity_rights.translator_ids: activity_rights.translator_ids.remove(assignee_id) old_role = ROLE_TRANSLATOR if assignee_id in activity_rights.viewer_ids: activity_rights.viewer_ids.remove(assignee_id) old_role = ROLE_VIEWER elif new_role == ROLE_TRANSLATOR: if (activity_rights.is_editor(assignee_id) or activity_rights.is_translator(assignee_id) or activity_rights.is_owner(assignee_id)): raise Exception( 'This user already can translate this %s.' % activity_type) if activity_rights.community_owned: raise Exception( 'Community-owned %ss can be translated by anyone.' 
% activity_type) activity_rights.translator_ids.append(assignee_id) if assignee_id in activity_rights.viewer_ids: activity_rights.viewer_ids.remove(assignee_id) old_role = ROLE_VIEWER elif new_role == ROLE_VIEWER: if (activity_rights.is_owner(assignee_id) or activity_rights.is_editor(assignee_id) or activity_rights.is_viewer(assignee_id)): raise Exception( 'This user already can view this %s.' % activity_type) if activity_rights.status != ACTIVITY_STATUS_PRIVATE: raise Exception( 'Public %ss can be viewed by anyone.' % activity_type) activity_rights.viewer_ids.append(assignee_id) else: raise Exception('Invalid role: %s' % new_role) commit_message = 'Changed role of %s from %s to %s' % ( assignee_username, old_role, new_role) commit_cmds = [{ 'cmd': CMD_CHANGE_ROLE, 'assignee_id': assignee_id, 'old_role': old_role, 'new_role': new_role }] _save_activity_rights( committer_id, activity_rights, activity_type, commit_message, commit_cmds) _update_activity_summary(activity_type, activity_rights) def _release_ownership_of_activity(committer, activity_id, activity_type): """Releases ownership of the given activity to the community. Args: committer: UserActionsInfo. UserActionsInfo object for the user who is performing the action. activity_id: str. ID of the activity. activity_type: str. The type of activity. Possible values: constants.ACTIVITY_TYPE_EXPLORATION constants.ACTIVITY_TYPE_COLLECTION Raise: Exception. The committer does not have release rights. """ committer_id = committer.user_id activity_rights = _get_activity_rights(activity_type, activity_id) if not check_can_release_ownership(committer, activity_rights): logging.error( 'User %s tried to release ownership of %s %s but was ' 'refused permission.' % (committer_id, activity_type, activity_id)) raise Exception( 'The ownership of this %s cannot be released.' % activity_type) activity_rights.community_owned = True activity_rights.owner_ids = [] activity_rights.editor_ids = [] activity_rights.viewer_ids = [] commit_cmds = [{ 'cmd': CMD_RELEASE_OWNERSHIP, }] _save_activity_rights( committer_id, activity_rights, activity_type, '%s ownership released to the community.' % activity_type, commit_cmds) _update_activity_summary(activity_type, activity_rights) def _change_activity_status( committer_id, activity_id, activity_type, new_status, commit_message): """Changes the status of the given activity. Args: committer_id: str. ID of the user who is performing the update action. activity_id: str. ID of the activity. activity_type: str. The type of activity. Possible values: constants.ACTIVITY_TYPE_EXPLORATION constants.ACTIVITY_TYPE_COLLECTION new_status: str. The new status of the activity. commit_message: str. The human-written commit message for this change. 
""" activity_rights = _get_activity_rights(activity_type, activity_id) old_status = activity_rights.status activity_rights.status = new_status if activity_type == constants.ACTIVITY_TYPE_EXPLORATION: cmd_type = CMD_CHANGE_EXPLORATION_STATUS elif activity_type == constants.ACTIVITY_TYPE_COLLECTION: cmd_type = CMD_CHANGE_COLLECTION_STATUS commit_cmds = [{ 'cmd': cmd_type, 'old_status': old_status, 'new_status': new_status }] if new_status != ACTIVITY_STATUS_PRIVATE: activity_rights.viewer_ids = [] if activity_rights.first_published_msec is None: activity_rights.first_published_msec = ( utils.get_current_time_in_millisecs()) _save_activity_rights( committer_id, activity_rights, activity_type, commit_message, commit_cmds) _update_activity_summary(activity_type, activity_rights) def _publish_activity(committer, activity_id, activity_type): """Publishes the given activity. Args: committer: UserActionsInfo. UserActionsInfo object for the committer. activity_id: str. ID of the activity. activity_type: str. The type of activity. Possible values: constants.ACTIVITY_TYPE_EXPLORATION constants.ACTIVITY_TYPE_COLLECTION Raises: Exception. The committer does not have rights to publish the activity. """ committer_id = committer.user_id activity_rights = _get_activity_rights(activity_type, activity_id) if not check_can_publish_activity(committer, activity_rights): logging.error( 'User %s tried to publish %s %s but was refused ' 'permission.' % (committer_id, activity_type, activity_id)) raise Exception('This %s cannot be published.' % activity_type) _change_activity_status( committer_id, activity_id, activity_type, ACTIVITY_STATUS_PUBLIC, '%s published.' % activity_type) def _unpublish_activity(committer, activity_id, activity_type): """Unpublishes the given activity. Args: committer: UserActionsInfo. UserActionsInfo object for the committer. activity_id: str. ID of the activity. activity_type: str. The type of activity. Possible values: constants.ACTIVITY_TYPE_EXPLORATION constants.ACTIVITY_TYPE_COLLECTION Raises: Exception. The committer does not have rights to unpublish the activity. """ committer_id = committer.user_id activity_rights = _get_activity_rights(activity_type, activity_id) if not check_can_unpublish_activity(committer, activity_rights): logging.error( 'User %s tried to unpublish %s %s but was refused ' 'permission.' % (committer_id, activity_type, activity_id)) raise Exception('This %s cannot be unpublished.' % activity_type) _change_activity_status( committer_id, activity_id, activity_type, ACTIVITY_STATUS_PRIVATE, '%s unpublished.' % activity_type) activity_services.remove_featured_activity(activity_type, activity_id) # Rights functions for activities. def assign_role_for_exploration( committer, exploration_id, assignee_id, new_role): """Assigns a user to the given role and subscribes the assignee to future exploration updates. The caller should ensure that assignee_id corresponds to a valid user in the system. Args: committer: UserActionsInfo. The UserActionsInfo object for the committer. exploration_id: str. ID of the exploration. assignee_id: str. ID of the user whose role is being changed. new_role: str. The name of the new role: One of ROLE_OWNER ROLE_EDITOR ROLE_TRANSLATOR Raises: Exception. This could potentially throw an exception from _assign_role. 
""" _assign_role( committer, assignee_id, new_role, exploration_id, constants.ACTIVITY_TYPE_EXPLORATION) if new_role in [ROLE_OWNER, ROLE_EDITOR, ROLE_TRANSLATOR]: subscription_services.subscribe_to_exploration( assignee_id, exploration_id) def release_ownership_of_exploration(committer, exploration_id): """Releases ownership of the given exploration to the community. Args: committer: UserActionsInfo. UserActionsInfo object for the committer. exploration_id: str. ID of the exploration. Raises: Exception. This could potentially throw an exception from _release_ownership_of_activity. """ _release_ownership_of_activity( committer, exploration_id, constants.ACTIVITY_TYPE_EXPLORATION) def set_private_viewability_of_exploration( committer, exploration_id, viewable_if_private): """Sets the viewable_if_private attribute for the given exploration's rights object. If viewable_if_private is True, this allows a private exploration to be viewed by anyone with the link. Args: committer: UserActionsInfo. UserActionsInfo object for the committer. exploration_id: str. ID of the exploration. viewable_if_private: bool. Whether the exploration should be made viewable (by anyone with the link). Raises: Exception. The committer does not have the permission to perform change action. Exception. If the viewable_if_private property is already as desired. """ committer_id = committer.user_id exploration_rights = get_exploration_rights(exploration_id) # The user who can publish activity can change its private viewability. if not check_can_publish_activity(committer, exploration_rights): logging.error( 'User %s tried to change private viewability of exploration %s ' 'but was refused permission.' % (committer_id, exploration_id)) raise Exception( 'The viewability status of this exploration cannot be changed.') old_viewable_if_private = exploration_rights.viewable_if_private if old_viewable_if_private == viewable_if_private: raise Exception( 'Trying to change viewability status of this exploration to %s, ' 'but that is already the current value.' % viewable_if_private) exploration_rights.viewable_if_private = viewable_if_private commit_cmds = [{ 'cmd': CMD_CHANGE_PRIVATE_VIEWABILITY, 'old_viewable_if_private': old_viewable_if_private, 'new_viewable_if_private': viewable_if_private, }] commit_message = ( 'Made exploration viewable to anyone with the link.' if viewable_if_private else 'Made exploration viewable only to invited playtesters.') _save_activity_rights( committer_id, exploration_rights, constants.ACTIVITY_TYPE_EXPLORATION, commit_message, commit_cmds) _update_exploration_summary(exploration_rights) def publish_exploration(committer, exploration_id): """Publishes the given exploration. It is the responsibility of the caller to check that the exploration is valid prior to publication. Args: committer: UserActionsInfo. UserActionsInfo object for the committer. exploration_id: str. ID of the exploration. Raises: Exception. This could potentially throw an exception from _publish_activity. """ _publish_activity( committer, exploration_id, constants.ACTIVITY_TYPE_EXPLORATION) def unpublish_exploration(committer, exploration_id): """Unpublishes the given exploration. Args: committer: UserActionsInfo. UserActionsInfo object for the committer. exploration_id: str. ID of the exploration. Raises: Exception. This could potentially throw an exception from _unpublish_activity. """ _unpublish_activity( committer, exploration_id, constants.ACTIVITY_TYPE_EXPLORATION) # Rights functions for collections. 
def assign_role_for_collection( committer, collection_id, assignee_id, new_role): """Assign the given user to the given role and subscribes the assignee to future collection updates. The caller should ensure that assignee_id corresponds to a valid user in the system. Args: committer: UserActionsInfo. UserActionsInfo object for the committer. collection_id: str. ID of the collection. assignee_id: str. ID of the user whose role is being changed. new_role: str. The name of the new role: One of ROLE_OWNER ROLE_EDITOR Raises: Exception. This could potentially throw an exception from _assign_role. """ _assign_role( committer, assignee_id, new_role, collection_id, constants.ACTIVITY_TYPE_COLLECTION) if new_role in [ROLE_OWNER, ROLE_EDITOR]: subscription_services.subscribe_to_collection( assignee_id, collection_id) def release_ownership_of_collection(committer, collection_id): """Releases ownership of the given collection to the community. Args: committer: UserActionsInfo. UserActionsInfo object for the committer. collection_id: str. ID of the collection. Raises: Exception. This could potentially throw an exception from _release_ownership_of_activity. """ _release_ownership_of_activity( committer, collection_id, constants.ACTIVITY_TYPE_COLLECTION) def publish_collection(committer, collection_id): """Publishes the given collection. It is the responsibility of the caller to check that the collection is valid prior to publication. Args: committer: UserActionsInfo. UserActionsInfo object for the committer. collection_id: str. ID of the collection. Raises: Exception. This could potentially throw an exception from _publish_activity. """ _publish_activity( committer, collection_id, constants.ACTIVITY_TYPE_COLLECTION) def unpublish_collection(committer, collection_id): """Unpublishes the given collection. Args: committer: UserActionsInfo. UserActionsInfo object for the committer. collection_id: str. ID of the collection. Raises: Exception. This could potentially throw an exception from _unpublish_activity. """ _unpublish_activity( committer, collection_id, constants.ACTIVITY_TYPE_COLLECTION)
1.414063
1
pkg_resources/_vendor/packaging/_typing.py
GDGSNF/setuptools
0
2574
"""For neatly implementing static typing in packaging. `mypy` - the static type analysis tool we use - uses the `typing` module, which provides core functionality fundamental to mypy's functioning. Generally, `typing` would be imported at runtime and used in that fashion - it acts as a no-op at runtime and does not have any run-time overhead by design. As it turns out, `typing` is not vendorable - it uses separate sources for Python 2/Python 3. Thus, this codebase can not expect it to be present. To work around this, mypy allows the typing import to be behind a False-y optional to prevent it from running at runtime and type-comments can be used to remove the need for the types to be accessible directly during runtime. This module provides the False-y guard in a nicely named fashion so that a curious maintainer can reach here to read this. In packaging, all static-typing related imports should be guarded as follows: from packaging._typing import TYPE_CHECKING if TYPE_CHECKING: from typing import ... Ref: https://github.com/python/mypy/issues/3216 """ __all__ = ["TYPE_CHECKING", "cast"] # The TYPE_CHECKING constant defined by the typing module is False at runtime # but True while type checking. TYPE_CHECKING = False # pragma: no cover # typing's cast syntax requires calling typing.cast at runtime, but we don't # want to import typing at runtime. Here, we inform the type checkers that # we're importing `typing.cast` as `cast` and re-implement typing.cast's # runtime behavior in a block that is ignored by type checkers. if TYPE_CHECKING: # pragma: no cover # not executed at runtime from typing import cast else: # executed at runtime def cast(type_, value): # noqa return value
2.59375
3
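A short sketch of the guard pattern the module above documents; the consuming module, function and annotation are hypothetical examples:

    from packaging._typing import TYPE_CHECKING, cast

    if TYPE_CHECKING:  # evaluated only by static type checkers, never at runtime
        from typing import Optional

    def normalize(value):
        # type: (str) -> Optional[str]
        return cast("Optional[str]", value.strip() or None)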
selfdrive/car/toyota/carcontroller.py
aolin480/openpilot
70
2575
<filename>selfdrive/car/toyota/carcontroller.py from cereal import car from common.numpy_fast import clip, interp from selfdrive.car import apply_toyota_steer_torque_limits, create_gas_interceptor_command, make_can_msg from selfdrive.car.toyota.toyotacan import create_steer_command, create_ui_command, \ create_accel_command, create_acc_cancel_command, \ create_fcw_command, create_lta_steer_command from selfdrive.car.toyota.values import CAR, STATIC_DSU_MSGS, NO_STOP_TIMER_CAR, TSS2_CAR, \ MIN_ACC_SPEED, PEDAL_TRANSITION, CarControllerParams from opendbc.can.packer import CANPacker from common.op_params import opParams VisualAlert = car.CarControl.HUDControl.VisualAlert class CarController(): def __init__(self, dbc_name, CP, VM): self.last_steer = 0 self.alert_active = False self.last_standstill = False self.standstill_req = False self.steer_rate_limited = False self.standstill_hack = opParams().get('standstill_hack') self.packer = CANPacker(dbc_name) self.gas = 0 self.accel = 0 def update(self, enabled, active, CS, frame, actuators, pcm_cancel_cmd, hud_alert, left_line, right_line, lead, left_lane_depart, right_lane_depart): # gas and brake if CS.CP.enableGasInterceptor and enabled: MAX_INTERCEPTOR_GAS = 0.5 # RAV4 has very sensitive gas pedal if CS.CP.carFingerprint in [CAR.RAV4, CAR.RAV4H, CAR.HIGHLANDER, CAR.HIGHLANDERH]: PEDAL_SCALE = interp(CS.out.vEgo, [0.0, MIN_ACC_SPEED, MIN_ACC_SPEED + PEDAL_TRANSITION], [0.15, 0.3, 0.0]) elif CS.CP.carFingerprint in [CAR.COROLLA]: PEDAL_SCALE = interp(CS.out.vEgo, [0.0, MIN_ACC_SPEED, MIN_ACC_SPEED + PEDAL_TRANSITION], [0.3, 0.4, 0.0]) else: PEDAL_SCALE = interp(CS.out.vEgo, [0.0, MIN_ACC_SPEED, MIN_ACC_SPEED + PEDAL_TRANSITION], [0.4, 0.5, 0.0]) # offset for creep and windbrake pedal_offset = interp(CS.out.vEgo, [0.0, 2.3, MIN_ACC_SPEED + PEDAL_TRANSITION], [-.4, 0.0, 0.2]) pedal_command = PEDAL_SCALE * (actuators.accel + pedal_offset) interceptor_gas_cmd = clip(pedal_command, 0., MAX_INTERCEPTOR_GAS) else: interceptor_gas_cmd = 0. pcm_accel_cmd = clip(actuators.accel, CarControllerParams.ACCEL_MIN, CarControllerParams.ACCEL_MAX) # steer torque new_steer = int(round(actuators.steer * CarControllerParams.STEER_MAX)) apply_steer = apply_toyota_steer_torque_limits(new_steer, self.last_steer, CS.out.steeringTorqueEps, CarControllerParams) self.steer_rate_limited = new_steer != apply_steer # Cut steering while we're in a known fault state (2s) if not enabled or CS.steer_state in [9, 25] or abs(CS.out.steeringRateDeg) > 100: apply_steer = 0 apply_steer_req = 0 else: apply_steer_req = 1 # TODO: probably can delete this. CS.pcm_acc_status uses a different signal # than CS.cruiseState.enabled. 
confirm they're not meaningfully different if not enabled and CS.pcm_acc_status: pcm_cancel_cmd = 1 # on entering standstill, send standstill request if CS.out.standstill and not self.last_standstill and CS.CP.carFingerprint not in NO_STOP_TIMER_CAR and not self.standstill_hack: self.standstill_req = True if CS.pcm_acc_status != 8: # pcm entered standstill or it's disabled self.standstill_req = False self.last_steer = apply_steer self.last_standstill = CS.out.standstill can_sends = [] #*** control msgs *** #print("steer {0} {1} {2} {3}".format(apply_steer, min_lim, max_lim, CS.steer_torque_motor) # toyota can trace shows this message at 42Hz, with counter adding alternatively 1 and 2; # sending it at 100Hz seem to allow a higher rate limit, as the rate limit seems imposed # on consecutive messages can_sends.append(create_steer_command(self.packer, apply_steer, apply_steer_req, frame)) if frame % 2 == 0 and CS.CP.carFingerprint in TSS2_CAR: can_sends.append(create_lta_steer_command(self.packer, 0, 0, frame // 2)) # LTA mode. Set ret.steerControlType = car.CarParams.SteerControlType.angle and whitelist 0x191 in the panda # if frame % 2 == 0: # can_sends.append(create_steer_command(self.packer, 0, 0, frame // 2)) # can_sends.append(create_lta_steer_command(self.packer, actuators.steeringAngleDeg, apply_steer_req, frame // 2)) # we can spam can to cancel the system even if we are using lat only control if (frame % 3 == 0 and CS.CP.openpilotLongitudinalControl) or pcm_cancel_cmd: lead = lead or CS.out.vEgo < 12. # at low speed we always assume the lead is present so ACC can be engaged # Lexus IS uses a different cancellation message if pcm_cancel_cmd and CS.CP.carFingerprint in [CAR.LEXUS_IS, CAR.LEXUS_RC]: can_sends.append(create_acc_cancel_command(self.packer)) elif CS.CP.openpilotLongitudinalControl: can_sends.append(create_accel_command(self.packer, pcm_accel_cmd, pcm_cancel_cmd, self.standstill_req, lead, CS.acc_type, CS.distance_btn)) self.accel = pcm_accel_cmd else: can_sends.append(create_accel_command(self.packer, 0, pcm_cancel_cmd, False, lead, CS.acc_type, CS.distance_btn)) if frame % 2 == 0 and CS.CP.enableGasInterceptor and CS.CP.openpilotLongitudinalControl: # send exactly zero if gas cmd is zero. Interceptor will send the max between read value and gas cmd. 
# This prevents unexpected pedal range rescaling can_sends.append(create_gas_interceptor_command(self.packer, interceptor_gas_cmd, frame // 2)) self.gas = interceptor_gas_cmd # ui mesg is at 100Hz but we send asap if: # - there is something to display # - there is something to stop displaying fcw_alert = hud_alert == VisualAlert.fcw steer_alert = hud_alert in [VisualAlert.steerRequired, VisualAlert.ldw] send_ui = False if ((fcw_alert or steer_alert) and not self.alert_active) or \ (not (fcw_alert or steer_alert) and self.alert_active): send_ui = True self.alert_active = not self.alert_active elif pcm_cancel_cmd: # forcing the pcm to disengage causes a bad fault sound so play a good sound instead send_ui = True if (frame % 100 == 0 or send_ui): can_sends.append(create_ui_command(self.packer, steer_alert, pcm_cancel_cmd, left_line, right_line, left_lane_depart, right_lane_depart, enabled)) if frame % 100 == 0 and CS.CP.enableDsu: can_sends.append(create_fcw_command(self.packer, fcw_alert)) # *** static msgs *** for (addr, cars, bus, fr_step, vl) in STATIC_DSU_MSGS: if frame % fr_step == 0 and CS.CP.enableDsu and CS.CP.carFingerprint in cars: can_sends.append(make_can_msg(addr, vl, bus)) new_actuators = actuators.copy() new_actuators.steer = apply_steer / CarControllerParams.STEER_MAX new_actuators.accel = self.accel new_actuators.gas = self.gas return new_actuators, can_sends
2.21875
2
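The pedal-scaling logic in the record above is a piecewise-linear map from ego speed to a gas-interceptor scale factor. A minimal sketch of that mapping, using numpy.interp as a stand-in for common.numpy_fast.interp and the RAV4-branch breakpoints from the record; min_acc_speed and pedal_transition are passed in here rather than taken from values.py:

import numpy as np

def pedal_scale(v_ego, min_acc_speed, pedal_transition):
    # Full scale around min_acc_speed, tapering to zero once the stock PCM
    # can handle longitudinal control on its own (same breakpoints as the
    # RAV4/RAV4H branch above).
    return np.interp(v_ego,
                     [0.0, min_acc_speed, min_acc_speed + pedal_transition],
                     [0.15, 0.3, 0.0])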
app/auth/__init__.py
louisenje/Pitches
0
2576
from flask import Blueprint

auth = Blueprint('auth', __name__)

from . import views, forms
1.40625
1
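A minimal sketch of how a blueprint like this is typically registered on the application; the create_app factory name and the url_prefix are illustrative assumptions, not taken from the repository:

from flask import Flask

def create_app():
    app = Flask(__name__)

    # Register the 'auth' blueprint defined above so its views are routed.
    from .auth import auth as auth_blueprint
    app.register_blueprint(auth_blueprint, url_prefix='/authenticate')

    return app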
forms/QRGenerator.py
Rono-Barto-Co/Project-QR
3
2577
<gh_stars>1-10
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, SelectField
from wtforms.validators import DataRequired


class QRGenerator(FlaskForm):
    code_content = StringField('Content', validators=[DataRequired()])
    code_size = SelectField('Size', choices=[('15', 'Size'), ('5', '5'), ('10', '10'), ('15', '15'),
                                             ('20', '20'), ('25', '25'), ('30', '30')])
    code_color = SelectField('Colour', choices=[('white', 'Colour'), ("white", "White"), ('yellow', "Yellow"),
                                                ('lime', "Green"), ("#ffa500", "Orange")])
    code_correction = SelectField('Error Correction', choices=[("H", "Error Correction"), ("H", "H"),
                                                               ("L", "L"), ("M", "M"), ("Q", "Q")])
    code_image = StringField('Image URL')
    generate_code = SubmitField('Generate QR Code')
2.609375
3
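A minimal sketch of wiring this form into a Flask view, assuming an existing app object and a qr.html template (both hypothetical); validate_on_submit() and the .data attributes are standard Flask-WTF/WTForms behaviour:

from flask import render_template

@app.route('/qr', methods=['GET', 'POST'])
def generate_qr():
    form = QRGenerator()
    if form.validate_on_submit():
        content = form.code_content.data      # text to encode
        size = int(form.code_size.data)       # chosen size option
        colour = form.code_color.data         # fill colour
        # ...hand these off to whatever builds the QR image
    return render_template('qr.html', form=form)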
Quiz/m2_advanced_quants/l5_volatility/volatility_estimation.py
jcrangel/AI-for-Trading
0
2578
import pandas as pd
import numpy as np


def estimate_volatility(prices, l):
    """Create an exponential moving average model of the volatility of a stock
    price, and return the most recent (last) volatility estimate.

    Parameters
    ----------
    prices : pandas.Series
        A series of adjusted closing prices for a stock.
    l : float
        The 'lambda' parameter of the exponential moving average model. Making
        this value smaller will cause the model to weight older terms less
        relative to more recent terms.

    Returns
    -------
    last_vol : float
        The last element of your exponential moving average volatility model series.
    """
    # TODO: Implement the exponential moving average volatility model and return the last value.
    return prices.ewm(alpha=(1-l)).mean()[-1]


def test_run(filename='data.csv'):
    """Test run get_most_volatile() with stock prices from a file."""
    prices = pd.read_csv(filename, parse_dates=['date'], index_col='date', squeeze=True)
    print("Most recent volatility estimate: {:.6f}".format(estimate_volatility(prices, 0.7)))
    # print(estimate_volatility(prices, 0.7))


if __name__ == '__main__':
    test_run()
3.921875
4
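Note that the body above returns a smoothed price rather than a volatility. A sketch of one common EWMA volatility estimate over log returns, keeping the same lambda convention (alpha = 1 - l); the function name is new:

import numpy as np

def estimate_volatility_ewma(prices, l):
    # Square the daily log returns, smooth them exponentially, take the root.
    log_returns = np.log(prices) - np.log(prices.shift(1))
    return np.sqrt((log_returns ** 2).ewm(alpha=1 - l).mean()).iloc[-1]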
jp.atcoder/abc122/abc122_c/9516079.py
kagemeka/atcoder-submissions
1
2579
import sys

n, q = map(int, sys.stdin.readline().split())
s = '$' + sys.stdin.readline().rstrip()
lr = zip(*[map(int, sys.stdin.read().split())] * 2)


def main():
    res = [None] * (n + 1)
    res[0] = 0
    prev = '$'
    for i in range(1, n+1):
        res[i] = res[i-1]
        res[i] += (prev == 'A' and s[i] == 'C') & 1
        prev = s[i]
    for l, r in lr:
        yield res[r] - res[l]


if __name__ == '__main__':
    ans = main()
    print(*ans, sep='\n')
2.71875
3
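A quick worked trace of the prefix sum above, on an illustrative input rather than the judge's:

# Worked trace on s = "ACACTACG" (1-indexed, res[0] = 0):
#   i   : 1 2 3 4 5 6 7 8
#   s[i]: A C A C T A C G
#   res : 0 1 1 2 2 2 3 3
# A query (l, r) = (2, 7) answers res[7] - res[2] = 3 - 1 = 2,
# i.e. the "AC" pairs at positions 3-4 and 6-7.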
Decoder.py
gokulsg/Attention-is-all-you-need-implementation-from-scratch
1
2580
<filename>Decoder.py import torch import torch.nn as nn from DecoderLayer import DecoderLayer import math class Decoder(nn.Module): def __init__(self, output_dim, embed_dim, num_layers, num_heads, expand_dim, dropout, device, max_length = 30): super().__init__() self.tok_embedding = nn.Embedding(output_dim, embed_dim) #self.pos_embedding = nn.Embedding(max_length, embed_dim) self.pos_embedding = nn.Embedding.from_pretrained(self.get_positional_encoding(max_length, embed_dim)) self.layers = nn.ModuleList([DecoderLayer(embed_dim, num_heads, expand_dim, dropout) for _ in range(num_layers)]) self.fc_out = nn.Linear(embed_dim, output_dim) self.dropout = nn.Dropout(dropout) self.scale = torch.sqrt(torch.FloatTensor([embed_dim])).to(device) self.device = device def forward(self, trg, enc_src, trg_mask, src_mask): #trg = [batch size, trg len] #enc_src = [batch size, src len, embed dim] #trg_mask = [batch size, 1, trg len, trg len] #src_mask = [batch size, 1, 1, src len] batch_size = trg.shape[0] trg_len = trg.shape[1] pos = torch.arange(0, trg_len).unsqueeze(0).repeat(batch_size, 1).to(self.device) #pos = [batch size, trg len] trg = self.dropout((self.tok_embedding(trg) * self.scale) + self.pos_embedding(pos)) #trg = [batch size, trg len, embed dim] for layer in self.layers: trg = layer(trg, enc_src, trg_mask, src_mask) #trg = [batch size, trg len, embed dim] output = self.fc_out(trg) #output = [batch size, trg len, output dim] return output def get_positional_encoding(self, max_seq_len, embed_dim): pos_enc = torch.zeros(max_seq_len, embed_dim) position = torch.arange(0, max_seq_len).unsqueeze(1) div_term = torch.exp(torch.arange(0, embed_dim, 2) * (-math.log(10000.0) / embed_dim)) pos_enc[:, 0::2] = torch.sin(position * div_term) pos_enc[:, 1::2] = torch.cos(position * div_term) return pos_enc
2.734375
3
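The frozen sinusoidal table above can be built and inspected standalone; a small sketch with illustrative sizes (in the real model they come through the Decoder constructor):

import math
import torch

def sinusoidal_table(max_seq_len, embed_dim):
    # Same computation as Decoder.get_positional_encoding above.
    pos_enc = torch.zeros(max_seq_len, embed_dim)
    position = torch.arange(0, max_seq_len).unsqueeze(1)
    div_term = torch.exp(torch.arange(0, embed_dim, 2) * (-math.log(10000.0) / embed_dim))
    pos_enc[:, 0::2] = torch.sin(position * div_term)
    pos_enc[:, 1::2] = torch.cos(position * div_term)
    return pos_enc

pe = sinusoidal_table(30, 512)
print(pe.shape)  # torch.Size([30, 512]); nn.Embedding.from_pretrained keeps it frozen by default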
salt/grains/nxos.py
babs/salt
9,425
2581
<gh_stars>1000+
"""
Grains for Cisco NX-OS minions

.. versionadded:: 2016.11.0

For documentation on setting up the nxos proxy minion look in the documentation
for :mod:`salt.proxy.nxos<salt.proxy.nxos>`.
"""
import logging

import salt.utils.nxos
import salt.utils.platform
from salt.exceptions import NxosClientError

log = logging.getLogger(__name__)

__proxyenabled__ = ["nxos"]
__virtualname__ = "nxos"


def __virtual__():
    try:
        salt.utils.nxos.version_info()
    except NxosClientError as err:
        return False, err
    return __virtualname__


def system_information(proxy=None):
    if salt.utils.platform.is_proxy():
        if proxy is None:
            return {}
        if proxy["nxos.initialized"]() is False:
            return {}
        return {"nxos": proxy["nxos.grains"]()}
    else:
        data = salt.utils.nxos.version_info()
        return salt.utils.nxos.system_info(data)
1.804688
2
tmsproviderapisdk/tms_device.py
tvip/tmsproviderapisdk
0
2582
<gh_stars>0 from typing import List, Optional, Tuple from tmsproviderapisdk.tms_extended_model import TmsExtendedModel class TmsDevice(TmsExtendedModel): _path_url = "/devices/" def __init__(self, unique_id: str, account: int, device_id: int = None, ipaddr: str = None, mac: str = None, remote_custom_field: str = None, comment: str = None, last_online: str = None, last_fw_ver: str = None, first_online: str = None, use_nat: bool = False, operation_system: str = None, udpxy_addr: str = None, device_type: int = None, provider: int = None): self.unique_id = unique_id self.account = account self.id = device_id self.ipaddr = ipaddr self.mac = mac self.remote_custom_field = remote_custom_field self.comment = comment self.last_online = last_online self.last_fw_ver = last_fw_ver self.first_online = first_online self.use_nat = use_nat self.operation_system = operation_system self.udpxy_addr = udpxy_addr self.device_type = device_type self.provider = provider @staticmethod def _dict_to_object(device_dict: dict) -> object: device = TmsDevice( unique_id=device_dict["unique_id"], device_id=device_dict["id"], ipaddr=device_dict["ipaddr"], mac=device_dict["mac"], remote_custom_field=device_dict["remote_custom_field"], comment=device_dict["comment"], last_online=device_dict["last_online"], last_fw_ver=device_dict["last_fw_ver"], first_online=device_dict["first_online"], use_nat=device_dict["use_nat"], operation_system=device_dict["operation_system"], udpxy_addr=device_dict["udpxy_addr"], device_type=device_dict["device_type"], provider=device_dict["provider"], account=device_dict["account"] ) return device @classmethod def get_list(cls, account: int = None, device_type: int = None, limit: int = 50, provider: int = None, quick_search: str = "", remote_custom_field: str = None, sort: str = "", start: int = 0, unique_id: str = "") -> Optional[Tuple[List[object], int]]: devices = super().get_list(start=start, limit=limit, account=account, device_type=device_type, provider=provider, quick_search=quick_search, remote_custom_field=remote_custom_field, sort=sort, unique_id=unique_id) return devices def __str__(self): return """id:{}, ipaddr:{}, mac:{}, unique_id:{}, remote_custom_field: {}, comment: {}, last_online: {}, \ last_fw_ver: {}, first_online: {}, use_nat: {}, operation_system: {}, \ udpxy_addr: {}, device_type: {}, provider: {}, account: {}""".format( self.id, self.ipaddr, self.mac, self.unique_id, self.remote_custom_field, self.comment, self.last_online, self.last_fw_ver, self.first_online, self.use_nat, self.operation_system, self.udpxy_addr, self.device_type, self.provider, self.account )
2.1875
2
fealty/fields.py
eddiejessup/fealty
0
2583
""" A class hierarchy relating to fields of all kinds. """ from __future__ import print_function, division import numpy as np from ciabatta.meta import make_repr_str from fealty import lattice, field_numerics, walled_field_numerics class Space(object): def __init__(self, L, dim): self.L = L self.dim = dim @property def L_half(self): return self.L / 2.0 @property def A(self): return self.L ** self.dim def iterate(self, *args, **kwargs): pass def __repr__(self): fs = [('L', self.L), ('dim', self.dim)] return make_repr_str(self, fs) class Field(Space): def __init__(self, L, dim, dx): Space.__init__(self, L, dim) self.M = int(round(self.L / dx)) @property def dx(self): return self.L / self.M @property def A_i(self): return self.M ** self.dim @property def dA(self): return self.dx ** self.dim def density_field(self, r): return density(r, self.L, self.dx) def r_to_i(self, r): return lattice.r_to_i(r, self.L, self.dx) def i_to_r(self, i): return lattice.i_to_r(i, self.L, self.dx) def __repr__(self): fs = [('L', self.L), ('dim', self.dim), ('dx', self.dx)] return make_repr_str(self, fs) class Scalar(Field): def __init__(self, L, dim, dx, a_0=0.0): Field.__init__(self, L, dim, dx) self.a = np.ones(self.dim * (self.M,), dtype=np.float) * a_0 def grad(self): return _grad(self.a, self.dx) def grad_i(self, r): return _grad_i(self.a, self.r_to_i(r), self.dx) def laplacian(self): return _laplace(self.a, self.dx) def __repr__(self): fs = [('L', self.L), ('dim', self.dim), ('dx', self.dx), ('a_0', self.a_0)] return make_repr_str(self, fs) class Diffusing(Scalar): def __init__(self, L, dim, dx, D, dt, a_0=0.0): Scalar.__init__(self, L, dim, dx, a_0=a_0) self.D = D self.dt = dt if self.D > self.dx ** 2 / (2.0 * self.dim * self.dt): raise Exception('Unstable diffusion constant') def iterate(self): self.a += self.D * self.laplacian() * self.dt def __repr__(self): fs = [('L', self.L), ('dim', self.dim), ('dx', self.dx), ('D', self.D), ('dt', self.dt), ('a_0', self.a_0)] return make_repr_str(self, fs) class WalledScalar(Scalar): def __init__(self, L, dim, dx, walls, a_0=0.0): Scalar.__init__(self, L, dim, dx, a_0=a_0) self.walls = walls # Make field zero-valued where obstructed self.a *= np.logical_not(self.walls) def grad(self): return _walled_grad(self.a, self.dx, self.walls) def grad_i(self, r): return _walled_grad_i(self.a, self.r_to_i(r), self.dx, self.walls) def laplacian(self): return _walled_laplace(self.a, self.dx, self.walls) def __repr__(self): fs = [('L', self.L), ('dim', self.dim), ('dx', self.dx), ('walls', self.walls), ('a_0', self.a_0)] return make_repr_str(self, fs) # Note, inheritance order matters to get walled grad & laplacian call # (see diamond problem on wikipedia and how python handles it) class WalledDiffusing(WalledScalar, Diffusing): def __init__(self, L, dim, dx, walls, D, dt, a_0=0.0): Diffusing.__init__(self, L, dim, dx, D, dt, a_0=a_0) WalledScalar.__init__(self, L, dim, dx, walls, a_0=a_0) def __repr__(self): fs = [('L', self.L), ('dim', self.dim), ('dx', self.dx), ('walls', self.walls), ('D', self.D), ('dt', self.dt), ('a_0', self.a_0)] return make_repr_str(self, fs) def density(r, L, dx): assert r.ndim == 2 M = int(round(L / dx)) dx = L / M inds = lattice.r_to_i(r, L, dx) f = np.zeros(r.shape[1] * (M,), dtype=np.int) if f.ndim == 1: field_numerics.density_1d(inds, f) elif f.ndim == 2: field_numerics.density_2d(inds, f) elif f.ndim == 3: field_numerics.density_3d(inds, f) else: raise Exception('Density calc not implemented in this dimension') return f / dx ** r.shape[1] def 
_laplace(field, dx): assert dx > 0.0 laplace = np.empty_like(field) if field.ndim == 1: field_numerics.laplace_1d(field, laplace, dx) elif field.ndim == 2: field_numerics.laplace_2d(field, laplace, dx) elif field.ndim == 3: field_numerics.laplace_3d(field, laplace, dx) else: raise Exception('Laplacian not implemented in this dimension') return laplace def _grad_i(field, inds, dx): assert dx > 0.0 assert inds.ndim == 2 assert field.ndim == inds.shape[1] grad_i = np.empty(inds.shape, dtype=field.dtype) if field.ndim == 1: field_numerics.grad_i_1d(field, inds, grad_i, dx) elif field.ndim == 2: field_numerics.grad_i_2d(field, inds, grad_i, dx) elif field.ndim == 3: field_numerics.grad_i_3d(field, grad_i, dx) else: raise Exception("Grad_i not implemented in this dimension") return grad_i def _grad(field, dx): assert dx > 0.0 grad = np.empty(field.shape + (field.ndim,), dtype=field.dtype) if field.ndim == 1: field_numerics.grad_1d(field, grad, dx) elif field.ndim == 2: field_numerics.grad_2d(field, grad, dx) elif field.ndim == 3: field_numerics.grad_3d(field, grad, dx) else: raise Exception('Grad not implemented in this dimension') return grad def _div(field, dx): assert dx > 0.0 div = np.empty(field.shape[:-1], dtype=field.dtype) if field.ndim == 2: field_numerics.div_1d(field, div, dx) elif field.ndim == 3: field_numerics.div_2d(field, div, dx) elif field.ndim == 4: field_numerics.div_3d(field, div, dx) else: raise Exception('Divergence not implemented in this dimension') return div def _walled_grad(field, dx, walls): assert field.shape == walls.shape assert dx > 0.0 grad = np.empty(field.shape + (field.ndim,), dtype=field.dtype) if field.ndim == 1: walled_field_numerics.grad_1d(field, grad, dx, walls) elif field.ndim == 2: walled_field_numerics.grad_2d(field, grad, dx, walls) elif field.ndim == 3: walled_field_numerics.grad_3d(field, grad, dx, walls) else: raise Exception("Walled grad not implemented in this dimension") return grad def _walled_grad_i(field, inds, dx, walls): assert field.shape == walls.shape assert dx > 0.0 assert inds.ndim == 2 assert field.ndim == inds.shape[1] grad_i = np.empty(inds.shape, dtype=field.dtype) if field.ndim == 1: walled_field_numerics.grad_i_1d(field, inds, grad_i, dx, walls) elif field.ndim == 2: walled_field_numerics.grad_i_2d(field, inds, grad_i, dx, walls) elif field.ndim == 3: walled_field_numerics.grad_i_3d(field, inds, grad_i, dx, walls) else: raise Exception("Walled Grad_i not implemented in this dimension") return grad_i def _walled_laplace(field, dx, walls): assert field.shape == walls.shape assert dx > 0.0 laplace = np.empty_like(field) if field.ndim == 1: walled_field_numerics.laplace_1d(field, laplace, dx, walls) elif field.ndim == 2: walled_field_numerics.laplace_2d(field, laplace, dx, walls) elif field.ndim == 3: walled_field_numerics.laplace_3d(field, laplace, dx, walls) else: raise Exception('Laplacian not implemented in this dimension') return laplace
3.15625
3
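A minimal usage sketch of the field classes above, assuming the compiled field_numerics/walled_field_numerics helpers are importable; parameter values are arbitrary and chosen to satisfy the stability check in Diffusing:

import numpy as np

walls = np.zeros(2 * (100,), dtype=bool)                 # 100x100 lattice, no obstacles
c = WalledDiffusing(L=1.0, dim=2, dx=0.01, walls=walls,
                    D=1e-5, dt=0.1, a_0=1.0)
for _ in range(10):
    c.iterate()                                          # explicit diffusion steps
print(c.a.sum() * c.dA)                                  # total amount of chemical on the lattice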
examples/example_django/example_django/asgi.py
cpascariello/aleph-vm
19
2584
""" ASGI config for example_django project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "example_django.settings") application = get_asgi_application() os.system("/usr/bin/python3 /opt/code/manage.py migrate") os.system("/usr/bin/python3 /opt/code/manage.py " "loaddata /opt/code/blog/fixtures/default_articles.json")
1.898438
2
lectures/extensions/hyperbolic_discounting/replication_code/src/analysis/get_bivariate_distr_data.py
loikein/ekw-lectures
4
2585
<filename>lectures/extensions/hyperbolic_discounting/replication_code/src/analysis/get_bivariate_distr_data.py """Generate values of Method of Simulated Moments criterion function. Given observed moments and weighting matrix in `OUT_ANALYSIS`, "msm_estimation", generate values of Method of Simulated Moments criterion function for combinations of discount factor and present bias values. The goal is to study the bivariate distribution of the time preference parameters around the combination of true parameter values. """ import itertools import numpy as np import pandas as pd import respy as rp import yaml from bld.project_paths import project_paths_join as ppj from src.library.compute_moments import _replace_nans from src.library.compute_moments import calc_restricted_choice_probabilities from src.library.compute_moments import calc_restricted_wage_distribution from src.library.compute_moments import calc_unrestricted_choice_probabilities from src.library.compute_moments import calc_unrestricted_wage_distribution from src.library.compute_moments import calc_very_restricted_choice_probabilities from src.library.compute_moments import calc_very_restricted_wage_distribution from src.library.housekeeping import _load_pickle from src.library.housekeeping import _temporary_working_directory from tqdm import tqdm def get_bivariate_distribution(params, crit_func, grid_delta, grid_beta): """Compute value of criterion function. Args: params (pd.DataFrame): DataFrame containing model parameters. crit_func (dict): Dictionary containing model options. grid_delta (np.array): Values of discount factor. grid_beta (np.array): Values of present-bias parameter. Returns: pd.DataFrame """ results = [] for beta, delta in tqdm(itertools.product(grid_beta, grid_delta)): params_ = params.copy() params_.loc[("beta", "beta"), "value"] = beta params_.loc[("delta", "delta"), "value"] = delta val = crit_func(params_) result = {"beta": beta, "delta": delta, "val": val} results.append(result) return pd.DataFrame.from_dict(results) if __name__ == "__main__": # load params params = pd.read_csv( ppj("IN_MODEL_SPECS", "params_hyp.csv"), sep=";", index_col=["category", "name"], ) params["value"] = params["value"].astype(float) # load options with open(ppj("IN_MODEL_SPECS", "options_hyp.yaml")) as options: options = yaml.safe_load(options) # get empirical moments empirical_moments = _load_pickle(ppj("OUT_ANALYSIS", "msm_estimation", "moments_hyp.pickle")) # get weighting matrix weighting_matrix = _load_pickle( ppj("OUT_ANALYSIS", "msm_estimation", "weighting_matrix_hyp.pickle") ) calc_moments = { "Choice Probabilities Very Restricted": calc_very_restricted_choice_probabilities, "Choice Probabilities Restricted": calc_restricted_choice_probabilities, "Choice Probabilities Unrestricted": calc_unrestricted_choice_probabilities, "Wage Distribution Very Restricted": calc_very_restricted_wage_distribution, "Wage Distribution Restricted": calc_restricted_wage_distribution, "Wage Distribution Unrestricted": calc_unrestricted_wage_distribution, } with _temporary_working_directory(snippet="heatmap"): # get criterion function weighted_sum_squared_errors = rp.get_moment_errors_func( params=params, options=options, calc_moments=calc_moments, replace_nans=_replace_nans, empirical_moments=empirical_moments, weighting_matrix=weighting_matrix, ) # get bivariate distribution results results = get_bivariate_distribution( crit_func=weighted_sum_squared_errors, params=params, grid_delta=np.arange(0.945, 0.9625, 0.0025), grid_beta=np.arange(0.75, 
1.05, 0.01), ) results.to_csv(ppj("OUT_ANALYSIS", "heatmap.csv"))
2.828125
3
space_game/events/KeyPressedEvent.py
Iwomichu/probable-giggle
1
2586
from dataclasses import dataclass

from space_game.domain_names import KeyId
from space_game.events.Event import Event


@dataclass
class KeyPressedEvent(Event):
    key_id: KeyId
1.875
2
src/oncall/messengers/teams_messenger.py
navoday-91/oncall
857
2587
<gh_stars>100-1000
import pymsteams
import logging

from oncall.constants import TEAMS_SUPPORT


class teams_messenger(object):
    supports = frozenset([TEAMS_SUPPORT])

    def __init__(self, config):
        self.webhook = config['webhook']

    def send(self, message):
        heading = message.get("subject")
        final_message = "User: " + message.get("user") + " Message: " + message.get("body")
        try:
            myTeamsMessage = pymsteams.connectorcard(self.webhook)
            myTeamsMessage.title(str(heading))
            myTeamsMessage.text(str(final_message))
            myTeamsMessage.send()
        except Exception:
            logging.info("An issue occurred while sending message to teams messenger")
2.328125
2
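A minimal sketch of exercising the class directly; the webhook URL is a placeholder and the message dict mirrors the keys send() reads above:

config = {'webhook': 'https://example.webhook.office.com/webhookb2/...'}
messenger = teams_messenger(config)
messenger.send({'subject': 'Oncall notification',
                'user': 'jdoe',
                'body': 'You are now on call for team X.'})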
python/chronos/src/bigdl/chronos/autots/model/auto_prophet.py
Laniakea94/BigDL
3
2588
# + # # Copyright 2016 The BigDL Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either exp' # ress or implied. # See the License for the specific language governing permissions and # limitations under the License. # import pandas as pd import warnings from bigdl.chronos.model.prophet import ProphetBuilder, ProphetModel from bigdl.chronos.autots.utils import recalculate_n_sampling # - class AutoProphet: def __init__(self, changepoint_prior_scale=None, seasonality_prior_scale=None, holidays_prior_scale=None, seasonality_mode=None, changepoint_range=None, metric='mse', logs_dir="/tmp/auto_prophet_logs", cpus_per_trial=1, name="auto_prophet", remote_dir=None, load_dir=None, **prophet_config ): """ Create an automated Prophet Model. User need to specify either the exact value or the search space of the Prophet model hyperparameters. For details of the Prophet model hyperparameters, refer to https://facebook.github.io/prophet/docs/diagnostics.html#hyperparameter-tuning. :param changepoint_prior_scale: Int or hp sampling function from an integer space for hyperparameter changepoint_prior_scale for the Prophet model. For hp sampling, see bigdl.chronos.orca.automl.hp for more details. e.g. hp.loguniform(0.001, 0.5). :param seasonality_prior_scale: hyperparameter seasonality_prior_scale for the Prophet model. e.g. hp.loguniform(0.01, 10). :param holidays_prior_scale: hyperparameter holidays_prior_scale for the Prophet model. e.g. hp.loguniform(0.01, 10). :param seasonality_mode: hyperparameter seasonality_mode for the Prophet model. e.g. hp.choice(['additive', 'multiplicative']). :param changepoint_range: hyperparameter changepoint_range for the Prophet model. e.g. hp.uniform(0.8, 0.95). :param metric: String. The evaluation metric name to optimize. e.g. "mse" :param logs_dir: Local directory to save logs and results. It defaults to "/tmp/auto_prophet_logs" :param cpus_per_trial: Int. Number of cpus for each trial. It defaults to 1. :param name: name of the AutoProphet. It defaults to "auto_prophet" :param remote_dir: String. Remote directory to sync training results and checkpoints. It defaults to None and doesn't take effects while running in local. While running in cluster, it defaults to "hdfs:///tmp/{name}". :param load_dir: Load the ckpt from load_dir. The value defaults to None. :param prophet_config: Other Prophet hyperparameters. 
""" if load_dir: self.best_model = ProphetModel() self.best_model.restore(load_dir) try: from bigdl.orca.automl.auto_estimator import AutoEstimator import bigdl.orca.automl.hp as hp self.search_space = { "changepoint_prior_scale": hp.grid_search([0.005, 0.05, 0.1, 0.5]) if changepoint_prior_scale is None else changepoint_prior_scale, "seasonality_prior_scale": hp.grid_search([0.01, 0.1, 1.0, 10.0]) if seasonality_prior_scale is None else seasonality_prior_scale, "holidays_prior_scale": hp.loguniform(0.01, 10) if holidays_prior_scale is None else holidays_prior_scale, "seasonality_mode": hp.choice(['additive', 'multiplicative']) if seasonality_mode is None else seasonality_mode, "changepoint_range": hp.uniform(0.8, 0.95) if changepoint_range is None else changepoint_range } self.search_space.update(prophet_config) # update other configs self.metric = metric model_builder = ProphetBuilder() self.auto_est = AutoEstimator(model_builder=model_builder, logs_dir=logs_dir, resources_per_trial={"cpu": cpus_per_trial}, remote_dir=remote_dir, name=name) except ImportError: warnings.warn("You need to install `bigdl-orca[automl]` to use `fit` function.") def fit(self, data, cross_validation=True, expect_horizon=None, freq=None, metric_threshold=None, n_sampling=16, search_alg=None, search_alg_params=None, scheduler=None, scheduler_params=None, ): """ Automatically fit the model and search for the best hyperparameters. :param data: training data, a pandas dataframe with Td rows, and 2 columns, with column 'ds' indicating date and column 'y' indicating value and Td is the time dimension :param cross_validation: bool, if the eval result comes from cross_validation. The value is set to True by default. Setting this option to False to speed up the process. :param expect_horizon: int, validation data will be automatically splited from training data, and expect_horizon is the horizon you may need to use once the mode is fitted. The value defaults to None, where 10% of training data will be taken as the validation data. :param freq: the freqency of the training dataframe. the frequency can be anything from the pandas list of frequency strings here: https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-offset-aliasesDefaulted to None, where an unreliable frequency will be infer implicitly. :param metric_threshold: a trial will be terminated when metric threshold is met :param n_sampling: Number of trials to evaluate in total. Defaults to 16. If hp.grid_search is in search_space, the grid will be run n_sampling of trials and round up n_sampling according to hp.grid_search. If this is -1, (virtually) infinite samples are generated until a stopping condition is met. :param search_alg: str, all supported searcher provided by ray tune (i.e."variant_generator", "random", "ax", "dragonfly", "skopt", "hyperopt", "bayesopt", "bohb", "nevergrad", "optuna", "zoopt" and "sigopt") :param search_alg_params: extra parameters for searcher algorithm besides search_space, metric and searcher mode :param scheduler: str, all supported scheduler provided by ray tune :param scheduler_params: parameters for scheduler """ if expect_horizon is None: expect_horizon = int(0.1*len(data)) if freq is None: assert len(data) >= 2, "The training dataframe should contains more than 2 records." assert pd.api.types.is_datetime64_any_dtype(data["ds"].dtypes), \ "The 'ds' col should be in datetime 64 type, or you need to set `freq` in fit." 
self._freq = data["ds"].iloc[1] - data["ds"].iloc[0] else: self._freq = pd.Timedelta(freq) expect_horizon_str = str(self._freq * expect_horizon) self.search_space.update({"expect_horizon": expect_horizon_str, "cross_validation": cross_validation}) train_data = data if cross_validation else data[:len(data)-expect_horizon] validation_data = None if cross_validation else data[len(data)-expect_horizon:] n_sampling = recalculate_n_sampling(self.search_space, n_sampling) if n_sampling != -1 else -1 self.auto_est.fit(data=train_data, validation_data=validation_data, metric=self.metric, metric_threshold=metric_threshold, n_sampling=n_sampling, search_space=self.search_space, search_alg=search_alg, search_alg_params=search_alg_params, scheduler=scheduler, scheduler_params=scheduler_params ) # use the best config to fit a new prophet model on whole data self.best_model = ProphetBuilder().build(self.auto_est.get_best_config()) self.best_model.model.fit(data) def predict(self, horizon=1, freq="D", ds_data=None): """ Predict using the best model after HPO. :param horizon: the number of steps forward to predict :param freq: the freqency of the predicted dataframe, defaulted to day("D"), the frequency can be anything from the pandas list of frequency strings here: https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-offset-aliases :param ds_data: a dataframe that has 1 column 'ds' indicating date. """ if self.best_model.model is None: raise RuntimeError( "You must call fit or restore first before calling predict!") return self.best_model.predict(horizon=horizon, freq=freq, ds_data=ds_data) def evaluate(self, data, metrics=['mse']): """ Evaluate using the best model after HPO. :param data: evaluation data, a pandas dataframe with Td rows, and 2 columns, with column 'ds' indicating date and column 'y' indicating value and Td is the time dimension :param metrics: A list contains metrics for test/valid data. """ if data is None: raise ValueError("Input invalid data of None") if self.best_model.model is None: raise RuntimeError( "You must call fit or restore first before calling evaluate!") return self.best_model.evaluate(target=data, metrics=metrics) def save(self, checkpoint_file): """ Save the best model after HPO. :param checkpoint_file: The location you want to save the best model, should be a json file """ if self.best_model.model is None: raise RuntimeError( "You must call fit or restore first before calling save!") self.best_model.save(checkpoint_file) def restore(self, checkpoint_file): """ Restore the best model after HPO. :param checkpoint_file: The checkpoint file location you want to load the best model. """ self.best_model.restore(checkpoint_file) def get_best_model(self): """ Get the best Prophet model. """ return self.best_model.model
2.234375
2
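A minimal usage sketch built only from the methods defined above; df is assumed to be a pandas DataFrame with a datetime64 'ds' column and a 'y' column, and the import path follows the file's location in the repository:

from bigdl.chronos.autots.model.auto_prophet import AutoProphet

auto = AutoProphet(metric="mse")
auto.fit(df, cross_validation=True, expect_horizon=7, n_sampling=16)
forecast = auto.predict(horizon=7, freq="D")
auto.save("best_prophet_config.json")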
nf/flows.py
arita37/normalizing-flows
1
2589
<filename>nf/flows.py import math import numpy as np import scipy as sp import scipy.linalg import torch import torch.nn as nn import torch.nn.init as init import torch.nn.functional as F from nf.utils import unconstrained_RQS # supported non-linearities: note that the function must be invertible functional_derivatives = { torch.tanh: lambda x: 1 - torch.pow(torch.tanh(x), 2), F.leaky_relu: lambda x: (x > 0).type(torch.FloatTensor) + \ (x < 0).type(torch.FloatTensor) * -0.01, F.elu: lambda x: (x > 0).type(torch.FloatTensor) + \ (x < 0).type(torch.FloatTensor) * torch.exp(x) } class Planar(nn.Module): """ Planar flow. z = f(x) = x + u h(wᵀx + b) [<NAME> Mohamed, 2015] """ def __init__(self, dim, nonlinearity=torch.tanh): super().__init__() self.h = nonlinearity self.w = nn.Parameter(torch.Tensor(dim)) self.u = nn.Parameter(torch.Tensor(dim)) self.b = nn.Parameter(torch.Tensor(1)) self.reset_parameters(dim) def reset_parameters(self, dim): init.uniform_(self.w, -math.sqrt(1/dim), math.sqrt(1/dim)) init.uniform_(self.u, -math.sqrt(1/dim), math.sqrt(1/dim)) init.uniform_(self.b, -math.sqrt(1/dim), math.sqrt(1/dim)) def forward(self, x): """ Given x, returns z and the log-determinant log|df/dx|. Returns ------- """ if self.h in (F.elu, F.leaky_relu): u = self.u elif self.h == torch.tanh: scal = torch.log(1+torch.exp(self.w @ self.u)) - self.w @ self.u - 1 u = self.u + scal * self.w / torch.norm(self.w) else: raise NotImplementedError("Non-linearity is not supported.") lin = torch.unsqueeze(x @ self.w, 1) + self.b z = x + u * self.h(lin) phi = functional_derivatives[self.h](lin) * self.w log_det = torch.log(torch.abs(1 + phi @ u) + 1e-4) return z, log_det def backward(self, z): raise NotImplementedError("Planar flow has no algebraic inverse.") class Radial(nn.Module): """ Radial flow. z = f(x) = = x + β h(α, r)(z − z0) [Rezende and Mohamed 2015] """ def __init__(self, dim): super().__init__() self.x0 = nn.Parameter(torch.Tensor(dim)) self.log_alpha = nn.Parameter(torch.Tensor(1)) self.beta = nn.Parameter(torch.Tensor(1)) def reset_parameters(dim): init.uniform_(self.z0, -math.sqrt(1/dim), math.sqrt(1/dim)) init.uniform_(self.log_alpha, -math.sqrt(1/dim), math.sqrt(1/dim)) init.uniform_(self.beta, -math.sqrt(1/dim), math.sqrt(1/dim)) def forward(self, x): """ Given x, returns z and the log-determinant log|df/dx|. """ m, n = x.shape r = torch.norm(x - self.x0) h = 1 / (torch.exp(self.log_alpha) + r) beta = -torch.exp(self.log_alpha) + torch.log(1 + torch.exp(self.beta)) z = x + beta * h * (x - self.x0) log_det = (n - 1) * torch.log(1 + beta * h) + \ torch.log(1 + beta * h - \ beta * r / (torch.exp(self.log_alpha) + r) ** 2) return z, log_det class FCNN(nn.Module): """ Simple fully connected neural network. """ def __init__(self, in_dim, out_dim, hidden_dim): super().__init__() self.network = nn.Sequential( nn.Linear(in_dim, hidden_dim), nn.Tanh(), nn.Linear(hidden_dim, hidden_dim), nn.Tanh(), nn.Linear(hidden_dim, out_dim), ) def forward(self, x): return self.network(x) class RealNVP(nn.Module): """ Non-volume preserving flow. [Dinh et. al. 
2017] """ def __init__(self, dim, hidden_dim = 8, base_network=FCNN): super().__init__() self.dim = dim self.t1 = base_network(dim // 2, dim // 2, hidden_dim) self.s1 = base_network(dim // 2, dim // 2, hidden_dim) self.t2 = base_network(dim // 2, dim // 2, hidden_dim) self.s2 = base_network(dim // 2, dim // 2, hidden_dim) def forward(self, x): lower, upper = x[:,:self.dim // 2], x[:,self.dim // 2:] t1_transformed = self.t1(lower) s1_transformed = self.s1(lower) upper = t1_transformed + upper * torch.exp(s1_transformed) t2_transformed = self.t2(upper) s2_transformed = self.s2(upper) lower = t2_transformed + lower * torch.exp(s2_transformed) z = torch.cat([lower, upper], dim=1) log_det = torch.sum(s1_transformed, dim=1) + \ torch.sum(s2_transformed, dim=1) return z, log_det def backward(self, z): lower, upper = z[:,:self.dim // 2], z[:,self.dim // 2:] t2_transformed = self.t2(upper) s2_transformed = self.s2(upper) lower = (lower - t2_transformed) * torch.exp(-s2_transformed) t1_transformed = self.t1(lower) s1_transformed = self.s1(lower) upper = (upper - t1_transformed) * torch.exp(-s1_transformed) x = torch.cat([lower, upper], dim=1) log_det = torch.sum(-s1_transformed, dim=1) + \ torch.sum(-s2_transformed, dim=1) return x, log_det class MAF(nn.Module): """ Masked auto-regressive flow. [Papamakarios et al. 2018] """ def __init__(self, dim, hidden_dim = 8, base_network=FCNN): super().__init__() self.dim = dim self.layers = nn.ModuleList() self.initial_param = nn.Parameter(torch.Tensor(2)) for i in range(1, dim): self.layers += [base_network(i, 2, hidden_dim)] self.reset_parameters() def reset_parameters(self): init.uniform_(self.initial_param, -math.sqrt(0.5), math.sqrt(0.5)) def forward(self, x): z = torch.zeros_like(x) log_det = torch.zeros(z.shape[0]) for i in range(self.dim): if i == 0: mu, alpha = self.initial_param[0], self.initial_param[1] else: out = self.layers[i - 1](x[:, :i]) mu, alpha = out[:, 0], out[:, 1] z[:, i] = (x[:, i] - mu) / torch.exp(alpha) log_det -= alpha return z.flip(dims=(1,)), log_det def backward(self, z): x = torch.zeros_like(z) log_det = torch.zeros(z.shape[0]) z = z.flip(dims=(1,)) for i in range(self.dim): if i == 0: mu, alpha = self.initial_param[0], self.initial_param[1] else: out = self.layers[i - 1](x[:, :i]) mu, alpha = out[:, 0], out[:, 1] x[:, i] = mu + torch.exp(alpha) * z[:, i] log_det += alpha return x, log_det class ActNorm(nn.Module): """ ActNorm layer. [Kingma and Dhariwal, 2018.] """ def __init__(self, dim): super().__init__() self.dim = dim self.mu = nn.Parameter(torch.zeros(dim, dtype = torch.float)) self.log_sigma = nn.Parameter(torch.zeros(dim, dtype = torch.float)) def forward(self, x): z = x * torch.exp(self.log_sigma) + self.mu log_det = torch.sum(self.log_sigma) return z, log_det def backward(self, z): x = (z - self.mu) / torch.exp(self.log_sigma) log_det = -torch.sum(self.log_sigma) return x, log_det class OneByOneConv(nn.Module): """ Invertible 1x1 convolution. [Kingma and Dhariwal, 2018.] 
""" def __init__(self, dim): super().__init__() self.dim = dim W, _ = sp.linalg.qr(np.random.randn(dim, dim)) P, L, U = sp.linalg.lu(W) self.P = torch.tensor(P, dtype = torch.float) self.L = nn.Parameter(torch.tensor(L, dtype = torch.float)) self.S = nn.Parameter(torch.tensor(np.diag(U), dtype = torch.float)) self.U = nn.Parameter(torch.triu(torch.tensor(U, dtype = torch.float), diagonal = 1)) self.W_inv = None def forward(self, x): L = torch.tril(self.L, diagonal = -1) + torch.diag(torch.ones(self.dim)) U = torch.triu(self.U, diagonal = 1) z = x @ self.P @ L @ (U + torch.diag(self.S)) log_det = torch.sum(torch.log(torch.abs(self.S))) return z, log_det def backward(self, z): if not self.W_inv: L = torch.tril(self.L, diagonal = -1) + \ torch.diag(torch.ones(self.dim)) U = torch.triu(self.U, diagonal = 1) W = self.P @ L @ (U + torch.diag(self.S)) self.W_inv = torch.inverse(W) x = z @ self.W_inv log_det = -torch.sum(torch.log(torch.abs(self.S))) return x, log_det class NSF_AR(nn.Module): """ Neural spline flow, auto-regressive. [Durkan et al. 2019] """ def __init__(self, dim, K = 5, B = 3, hidden_dim = 8, base_network = FCNN): super().__init__() self.dim = dim self.K = K self.B = B self.layers = nn.ModuleList() self.init_param = nn.Parameter(torch.Tensor(3 * K - 1)) for i in range(1, dim): self.layers += [base_network(i, 3 * K - 1, hidden_dim)] self.reset_parameters() def reset_parameters(self): init.uniform_(self.init_param, - 1 / 2, 1 / 2) def forward(self, x): z = torch.zeros_like(x) log_det = torch.zeros(z.shape[0]) for i in range(self.dim): if i == 0: init_param = self.init_param.expand(x.shape[0], 3 * self.K - 1) W, H, D = torch.split(init_param, self.K, dim = 1) else: out = self.layers[i - 1](x[:, :i]) W, H, D = torch.split(out, self.K, dim = 1) W, H = torch.softmax(W, dim = 1), torch.softmax(H, dim = 1) W, H = 2 * self.B * W, 2 * self.B * H D = F.softplus(D) z[:, i], ld = unconstrained_RQS( x[:, i], W, H, D, inverse=False, tail_bound=self.B) log_det += ld return z, log_det def backward(self, z): x = torch.zeros_like(z) log_det = torch.zeros(x.shape[0]) for i in range(self.dim): if i == 0: init_param = self.init_param.expand(x.shape[0], 3 * self.K - 1) W, H, D = torch.split(init_param, self.K, dim = 1) else: out = self.layers[i - 1](x[:, :i]) W, H, D = torch.split(out, self.K, dim = 1) W, H = torch.softmax(W, dim = 1), torch.softmax(H, dim = 1) W, H = 2 * self.B * W, 2 * self.B * H D = F.softplus(D) x[:, i], ld = unconstrained_RQS( z[:, i], W, H, D, inverse = True, tail_bound = self.B) log_det += ld return x, log_det class NSF_CL(nn.Module): """ Neural spline flow, coupling layer. [Durkan et al. 
2019] """ def __init__(self, dim, K = 5, B = 3, hidden_dim = 8, base_network = FCNN): super().__init__() self.dim = dim self.K = K self.B = B self.f1 = base_network(dim // 2, (3 * K - 1) * dim // 2, hidden_dim) self.f2 = base_network(dim // 2, (3 * K - 1) * dim // 2, hidden_dim) def forward(self, x): log_det = torch.zeros(x.shape[0]) lower, upper = x[:, :self.dim // 2], x[:, self.dim // 2:] out = self.f1(lower).reshape(-1, self.dim // 2, 3 * self.K - 1) W, H, D = torch.split(out, self.K, dim = 2) W, H = torch.softmax(W, dim = 2), torch.softmax(H, dim = 2) W, H = 2 * self.B * W, 2 * self.B * H D = F.softplus(D) upper, ld = unconstrained_RQS( upper, W, H, D, inverse=False, tail_bound=self.B) log_det += torch.sum(ld, dim = 1) out = self.f2(upper).reshape(-1, self.dim // 2, 3 * self.K - 1) W, H, D = torch.split(out, self.K, dim = 2) W, H = torch.softmax(W, dim = 2), torch.softmax(H, dim = 2) W, H = 2 * self.B * W, 2 * self.B * H D = F.softplus(D) lower, ld = unconstrained_RQS( lower, W, H, D, inverse=False, tail_bound=self.B) log_det += torch.sum(ld, dim = 1) return torch.cat([lower, upper], dim = 1), log_det def backward(self, z): log_det = torch.zeros(z.shape[0]) lower, upper = z[:, :self.dim // 2], z[:, self.dim // 2:] out = self.f2(upper).reshape(-1, self.dim // 2, 3 * self.K - 1) W, H, D = torch.split(out, self.K, dim = 2) W, H = torch.softmax(W, dim = 2), torch.softmax(H, dim = 2) W, H = 2 * self.B * W, 2 * self.B * H D = F.softplus(D) lower, ld = unconstrained_RQS( lower, W, H, D, inverse=True, tail_bound=self.B) log_det += torch.sum(ld, dim = 1) out = self.f1(lower).reshape(-1, self.dim // 2, 3 * self.K - 1) W, H, D = torch.split(out, self.K, dim = 2) W, H = torch.softmax(W, dim = 2), torch.softmax(H, dim = 2) W, H = 2 * self.B * W, 2 * self.B * H D = F.softplus(D) upper, ld = unconstrained_RQS( upper, W, H, D, inverse = True, tail_bound = self.B) log_det += torch.sum(ld, dim = 1) return torch.cat([lower, upper], dim = 1), log_det
2.453125
2
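A minimal sketch of chaining the layers above into a flow: apply each in turn and accumulate the per-layer log-determinants (dim=2 and the layer choice are arbitrary):

import torch

layers = [RealNVP(dim=2), ActNorm(dim=2), OneByOneConv(dim=2)]
x = torch.randn(16, 2)
log_det = torch.zeros(16)
for layer in layers:
    x, ld = layer.forward(x)
    log_det = log_det + ld          # scalar or per-sample log dets both broadcast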
ioos_qc/config_creator/fx_parser.py
HakaiInstitute/ioos_qc
0
2590
# module pyparsing.py # # Copyright (c) 2003-2019 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # from pyparsing import ( Literal, Word, Group, Forward, alphas, alphanums, Regex, CaselessKeyword, Suppress, delimitedList, ) import math import operator # map operator symbols to corresponding arithmetic operations epsilon = 1e-12 opn = { "+": operator.add, "-": operator.sub, "*": operator.mul, "/": operator.truediv, "^": operator.pow, } fn = { "sin": math.sin, "cos": math.cos, "tan": math.tan, "exp": math.exp, "abs": abs, "trunc": lambda a: int(a), "round": round, "sgn": lambda a: -1 if a < -epsilon else 1 if a > epsilon else 0, } exprStack = [] def push_first(toks): exprStack.append(toks[0]) def push_unary_minus(toks): for t in toks: if t == "-": exprStack.append("unary -") else: break def BNF(): """ expop :: '^' multop :: '*' | '/' addop :: '+' | '-' integer :: ['+' | '-'] '0'..'9'+ atom :: PI | E | real | fn '(' expr ')' | '(' expr ')' factor :: atom [ expop factor ]* term :: factor [ multop factor ]* expr :: term [ addop term ]* """ # use CaselessKeyword for e and pi, to avoid accidentally matching # functions that start with 'e' or 'pi' (such as 'exp'); Keyword # and CaselessKeyword only match whole words e = CaselessKeyword("E") pi = CaselessKeyword("PI") # fnumber = Combine(Word("+-"+nums, nums) + # Optional("." + Optional(Word(nums))) + # Optional(e + Word("+-"+nums, nums))) # or use provided pyparsing_common.number, but convert back to str: # fnumber = ppc.number().addParseAction(lambda t: str(t[0])) fnumber = Regex(r"[+-]?\d+(?:\.\d*)?(?:[eE][+-]?\d+)?") ident = Word(alphas, alphanums + "_$") plus, minus, mult, div = map(Literal, "+-*/") lpar, rpar = map(Suppress, "()") addop = plus | minus multop = mult | div expop = Literal("^") expr = Forward() expr_list = delimitedList(Group(expr)) # add parse action that replaces the function identifier with a (name, number of args) tuple fn_call = (ident + lpar - Group(expr_list) + rpar).setParseAction( lambda t: t.insert(0, (t.pop(0), len(t[0]))) ) atom = ( addop[...] + ( (fn_call | pi | e | fnumber | ident).setParseAction(push_first) | Group(lpar + expr + rpar) ) ).setParseAction(push_unary_minus) # by defining exponentiation as "atom [ ^ factor ]..." instead of "atom [ ^ atom ]...", we get right-to-left # exponents, instead of left-to-right that is, 2^3^2 = 2^(3^2), not (2^3)^2. factor = Forward() factor <<= atom + (expop + factor).setParseAction(push_first)[...] term = factor + (multop + factor).setParseAction(push_first)[...] 
expr <<= term + (addop + term).setParseAction(push_first)[...] bnf = expr return bnf def evaluate_stack(s, stats): op, num_args = s.pop(), 0 if isinstance(op, tuple): op, num_args = op if op == "unary -": return -evaluate_stack(s, stats) if op in "+-*/^": # note: operands are pushed onto the stack in reverse order op2 = evaluate_stack(s, stats) op1 = evaluate_stack(s, stats) return opn[op](op1, op2) elif op == "PI": return math.pi # 3.1415926535 elif op == "E": return math.e # 2.718281828 elif op == "mean": return stats['mean'] elif op == "min": return stats['min'] elif op == "max": return stats['max'] elif op == "std": return stats['std'] elif op in fn: # note: args are pushed onto the stack in reverse order args = reversed([evaluate_stack(s, stats) for _ in range(num_args)]) return fn[op](*args) elif op[0].isalpha(): raise Exception("invalid identifier '%s'" % op) else: return float(op) def eval_fx(fx, stats): """Given fx and stats ('min', 'max', 'mean', 'std') return the result""" _ = BNF().parseString(fx, parseAll=True) val = evaluate_stack(exprStack[:], stats) return val
2.703125
3
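A minimal usage sketch of eval_fx; the stats keys follow its docstring and the values are illustrative:

stats = {'min': 0.0, 'max': 42.0, 'mean': 21.0, 'std': 3.5}
print(eval_fx("mean + 2 * std", stats))   # 28.0
print(eval_fx("max - min", stats))        # 42.0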
scanlogger.py
pythonhacker/pyscanlogd
1
2591
# -- coding: utf-8 #!/usr/bin/env python """ pyscanlogger: Port scan detector/logger tool, inspired by scanlogd {http://www.openwall.com/scanlogd} but with added ability to log slow port-scans. Features 1. Detects all stealth (half-open) and full-connect scans. 2. Detects Idle scan and logs it correctly using correlation! 3. Detects SCTP scan. 4. Detects slow port-scans also. Modification History Mar 17 2010 - Cleaned up code to publish to google. Apr 8 2010 - Better detection of TCP full-connect scan without spurious and incorrect logging. Better logging functions. Licensed under GNU GPL v3.0. """ import sys, os import dpkt, pcap import struct import socket import time import threading import optparse import entry import timerlist __author__ = "pythonhacker" __maintainer__ = "pythonhacker" __version__ = '0.5.1' __modified__ = 'Thu Apr 8 19:21:11 IST 2010' # UDP - in progress... SCAN_TIMEOUT = 5 WEIGHT_THRESHOLD = 25 PIDFILE="/var/run/pyscanlogger.pid" # TCP flag constants TH_URG=dpkt.tcp.TH_URG TH_ACK=dpkt.tcp.TH_ACK TH_PSH=dpkt.tcp.TH_PUSH TH_RST=dpkt.tcp.TH_RST TH_SYN=dpkt.tcp.TH_SYN TH_FIN=dpkt.tcp.TH_FIN # Protocols TCP=dpkt.tcp.TCP UDP=dpkt.udp.UDP SCTP=dpkt.sctp.SCTP get_timestamp = lambda : time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()) ip2quad = lambda x: socket.inet_ntoa(struct.pack('I', x)) scan_ip2quad = lambda scan: map(ip2quad, [scan.src, scan.dst]) class ScanLogger(object): """ Port scan detector/logger """ # TCP flags to scan type mapping scan_types = {0: 'TCP null', TH_FIN: 'TCP fin', TH_SYN: 'TCP syn', TH_SYN|TH_RST: 'TCP syn', TH_ACK: 'TCP ack', TH_URG|TH_PSH|TH_FIN: 'TCP x-mas', TH_URG|TH_PSH|TH_FIN|TH_ACK: 'TCP x-mas', TH_SYN|TH_FIN: 'TCP syn/fin', TH_FIN|TH_ACK: 'TCP fin/ack', TH_SYN|TH_ACK: 'TCP full-connect', TH_URG|TH_PSH|TH_ACK|TH_RST|TH_SYN|TH_FIN: 'TCP all-flags', TH_SYN|TH_ACK|TH_RST: 'TCP full-connect', # Not a scan TH_RST|TH_ACK: 'reply'} def __init__(self, timeout, threshold, maxsize, daemon=True, logfile='/var/log/scanlog'): self.scans = entry.EntryLog(maxsize) self.long_scans = entry.EntryLog(maxsize) # Port scan weight threshold self.threshold = threshold # Timeout for scan entries self.timeout = timeout # Long-period scan timeouts self.timeout_l = 3600 # Long-period scan threshold self.threshold_l = self.threshold/2 # Daemonize ? self.daemon = daemon # Log file try: self.scanlog = open(logfile,'a') print >> sys.stderr, 'Scan logs will be saved to %s' % logfile except (IOError, OSError), (errno, strerror): print >> sys.stderr, "Error opening scan log file %s => %s" % (logfile, strerror) self.scanlog = None # Recent scans - this list allows to keep scan information # upto last 'n' seconds, so as to not call duplicate scans # in the same time-period. 'n' is 60 sec by default. # Since entries time out in 60 seconds, max size is equal # to maximum such entries possible in 60 sec - assuming # a scan occurs at most every 5 seconds, this would be 12. 
self.recent_scans = timerlist.TimerList(12, 60.0) def hash_func(self, addr): """ Hash a host address """ value = addr h = 0 while value: # print value h ^= value value = value >> 9 return h & (8192-1) def mix(self, a, b, c): a -= b; a -= c; a ^= (c>>13) b -= c; b -= a; b ^= (a<<8) c -= a; c -= b; c ^= (b>>13) a -= b; a -= c; a ^= (c>>12) b -= c; b -= a; b ^= (a<<16) c -= a; c -= b; c ^= (b>>5) a -= b; a -= c; a ^= (c>>3) b -= c; b -= a; b ^= (a<<10) c -= a; c -= b; c ^= (b>>15) return abs(c) def host_hash(self, src, dst): """ Hash mix two host addresses """ return self.hash_func(self.mix(src, dst, 0xffffff)) def log(self, msg): """ Log a message to console and/or log file """ line = '[%s]: %s' % (get_timestamp(), msg) if self.scanlog: self.scanlog.write(line + '\n') self.scanlog.flush() if not self.daemon: print >> sys.stderr, line def log_scan(self, scan, continuation=False, slow_scan=False, unsure=False): """ Log the scan to file and/or console """ srcip, dstip = scan_ip2quad(scan) ports = ','.join([str(port) for port in scan.ports]) if not continuation: tup = [scan.type,scan.flags_or,srcip,dstip, ports] if not slow_scan: if scan.type != 'Idle': line = '%s scan (flags:%d) from %s to %s (ports:%s)' else: tup.append(ip2quad(scan.zombie)) line = '%s scan (flags: %d) from %s to %s (ports: %s) using zombie host %s' else: tup.append(scan.time_avg) if unsure: line = 'Possible slow %s scan (flags:%d) from %s to %s (ports:%s), average timediff %.2fs' else: line = 'Slow %s scan (flags:%d) from %s to %s (ports:%s), average timediff %.2fs' else: tup = [scan.type, srcip,dstip, ports] if not slow_scan: if scan.type != 'Idle': line = 'Continuation of %s scan from %s to %s (ports:%s)' else: tup.append(ip2quad(scan.zombie)) line = 'Continuation of %s scan from %s to %s (ports: %s) using zombie host %s' else: tup.append(scan.time_avg) line = 'Continuation of slow %s scan from %s to %s (ports:%s), average timediff %.2fs' msg = line % tuple(tup) self.log(msg) def update_ports(self, scan, dport, flags): scan.flags_or |= flags if dport in scan.ports: return # Add weight for port if dport < 1024: scan.weight += 3 else: scan.weight += 1 scan.ports.append(dport) def inspect_scan(self, scan, slow_scan=False): # Sure scan is_scan = ((slow_scan and scan.weight >= self.threshold_l) or (not slow_scan and scan.weight >= self.threshold)) # Possible scan maybe_scan = (slow_scan and len(scan.ports)>=3 and len(scan.timediffs)>=4 and (scan.weight < self.threshold_l)) not_scan = False if is_scan or maybe_scan: scan.logged = True if scan.proto==TCP: idle_scan = False if scan.flags_or==TH_RST: # None does scan using RST, however this could be # return packets from a zombie host to the scanning # host when a scanning host is doing an idle scan. # Basically # A -scanning host # B - zombie host # C - target host # If A does an idle scan on C with B as zombie, # it will appear to C as if B is syn scanning it # and later we could get an apparent RST "scan" # from B to A # Correlation: If 'RST scan' detected from X to Y # See if there was a SYN scan recently from host # X to host Z. 
Then actually Y is idle scanning # Z dummy_scans, idle_ports = [], [] for item in reversed(self.recent_scans): rscan = item[1] if rscan.src==scan.src and rscan.flags_or==TH_SYN and ((rscan.timestamp - scan.timestamp)<30): idle_scan = True idle_ports.append(rscan.ports) dummy_scans.append(item) if idle_scan: scan.src = scan.dst scan.dst = rscan.dst scan.zombie = rscan.src scan.type = 'Idle' scan.ports = idle_ports # for d in dummy_scans: # self.recent_scans.remove(d) else: # Remove entry if slow_scan: del self.long_scans[scan.hash] else: del self.scans[scan.hash] return False else: scan.type = self.scan_types.get(scan.flags_or,'unknown') if scan.type in ('', 'reply'): not_scan = True # If we see scan flags 22 from A->B, make sure that # there was no recent full-connect scan from B->A, if # so this is spurious and should be ignored. if scan.flags_or == (TH_SYN|TH_ACK|TH_RST) and len(self.recent_scans): recent1 = self.recent_scans[-1:-2:-1] for recent in recent1: # Was not a scan, skip if not recent.is_scan: continue if recent.type == 'TCP full-connect' and ((scan.src == recent.dst) and (scan.dst == recent.src)): # Spurious self.log("Ignoring spurious TCP full-connect scan from %s" % ' to '.join(scan_ip2quad(scan))) not_scan = True break # If this is a syn scan, see if there was a recent idle scan # with this as zombie, then ignore it... elif scan.flags_or == TH_SYN and len(self.recent_scans): # Try last 1 scans recent1 = self.recent_scans[-1:-2:-1] for recent in recent1: if recent.type=='Idle' and scan.src==recent.zombie: self.log('Ignoring mis-interpreted syn scan from zombie host %s' % ' to '.join(scan_ip2quad(scan))) break # Reply from B->A for full-connect scan from A->B elif (recent.type == 'reply' and ((scan.src == recent.dst) and (scan.dst == recent.src))): scan.type = 'TCP full-connect' break elif scan.proto==UDP: scan.type = 'UDP' # Reset flags for UDP scan scan.flags_or = 0 elif scan.proto==SCTP: if scan.chunk_type==1: scan.type = 'SCTP Init' elif scan.chunk_type==10: scan.type = 'SCTP COOKIE_ECHO' # See if this was logged recently scanentry = entry.RecentScanEntry(scan, not not_scan) if scanentry not in self.recent_scans: continuation=False self.recent_scans.append(scanentry) else: continuation=True if not not_scan: self.log_scan(scan, continuation=continuation, slow_scan=slow_scan, unsure=maybe_scan) # Remove entry if slow_scan: del self.long_scans[scan.hash] else: del self.scans[scan.hash] return True else: return False def process(self, pkt): if not hasattr(pkt, 'ip'): return ip = pkt.ip # Ignore non-tcp, non-udp packets if type(ip.data) not in (TCP, UDP, SCTP): return pload = ip.data src,dst,dport,flags = int(struct.unpack('I',ip.src)[0]),int(struct.unpack('I', ip.dst)[0]),int(pload.dport),0 proto = type(pload) if proto == TCP: flags = pload.flags key = self.host_hash(src,dst) curr=time.time() # Keep dropping old entries self.recent_scans.collect() if key in self.scans: scan = self.scans[key] if scan.src != src: # Skip packets in reverse direction or invalid protocol return timediff = curr - scan.timestamp # Update only if not too old, else skip and remove entry if (timediff > self.timeout): # Add entry in long_scans if timediff not larger # than longscan timeout prev = self.scans[key].timestamp if timediff<=self.timeout_l: if key not in self.long_scans: lscan = entry.ScanEntry(key) lscan.src = src lscan.dst = dst lscan.timestamp = curr lscan.timediffs.append(curr - prev) lscan.flags_or |= flags lscan.ports.append(dport) lscan.proto = proto self.long_scans[key] = lscan else: 
lscan = self.long_scans[key] lscan.timestamp = curr lscan.flags_or |= flags lscan.timediffs.append(curr - prev) lscan.update_time_sd() self.update_ports(lscan, dport, flags) if lscan.time_sd<2: # SD is less than 2, possible slow scan # update port weights... # print 'Weight=>',lscan.weight if not self.inspect_scan(lscan, True): # Not a scan, check # of entries - if too many # then this is a regular network activity # but not a scan, so remove entry if len(lscan.timediffs)>=10: # print lscan.src, lscan.timediffs, lscan.time_sd print 'Removing',key,lscan.src,'since not a scan' del self.long_scans[key] elif len(lscan.timediffs)>2: # More than 2 entries, but SD is too large, # delete the entry # print 'Removing',key,lscan.src,'since SD is',lscan.time_sd del self.long_scans[key] else: # Too large timeout, remove key del self.long_scans[key] del self.scans[key] return if scan.logged: return scan.timestamp = curr self.update_ports(scan, dport, flags) self.inspect_scan(scan) else: # Add new entry scan = entry.ScanEntry(key) scan.src = src scan.dst = dst scan.timestamp = curr scan.flags_or |= flags if proto==SCTP: scan.chunk_type = pload.chunks[0].type scan.ports.append(dport) scan.proto = proto self.scans[key] = scan def loop(self): pc = pcap.pcap() decode = { pcap.DLT_LOOP:dpkt.loopback.Loopback, pcap.DLT_NULL:dpkt.loopback.Loopback, pcap.DLT_EN10MB:dpkt.ethernet.Ethernet } [pc.datalink()] try: print 'listening on %s: %s' % (pc.name, pc.filter) for ts, pkt in pc: self.process(decode(pkt)) except KeyboardInterrupt: if not self.daemon: nrecv, ndrop, nifdrop = pc.stats() print '\n%d packets received by filter' % nrecv print '%d packets dropped by kernel' % ndrop def run_daemon(self): # Disconnect from tty try: pid = os.fork() if pid>0: sys.exit(0) except OSError, e: print >>sys.stderr, "fork #1 failed", e sys.exit(1) os.setsid() os.umask(0) # Second fork try: pid = os.fork() if pid>0: open(PIDFILE,'w').write(str(pid)) sys.exit(0) except OSError, e: print >>sys.stderr, "fork #2 failed", e sys.exit(1) self.loop() def run(self): # If dameon, then create a new thread and wait for it if self.daemon: print 'Daemonizing...' self.run_daemon() else: # Run in foreground self.loop() def main(): if os.geteuid() != 0: sys.exit("You must be super-user to run this program") o=optparse.OptionParser() o.add_option("-d", "--daemonize", dest="daemon", help="Daemonize", action="store_true", default=False) o.add_option("-f", "--logfile", dest="logfile", help="File to save logs to", default="/var/log/scanlog") options, args = o.parse_args() s=ScanLogger(SCAN_TIMEOUT, WEIGHT_THRESHOLD, 8192, options.daemon, options.logfile) s.run() if __name__ == '__main__': main()
2.375
2
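The run_daemon method in the scan-logger record that closes above uses the classic Unix double-fork idiom with Python 2 syntax. A minimal Python 3 sketch of the same pattern is below; the PIDFILE path is a made-up placeholder, not the value from the source.

import os
import sys

PIDFILE = "/var/run/scanlogd.pid"  # assumed location, not taken from the record

def daemonize():
    # First fork: detach from the parent process and its controlling terminal.
    if os.fork() > 0:
        sys.exit(0)
    os.setsid()
    os.umask(0)
    # Second fork: ensure the daemon can never reacquire a controlling terminal.
    pid = os.fork()
    if pid > 0:
        with open(PIDFILE, "w") as f:
            f.write(str(pid))
        sys.exit(0)
    # The grandchild continues as the daemon from here.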
src/util/util.py
ashleylst/DSDmodel
1
2592
from itertools import combinations
import copy


def get_reverse(n):
    if n == 1:
        return 0
    else:
        return 1


def get_edge_info(e):
    v = [0 for i in range(2)]
    n = [0 for i in range(2)]
    t = 0
    for x in e:
        v[t], n[t] = x
        t += 1
    return v, n


def sort_e_by_domain(val):
    return val[0][1]


def sort_by_strand(val):
    return val[0][0]


def check_edge_in_tuplelist(edge, tpl):
    for i in tpl:
        if edge in i:
            return True
    return False


def compare(a, b):
    return (a > b) - (a < b)


def flip(i):
    if i == 0:
        i = 1
    elif i == 1:
        i = 0
    return i


def get_free_domains(limits, blocks, bound):
    limits = sorted(limits)
    interval = limits[1] - limits[0]
    for i in blocks:
        if limits[1] > i > limits[0]:
            tmp = abs(bound - i)
            if tmp < interval:
                interval = tmp
    return interval


def get_combinations(oldlen, newlen, cursor, indexlist):
    combold = list(combinations(indexlist[cursor:oldlen], 2))
    combself = [(i, i) for i in range(0, oldlen)]
    combnew = []
    if oldlen != newlen:
        for i in range(0, oldlen):
            for j in range(oldlen, newlen):
                combnew.append((i, j))
    return combold + combnew + combself


def get_migrate_nodes(edges, indices, startstrand):
    d = []
    for i in indices:
        vi, ni = get_edge_info(edges[i][0])
        if vi[0] == startstrand:
            d.append(ni[0])
        else:
            d.append(ni[1])
    d.sort()
    return d


def check_following_migration(edges, p=0):
    """
    :param edges:
    :return:
    """
    e = copy.copy(edges)
    visited = [False for _ in e]
    miggroup = []
    cnt = -1
    for i in range(0, len(e)):
        if visited[i]:
            continue
        e[i] = list(e[i])
        e[i][p] = list(e[i][p])
        t1 = sorted(e[i][p], key=lambda tup: tup[0])
        if not visited[i]:
            visited[i] = True
            miggroup.append([i])
            cnt += 1
        for j in range(0, len(e)):
            if j != i and not visited[j]:
                e[j] = list(e[j])
                e[j][p] = list(e[j][p])
                t2 = sorted(e[j][p], key=lambda tup: tup[0])
                if (t2[0][0] != t1[0][0]) or (t2[1][0] != t1[1][0]):
                    continue
                for num in range(0, len(miggroup[cnt])):
                    t1 = sorted(e[miggroup[cnt][num]][p], key=lambda tup: tup[0])
                    if (t1[0][1] + 1 == t2[0][1] and t1[1][1] - 1 == t2[1][1]) \
                            or (t1[0][1] - 1 == t2[0][1] and t1[1][1] + 1 == t2[1][1]):
                        visited[j] = True
                        miggroup[cnt].append(j)
                        break
    return miggroup


def get_absdist(domain1, domain2):
    """
    :param domain1:
    :param domain2:
    :return:
    """
    return abs(domain1[1] - domain2[1])


def get_closet_domain_to_target(target, domains):
    """
    :param target:
    :param domains:
    :return:
    """
    closet = 10000
    closetd = ()
    for i in domains:
        dist = get_absdist(i, target)
        if dist < closet:
            closet = dist
            closetd = i
    return closetd


def get_domains_on_2sides(target1, target2, domains1, domains2):
    """
    :param target1:
    :param target2:
    :param domains1:
    :param domains2:
    :return:
    """
    if target1[0] == domains1[0][0]:
        closetd1 = get_closet_domain_to_target(target1, domains1)
    elif target2[0] == domains1[0][0]:
        closetd1 = get_closet_domain_to_target(target2, domains1)

    if target1[0] == domains2[0][0]:
        closetd2 = get_closet_domain_to_target(target1, domains2)
    elif target2[0] == domains2[0][0]:
        closetd2 = get_closet_domain_to_target(target2, domains2)

    return closetd1, closetd2


def get_closest_target(domains, targets):
    """
    :return:
    """
    domains = sorted(domains, key=lambda tup: tup[1])
    mindist = 10000
    mint = None
    for t in targets:
        dist = min(get_absdist(t, domains[0]), get_absdist(t, domains[len(domains) - 1]))
        if dist < mindist:
            # Track the best distance seen so far so the closest target is kept.
            mindist = dist
            mint = t
    return mint


def check_continuity(a, b):
    for i in a:
        for j in b:
            if i + 1 == j or i - 1 == j:
                return i, j
    return None


def check_bond_existence(d1, d2, l1, l2):
    for i in range(len(l1)):
        if d1 == l1[i] and d2 == l2[i]:
            return True
    return False
2.625
3
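A small usage sketch for the helpers in src/util/util.py above. The import path and the toy tuple values are assumptions for illustration; they only mirror the shapes the functions expect, not real DSDmodel data.

# Assumes src/util is on sys.path so the module imports as "util".
from util import get_edge_info, get_combinations, check_continuity

edge = ((0, 1), (2, 3))                      # ((strand, domain), (strand, domain)) style tuple
v, n = get_edge_info(edge)                   # v == [0, 2], n == [1, 3]

pairs = get_combinations(2, 3, 0, [0, 1])    # old/old pairs, old/new pairs, then self pairs
step = check_continuity([1, 5], [2, 9])      # (1, 2): the first adjacent index pair found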
examples/question_answering/qa_sparse_train.py
ebell495/nn_pruning
250
2593
# coding=utf-8
# Copyright 2020 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Sparse Fine-tuning the library models for question answering.
"""
# You can also adapt this script on your own question answering task. Pointers for this are left as comments.

from nn_pruning.sparse_trainer import SparseTrainer

from .qa_train import QATrainer


# SparseTrainer should appear first in the base classes, as its functions must override QATrainer and its base classes (Trainer)
class QASparseTrainer(SparseTrainer, QATrainer):
    def __init__(self, sparse_args, *args, **kwargs):
        QATrainer.__init__(self, *args, **kwargs)
        SparseTrainer.__init__(self, sparse_args)
1.929688
2
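The base-class ordering comment in qa_sparse_train.py above matters because Python's method resolution order searches bases left to right. A self-contained toy illustration with stand-in classes (not the real Trainer hierarchy) is below.

class Trainer:
    def training_step(self):
        return "dense step"

class QATrainer(Trainer):
    pass                        # inherits the dense training_step

class SparseTrainer:
    def training_step(self):
        return "sparse step"    # the override that must win

# Listing SparseTrainer first, as QASparseTrainer does, puts it earlier in the MRO,
# so its training_step shadows the one inherited through QATrainer.
class QASparse(SparseTrainer, QATrainer):
    pass

assert QASparse().training_step() == "sparse step"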
ironic/drivers/modules/ilo/raid.py
armohamm/ironic
2
2594
<reponame>armohamm/ironic # Copyright 2018 Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ iLO5 RAID specific methods """ from ironic_lib import metrics_utils from oslo_log import log as logging from oslo_utils import importutils from ironic.common import exception from ironic.common.i18n import _ from ironic.common import raid from ironic.common import states from ironic.conductor import utils as manager_utils from ironic import conf from ironic.drivers import base from ironic.drivers.modules import deploy_utils from ironic.drivers.modules.ilo import common as ilo_common LOG = logging.getLogger(__name__) CONF = conf.CONF METRICS = metrics_utils.get_metrics_logger(__name__) ilo_error = importutils.try_import('proliantutils.exception') class Ilo5RAID(base.RAIDInterface): """Implementation of OOB RAIDInterface for iLO5.""" def get_properties(self): """Return the properties of the interface.""" return ilo_common.REQUIRED_PROPERTIES def _set_clean_failed(self, task, msg, exc): LOG.error("RAID configuration job failed for node %(node)s. " "Message: '%(message)s'.", {'node': task.node.uuid, 'message': msg}) task.node.last_error = msg task.process_event('fail') def _set_driver_internal_true_value(self, task, *keys): driver_internal_info = task.node.driver_internal_info for key in keys: driver_internal_info[key] = True task.node.driver_internal_info = driver_internal_info task.node.save() def _set_driver_internal_false_value(self, task, *keys): driver_internal_info = task.node.driver_internal_info for key in keys: driver_internal_info[key] = False task.node.driver_internal_info = driver_internal_info task.node.save() def _pop_driver_internal_values(self, task, *keys): driver_internal_info = task.node.driver_internal_info for key in keys: driver_internal_info.pop(key, None) task.node.driver_internal_info = driver_internal_info task.node.save() def _prepare_for_read_raid(self, task, raid_step): deploy_opts = deploy_utils.build_agent_options(task.node) task.driver.boot.prepare_ramdisk(task, deploy_opts) manager_utils.node_power_action(task, states.REBOOT) if raid_step == 'create_raid': self._set_driver_internal_true_value( task, 'ilo_raid_create_in_progress') else: self._set_driver_internal_true_value( task, 'ilo_raid_delete_in_progress') self._set_driver_internal_true_value(task, 'cleaning_reboot') self._set_driver_internal_false_value(task, 'skip_current_clean_step') @METRICS.timer('Ilo5RAID.create_configuration') @base.clean_step(priority=0, abortable=False, argsinfo={ 'create_root_volume': { 'description': ( 'This specifies whether to create the root volume. ' 'Defaults to `True`.' ), 'required': False }, 'create_nonroot_volumes': { 'description': ( 'This specifies whether to create the non-root volumes. ' 'Defaults to `True`.' ), 'required': False } }) def create_configuration(self, task, create_root_volume=True, create_nonroot_volumes=True): """Create a RAID configuration on a bare metal using agent ramdisk. 
This method creates a RAID configuration on the given node. :param task: a TaskManager instance. :param create_root_volume: If True, a root volume is created during RAID configuration. Otherwise, no root volume is created. Default is True. :param create_nonroot_volumes: If True, non-root volumes are created. If False, no non-root volumes are created. Default is True. :raises: MissingParameterValue, if node.target_raid_config is missing or was found to be empty after skipping root volume and/or non-root volumes. :raises: NodeCleaningFailure, on failure to execute step. """ node = task.node target_raid_config = raid.filter_target_raid_config( node, create_root_volume=create_root_volume, create_nonroot_volumes=create_nonroot_volumes) driver_internal_info = node.driver_internal_info driver_internal_info['target_raid_config'] = target_raid_config LOG.debug("Calling OOB RAID create_configuration for node %(node)s " "with the following target RAID configuration: %(target)s", {'node': node.uuid, 'target': target_raid_config}) ilo_object = ilo_common.get_ilo_object(node) try: # Raid configuration in progress, checking status if not driver_internal_info.get('ilo_raid_create_in_progress'): ilo_object.create_raid_configuration(target_raid_config) self._prepare_for_read_raid(task, 'create_raid') return states.CLEANWAIT else: # Raid configuration is done, updating raid_config raid_conf = ( ilo_object.read_raid_configuration( raid_config=target_raid_config)) if len(raid_conf['logical_disks']): raid.update_raid_info(node, raid_conf) LOG.debug("Node %(uuid)s raid create clean step is done.", {'uuid': node.uuid}) self._pop_driver_internal_values( task, 'ilo_raid_create_in_progress', 'cleaning_reboot', 'skip_current_clean_step') node.driver_internal_info = driver_internal_info node.save() else: # Raid configuration failed msg = "Unable to create raid" self._pop_driver_internal_values( task, 'ilo_raid_create_in_progress', 'cleaning_reboot', 'skip_current_clean_step') node.driver_internal_info = driver_internal_info node.save() raise exception.NodeCleaningFailure( "Clean step create_configuration failed " "on node %(node)s with error: %(err)s" % {'node': node.uuid, 'err': msg}) except ilo_error.IloError as ilo_exception: operation = (_("Failed to create raid configuration on node %s") % node.uuid) self._pop_driver_internal_values(task, 'ilo_raid_create_in_progress', 'cleaning_reboot', 'skip_current_clean_step') node.driver_internal_info = driver_internal_info node.save() self._set_clean_failed(task, operation, ilo_exception) @METRICS.timer('Ilo5RAID.delete_configuration') @base.clean_step(priority=0, abortable=False) def delete_configuration(self, task): """Delete the RAID configuration. :param task: a TaskManager instance containing the node to act on. :raises: NodeCleaningFailure, on failure to execute step. 
""" node = task.node LOG.debug("OOB RAID delete_configuration invoked for node %s.", node.uuid) driver_internal_info = node.driver_internal_info ilo_object = ilo_common.get_ilo_object(node) try: # Raid configuration in progress, checking status if not driver_internal_info.get('ilo_raid_delete_in_progress'): ilo_object.delete_raid_configuration() self._prepare_for_read_raid(task, 'delete_raid') return states.CLEANWAIT else: # Raid configuration is done, updating raid_config raid_conf = ilo_object.read_raid_configuration() if not len(raid_conf['logical_disks']): node.raid_config = {} LOG.debug("Node %(uuid)s raid delete clean step is done.", {'uuid': node.uuid}) self._pop_driver_internal_values( task, 'ilo_raid_delete_in_progress', 'cleaning_reboot', 'skip_current_clean_step') node.driver_internal_info = driver_internal_info node.save() else: # Raid configuration failed msg = ("Unable to delete this logical disks: %s" % raid_conf['logical_disks']) self._pop_driver_internal_values( task, 'ilo_raid_delete_in_progress', 'cleaning_reboot', 'skip_current_clean_step') node.driver_internal_info = driver_internal_info node.save() raise exception.NodeCleaningFailure( "Clean step delete_configuration failed " "on node %(node)s with error: %(err)s" % {'node': node.uuid, 'err': msg}) except ilo_error.IloLogicalDriveNotFoundError: LOG.info("No logical drive found to delete on node %(node)s", {'node': node.uuid}) except ilo_error.IloError as ilo_exception: operation = (_("Failed to delete raid configuration on node %s") % node.uuid) self._pop_driver_internal_values(task, 'ilo_raid_delete_in_progress', 'cleaning_reboot', 'skip_current_clean_step') node.driver_internal_info = driver_internal_info node.save() self._set_clean_failed(task, operation, ilo_exception)
1.617188
2
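Both clean steps in the iLO5 RAID record above follow the same two-pass pattern: start the out-of-band job, reboot into the ramdisk, return CLEANWAIT, then read back the result on the second pass guarded by a driver_internal_info flag. A stripped-down sketch of that control flow follows; the function and dict names are placeholders, not ironic APIs, and CLEANWAIT stands in for ironic.common.states.CLEANWAIT.

CLEANWAIT = "clean wait"

def two_pass_clean_step(node_info, start_job, read_result):
    if not node_info.get("raid_job_in_progress"):
        start_job()                              # pass 1: kick off the RAID job out of band
        node_info["raid_job_in_progress"] = True  # survives the reboot in driver_internal_info
        return CLEANWAIT                          # conductor polls until the node comes back
    result = read_result()                        # pass 2: job finished, read the configuration
    node_info.pop("raid_job_in_progress", None)   # clear the flag so the step can run again
    return result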
src/petronia/aid/bootstrap/__init__.py
groboclown/petronia
19
2595
""" Common Petronia imports for bootstrap parts of an extension. This should be imported along with the `simp` module. """ from ...base.bus import ( EventBus, ListenerRegistrar, ListenerSetup, QueuePriority, ExtensionMetadataStruct, register_event, EVENT_WILDCARD, TARGET_WILDCARD, QUEUE_EVENT_NORMAL, QUEUE_EVENT_HIGH, QUEUE_EVENT_IO, QUEUE_EVENT_TYPES ) from ...base.participant import ( create_singleton_identity, NOT_PARTICIPANT, ) from ...base.events import ( # These are generally just bootstrap events. DisposeCompleteEvent, as_dispose_complete_listener, RequestDisposeEvent, as_request_dispose_listener, SystemStartedEvent, as_system_started_listener, ) from ...base.events.bus import ( EventProtectionModel, GLOBAL_EVENT_PROTECTION, INTERNAL_EVENT_PROTECTION, PRODUCE_EVENT_PROTECTION, CONSUME_EVENT_PROTECTION, REQUEST_EVENT_PROTECTION, RESPONSE_EVENT_PROTECTION, ) from ...core.extensions.api import ANY_VERSION from ...core.shutdown.api import ( SystemShutdownEvent, as_system_shutdown_listener, SystemShutdownFinalizeEvent, as_system_shutdown_finalize_listener, TARGET_ID_SYSTEM_SHUTDOWN, )
1.578125
2
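The bootstrap __init__ above exists so extension code can pull everything it needs from one place. A minimal consumption sketch, assuming the src layout maps to an importable petronia package; only the import is shown because the re-exported call signatures are not visible in this record.

# Hypothetical extension module: one flat import instead of several deep ones.
from petronia.aid.bootstrap import (
    ANY_VERSION,
    create_singleton_identity,
    as_system_started_listener,
)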
examples/dump-properties.py
zachjweiner/pyopencl
7
2596
<filename>examples/dump-properties.py import pyopencl as cl from optparse import OptionParser parser = OptionParser() parser.add_option("-s", "--short", action="store_true", help="don't print all device properties") (options, args) = parser.parse_args() def print_info(obj, info_cls): for info_name in sorted(dir(info_cls)): if not info_name.startswith("_") and info_name != "to_string": info = getattr(info_cls, info_name) try: info_value = obj.get_info(info) except: info_value = "<error>" if (info_cls == cl.device_info and info_name == "PARTITION_TYPES_EXT" and isinstance(info_value, list)): print("{}: {}".format(info_name, [ cl.device_partition_property_ext.to_string(v, "<unknown device partition property %d>") for v in info_value])) else: try: print(f"{info_name}: {info_value}") except: print("%s: <error>" % info_name) for platform in cl.get_platforms(): print(75*"=") print(platform) print(75*"=") if not options.short: print_info(platform, cl.platform_info) for device in platform.get_devices(): if not options.short: print(75*"-") print(device) if not options.short: print(75*"-") print_info(device, cl.device_info) ctx = cl.Context([device]) for mf in [ cl.mem_flags.READ_ONLY, #cl.mem_flags.READ_WRITE, #cl.mem_flags.WRITE_ONLY ]: for itype in [ cl.mem_object_type.IMAGE2D, cl.mem_object_type.IMAGE3D ]: try: formats = cl.get_supported_image_formats(ctx, mf, itype) except: formats = "<error>" else: def str_chd_type(chdtype): result = cl.channel_type.to_string(chdtype, "<unknown channel data type %d>") result = result.replace("_INT", "") result = result.replace("UNSIGNED", "U") result = result.replace("SIGNED", "S") result = result.replace("NORM", "N") result = result.replace("FLOAT", "F") return result formats = ", ".join( "{}-{}".format( cl.channel_order.to_string(iform.channel_order, "<unknown channel order 0x%x>"), str_chd_type(iform.channel_data_type)) for iform in formats) print("{} {} FORMATS: {}\n".format( cl.mem_object_type.to_string(itype), cl.mem_flags.to_string(mf), formats)) del ctx
2.46875
2
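A companion sketch to the pyopencl example above: instead of dumping every property, list only GPU devices and one attribute each. It assumes a working OpenCL runtime is installed; no other names are invented.

import pyopencl as cl

# Print the name and global memory size of every GPU device found.
for platform in cl.get_platforms():
    for device in platform.get_devices():
        if device.type & cl.device_type.GPU:
            print(device.name, device.get_info(cl.device_info.GLOBAL_MEM_SIZE))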
interfaces/acados_template/acados_template/acados_ocp_solver.py
jgillis/acados
1
2597
# -*- coding: future_fstrings -*- # # Copyright 2019 <NAME>, <NAME>, <NAME>, # <NAME>, <NAME>, <NAME>, <NAME>, # <NAME>, <NAME>, <NAME>, <NAME>, # <NAME>, <NAME>, <NAME>, <NAME>, <NAME> # # This file is part of acados. # # The 2-Clause BSD License # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE.; # import sys, os, json import numpy as np from ctypes import * from casadi import CasadiMeta, Function, SX from copy import deepcopy from .generate_c_code_explicit_ode import generate_c_code_explicit_ode from .generate_c_code_implicit_ode import generate_c_code_implicit_ode from .generate_c_code_gnsf import generate_c_code_gnsf from .generate_c_code_constraint import generate_c_code_constraint from .generate_c_code_nls_cost import generate_c_code_nls_cost from .generate_c_code_external_cost import generate_c_code_external_cost from .acados_ocp import AcadosOcp from .acados_model import acados_model_strip_casadi_symbolics from .utils import is_column, is_empty, casadi_length, render_template, acados_class2dict,\ format_class_dict, ocp_check_against_layout, np_array_to_list, make_model_consistent,\ set_up_imported_gnsf_model def make_ocp_dims_consistent(acados_ocp): dims = acados_ocp.dims cost = acados_ocp.cost constraints = acados_ocp.constraints model = acados_ocp.model opts = acados_ocp.solver_options # nx if is_column(model.x): dims.nx = casadi_length(model.x) else: raise Exception('model.x should be column vector!') # nu if is_empty(model.u): dims.nu = 0 else: dims.nu = casadi_length(model.u) # nz if is_empty(model.z): dims.nz = 0 else: dims.nz = casadi_length(model.z) # np if is_empty(model.p): dims.np = 0 else: dims.np = casadi_length(model.p) if acados_ocp.parameter_values.shape[0] != dims.np: raise Exception('inconsistent dimension np, regarding model.p and parameter_values.') ## cost # path if cost.cost_type == 'LINEAR_LS': ny = cost.W.shape[0] if cost.Vx.shape[0] != ny or cost.Vu.shape[0] != ny: raise Exception('inconsistent dimension ny, regarding W, Vx, Vu.' + \ f'\nGot W[{cost.W.shape}], Vx[{cost.Vx.shape}], Vu[{cost.Vu.shape}]\n') if dims.nz != 0 and cost.Vz.shape[0] != ny: raise Exception('inconsistent dimension ny, regarding W, Vx, Vu, Vz.' 
+ \ f'\nGot W[{cost.W.shape}], Vx[{cost.Vx.shape}], Vu[{cost.Vu.shape}], Vz[{cost.Vz.shape}]\n') if cost.Vx.shape[1] != dims.nx and ny != 0: raise Exception('inconsistent dimension: Vx should have nx columns.') if cost.Vu.shape[1] != dims.nu and ny != 0: raise Exception('inconsistent dimension: Vu should have nu columns.') if cost.yref.shape[0] != ny: raise Exception('inconsistent dimension: regarding W, yref.' + \ f'\nGot W[{cost.W.shape}], yref[{cost.yref.shape}]\n') dims.ny = ny elif cost.cost_type == 'NONLINEAR_LS': ny = cost.W.shape[0] if is_empty(model.cost_y_expr) and ny != 0: raise Exception('inconsistent dimension ny: regarding W, cost_y_expr.') elif casadi_length(model.cost_y_expr) != ny: raise Exception('inconsistent dimension ny: regarding W, cost_y_expr.') if cost.yref.shape[0] != ny: raise Exception('inconsistent dimension: regarding W, yref.' + \ f'\nGot W[{cost.W.shape}], yref[{cost.yref.shape}]\n') dims.ny = ny # terminal if cost.cost_type_e == 'LINEAR_LS': ny_e = cost.W_e.shape[0] if cost.Vx_e.shape[0] != ny_e: raise Exception('inconsistent dimension ny_e: regarding W_e, cost_y_expr_e.' + \ f'\nGot W_e[{cost.W_e.shape}], Vx_e[{cost.Vx_e.shape}]') if cost.Vx_e.shape[1] != dims.nx and ny_e != 0: raise Exception('inconsistent dimension: Vx_e should have nx columns.') if cost.yref_e.shape[0] != ny_e: raise Exception('inconsistent dimension: regarding W_e, yref_e.') dims.ny_e = ny_e elif cost.cost_type_e == 'NONLINEAR_LS': ny_e = cost.W_e.shape[0] if is_empty(model.cost_y_expr_e) and ny_e != 0: raise Exception('inconsistent dimension ny_e: regarding W_e, cost_y_expr_e.') elif casadi_length(model.cost_y_expr_e) != ny_e: raise Exception('inconsistent dimension ny_e: regarding W_e, cost_y_expr_e.') if cost.yref_e.shape[0] != ny_e: raise Exception('inconsistent dimension: regarding W_e, yref_e.') dims.ny_e = ny_e ## constraints # initial if (constraints.lbx_0 == [] and constraints.ubx_0 == []): dims.nbx_0 = 0 else: this_shape = constraints.lbx_0.shape other_shape = constraints.ubx_0.shape if not this_shape == other_shape: raise Exception('lbx_0, ubx_0 have different shapes!') if not is_column(constraints.lbx_0): raise Exception('lbx_0, ubx_0 must be column vectors!') dims.nbx_0 = constraints.lbx_0.size if all(constraints.lbx_0 == constraints.ubx_0): dims.nbxe_0 = dims.nbx_0 # path nbx = constraints.idxbx.shape[0] if constraints.ubx.shape[0] != nbx or constraints.lbx.shape[0] != nbx: raise Exception('inconsistent dimension nbx, regarding idxbx, ubx, lbx.') else: dims.nbx = nbx nbu = constraints.idxbu.shape[0] if constraints.ubu.shape[0] != nbu or constraints.lbu.shape[0] != nbu: raise Exception('inconsistent dimension nbu, regarding idxbu, ubu, lbu.') else: dims.nbu = nbu ng = constraints.lg.shape[0] if constraints.ug.shape[0] != ng or constraints.C.shape[0] != ng \ or constraints.D.shape[0] != ng: raise Exception('inconsistent dimension ng, regarding lg, ug, C, D.') else: dims.ng = ng if not is_empty(model.con_h_expr): nh = casadi_length(model.con_h_expr) else: nh = 0 if constraints.uh.shape[0] != nh or constraints.lh.shape[0] != nh: raise Exception('inconsistent dimension nh, regarding lh, uh, con_h_expr.') else: dims.nh = nh if is_empty(model.con_phi_expr): dims.nphi = 0 dims.nr = 0 else: dims.nphi = casadi_length(model.con_phi_expr) if is_empty(model.con_r_expr): raise Exception('convex over nonlinear constraints: con_r_expr but con_phi_expr is nonempty') else: dims.nr = casadi_length(model.con_r_expr) # terminal nbx_e = constraints.idxbx_e.shape[0] if 
constraints.ubx_e.shape[0] != nbx_e or constraints.lbx_e.shape[0] != nbx_e: raise Exception('inconsistent dimension nbx_e, regarding idxbx_e, ubx_e, lbx_e.') else: dims.nbx_e = nbx_e ng_e = constraints.lg_e.shape[0] if constraints.ug_e.shape[0] != ng_e or constraints.C_e.shape[0] != ng_e: raise Exception('inconsistent dimension ng_e, regarding_e lg_e, ug_e, C_e.') else: dims.ng_e = ng_e if not is_empty(model.con_h_expr_e): nh_e = casadi_length(model.con_h_expr_e) else: nh_e = 0 if constraints.uh_e.shape[0] != nh_e or constraints.lh_e.shape[0] != nh_e: raise Exception('inconsistent dimension nh_e, regarding lh_e, uh_e, con_h_expr_e.') else: dims.nh_e = nh_e if is_empty(model.con_phi_expr_e): dims.nphi_e = 0 dims.nr_e = 0 else: dims.nphi_e = casadi_length(model.con_phi_expr_e) if is_empty(model.con_r_expr_e): raise Exception('convex over nonlinear constraints: con_r_expr_e but con_phi_expr_e is nonempty') else: dims.nr_e = casadi_length(model.con_r_expr_e) # Slack dimensions nsbx = constraints.idxsbx.shape[0] if is_empty(constraints.lsbx): constraints.lsbx = np.zeros((nsbx,)) elif constraints.lsbx.shape[0] != nsbx: raise Exception('inconsistent dimension nsbx, regarding idxsbx, lsbx.') if is_empty(constraints.usbx): constraints.usbx = np.zeros((nsbx,)) elif constraints.usbx.shape[0] != nsbx: raise Exception('inconsistent dimension nsbx, regarding idxsbx, usbx.') dims.nsbx = nsbx nsbu = constraints.idxsbu.shape[0] if is_empty(constraints.lsbu): constraints.lsbu = np.zeros((nsbu,)) elif constraints.lsbu.shape[0] != nsbu: raise Exception('inconsistent dimension nsbu, regarding idxsbu, lsbu.') if is_empty(constraints.usbu): constraints.usbu = np.zeros((nsbu,)) elif constraints.usbu.shape[0] != nsbu: raise Exception('inconsistent dimension nsbu, regarding idxsbu, usbu.') dims.nsbu = nsbu nsh = constraints.idxsh.shape[0] if is_empty(constraints.lsh): constraints.lsh = np.zeros((nsh,)) elif constraints.lsh.shape[0] != nsh: raise Exception('inconsistent dimension nsh, regarding idxsh, lsh.') if is_empty(constraints.ush): constraints.ush = np.zeros((nsh,)) elif constraints.ush.shape[0] != nsh: raise Exception('inconsistent dimension nsh, regarding idxsh, ush.') dims.nsh = nsh nsphi = constraints.idxsphi.shape[0] if is_empty(constraints.lsphi): constraints.lsphi = np.zeros((nsphi,)) elif constraints.lsphi.shape[0] != nsphi: raise Exception('inconsistent dimension nsphi, regarding idxsphi, lsphi.') if is_empty(constraints.usphi): constraints.usphi = np.zeros((nsphi,)) elif constraints.usphi.shape[0] != nsphi: raise Exception('inconsistent dimension nsphi, regarding idxsphi, usphi.') dims.nsphi = nsphi nsg = constraints.idxsg.shape[0] if is_empty(constraints.lsg): constraints.lsg = np.zeros((nsg,)) elif constraints.lsg.shape[0] != nsg: raise Exception('inconsistent dimension nsg, regarding idxsg, lsg.') if is_empty(constraints.usg): constraints.usg = np.zeros((nsg,)) elif constraints.usg.shape[0] != nsg: raise Exception('inconsistent dimension nsg, regarding idxsg, usg.') dims.nsg = nsg ns = nsbx + nsbu + nsh + nsg + nsphi wrong_field = "" if cost.Zl.shape[0] != ns: wrong_field = "Zl" dim = cost.Zl.shape[0] elif cost.Zu.shape[0] != ns: wrong_field = "Zu" dim = cost.Zu.shape[0] elif cost.zl.shape[0] != ns: wrong_field = "zl" dim = cost.zl.shape[0] elif cost.zu.shape[0] != ns: wrong_field = "zu" dim = cost.zu.shape[0] if wrong_field != "": raise Exception(f'Inconsistent size for field {wrong_field}, with dimension {dim}, \n\t'\ + f'Detected ns = {ns} = nsbx + nsbu + nsg + nsh + nsphi.\n\t'\ + f'With nsbx 
= {nsbx}, nsbu = {nsbu}, nsg = {nsg}, nsh = {nsh}, nsphi = {nsphi}') dims.ns = ns nsbx_e = constraints.idxsbx_e.shape[0] if is_empty(constraints.lsbx_e): constraints.lsbx_e = np.zeros((nsbx_e,)) elif constraints.lsbx_e.shape[0] != nsbx_e: raise Exception('inconsistent dimension nsbx_e, regarding idxsbx_e, lsbx_e.') if is_empty(constraints.usbx_e): constraints.usbx_e = np.zeros((nsbx_e,)) elif constraints.usbx_e.shape[0] != nsbx_e: raise Exception('inconsistent dimension nsbx_e, regarding idxsbx_e, usbx_e.') dims.nsbx_e = nsbx_e nsh_e = constraints.idxsh_e.shape[0] if is_empty(constraints.lsh_e): constraints.lsh_e = np.zeros((nsh_e,)) elif constraints.lsh_e.shape[0] != nsh_e: raise Exception('inconsistent dimension nsh_e, regarding idxsh_e, lsh_e.') if is_empty(constraints.ush_e): constraints.ush_e = np.zeros((nsh_e,)) elif constraints.ush_e.shape[0] != nsh_e: raise Exception('inconsistent dimension nsh_e, regarding idxsh_e, ush_e.') dims.nsh_e = nsh_e nsg_e = constraints.idxsg_e.shape[0] if is_empty(constraints.lsg_e): constraints.lsg_e = np.zeros((nsg_e,)) elif constraints.lsg_e.shape[0] != nsg_e: raise Exception('inconsistent dimension nsg_e, regarding idxsg_e, lsg_e.') if is_empty(constraints.usg_e): constraints.usg_e = np.zeros((nsg_e,)) elif constraints.usg_e.shape[0] != nsg_e: raise Exception('inconsistent dimension nsg_e, regarding idxsg_e, usg_e.') dims.nsg_e = nsg_e nsphi_e = constraints.idxsphi_e.shape[0] if is_empty(constraints.lsphi_e): constraints.lsphi_e = np.zeros((nsphi_e,)) elif constraints.lsphi_e.shape[0] != nsphi_e: raise Exception('inconsistent dimension nsphi_e, regarding idxsphi_e, lsphi_e.') if is_empty(constraints.usphi_e): constraints.usphi_e = np.zeros((nsphi_e,)) elif constraints.usphi_e.shape[0] != nsphi_e: raise Exception('inconsistent dimension nsphi_e, regarding idxsphi_e, usphi_e.') dims.nsphi_e = nsphi_e # terminal ns_e = nsbx_e + nsh_e + nsg_e + nsphi_e wrong_field = "" if cost.Zl_e.shape[0] != ns_e: wrong_field = "Zl_e" dim = cost.Zl_e.shape[0] elif cost.Zu_e.shape[0] != ns_e: wrong_field = "Zu_e" dim = cost.Zu_e.shape[0] elif cost.zl_e.shape[0] != ns_e: wrong_field = "zl_e" dim = cost.zl_e.shape[0] elif cost.zu_e.shape[0] != ns_e: wrong_field = "zu_e" dim = cost.zu_e.shape[0] if wrong_field != "": raise Exception(f'Inconsistent size for field {wrong_field}, with dimension {dim}, \n\t'\ + f'Detected ns_e = {ns_e} = nsbx_e + nsg_e + nsh_e + nsphi_e.\n\t'\ + f'With nsbx_e = {nsbx_e}, nsg_e = {nsg_e}, nsh_e = {nsh_e}, nsphi_e = {nsphi_e}') dims.ns_e = ns_e # discretization if is_empty(opts.time_steps) and is_empty(opts.shooting_nodes): # uniform discretization opts.time_steps = opts.tf / dims.N * np.ones((dims.N,)) elif not is_empty(opts.shooting_nodes): if np.shape(opts.shooting_nodes)[0] != dims.N+1: raise Exception('inconsistent dimension N, regarding shooting_nodes.') time_steps = np.zeros((dims.N,)) for i in range(dims.N): time_steps[i] = opts.shooting_nodes[i+1] - opts.shooting_nodes[i] opts.time_steps = time_steps elif (not is_empty(opts.time_steps)) and (not is_empty(opts.shooting_nodes)): Exception('Please provide either time_steps or shooting_nodes for nonuniform discretization') tf = np.sum(opts.time_steps) if (tf - opts.tf) / tf > 1e-15: raise Exception(f'Inconsistent discretization: {opts.tf}'\ f' = tf != sum(opts.time_steps) = {tf}.') def get_ocp_nlp_layout(): current_module = sys.modules[__name__] acados_path = os.path.dirname(current_module.__file__) with open(acados_path + '/acados_layout.json', 'r') as f: ocp_nlp_layout = json.load(f) 
return ocp_nlp_layout def ocp_formulation_json_dump(acados_ocp, json_file='acados_ocp_nlp.json'): # Load acados_ocp_nlp structure description ocp_layout = get_ocp_nlp_layout() # Copy input ocp object dictionary ocp_nlp_dict = dict(deepcopy(acados_ocp).__dict__) # TODO: maybe make one funciton with formatting for acados_struct, v in ocp_layout.items(): # skip non dict attributes if not isinstance(v, dict): continue # setattr(ocp_nlp, acados_struct, dict(getattr(acados_ocp, acados_struct).__dict__)) # Copy ocp object attributes dictionaries ocp_nlp_dict[acados_struct]=dict(getattr(acados_ocp, acados_struct).__dict__) ocp_nlp_dict = format_class_dict(ocp_nlp_dict) # strip symbolics ocp_nlp_dict['model'] = acados_model_strip_casadi_symbolics(ocp_nlp_dict['model']) # strip shooting_nodes ocp_nlp_dict['solver_options'].pop('shooting_nodes', None) dims_dict = acados_class2dict(acados_ocp.dims) ocp_check_against_layout(ocp_nlp_dict, dims_dict) with open(json_file, 'w') as f: json.dump(ocp_nlp_dict, f, default=np_array_to_list, indent=4, sort_keys=True) def ocp_formulation_json_load(json_file='acados_ocp_nlp.json'): # Load acados_ocp_nlp structure description ocp_layout = get_ocp_nlp_layout() with open(json_file, 'r') as f: ocp_nlp_json = json.load(f) ocp_nlp_dict = json2dict(ocp_nlp_json, ocp_nlp_json['dims']) # Instantiate AcadosOcp object acados_ocp = AcadosOcp() # load class dict acados_ocp.__dict__ = ocp_nlp_dict # laod class attributes dict, dims, constraints, etc for acados_struct, v in ocp_layout.items(): # skip non dict attributes if not isinstance(v, dict): continue acados_attribute = getattr(acados_ocp, acados_struct) acados_attribute.__dict__ = ocp_nlp_dict[acados_struct] setattr(acados_ocp, acados_struct, acados_attribute) return acados_ocp def ocp_generate_external_functions(acados_ocp, model): model = make_model_consistent(model) if acados_ocp.solver_options.integrator_type == 'ERK': # explicit model -- generate C code generate_c_code_explicit_ode(model) elif acados_ocp.solver_options.integrator_type == 'IRK': # implicit model -- generate C code opts = dict(generate_hess=1) generate_c_code_implicit_ode(model, opts) elif acados_ocp.solver_options.integrator_type == 'GNSF': generate_c_code_gnsf(model) else: raise Exception("ocp_generate_external_functions: unknown integrator type.") if acados_ocp.solver_options.hessian_approx == 'EXACT': opts = dict(generate_hess=1) else: opts = dict(generate_hess=0) if acados_ocp.dims.nphi > 0 or acados_ocp.dims.nh > 0: generate_c_code_constraint(model, model.name, False, opts) if acados_ocp.dims.nphi_e > 0 or acados_ocp.dims.nh_e > 0: generate_c_code_constraint(model, model.name, True, opts) # dummy matrices if not acados_ocp.cost.cost_type == 'LINEAR_LS': acados_ocp.cost.Vx = np.zeros((acados_ocp.dims.ny, acados_ocp.dims.nx)) acados_ocp.cost.Vu = np.zeros((acados_ocp.dims.ny, acados_ocp.dims.nu)) if not acados_ocp.cost.cost_type_e == 'LINEAR_LS': acados_ocp.cost.Vx_e = np.zeros((acados_ocp.dims.ny_e, acados_ocp.dims.nx)) if acados_ocp.cost.cost_type == 'NONLINEAR_LS': generate_c_code_nls_cost(model, model.name, False) elif acados_ocp.cost.cost_type == 'EXTERNAL': generate_c_code_external_cost(model, False) if acados_ocp.cost.cost_type_e == 'NONLINEAR_LS': generate_c_code_nls_cost(model, model.name, True) elif acados_ocp.cost.cost_type_e == 'EXTERNAL': generate_c_code_external_cost(model, True) def ocp_render_templates(acados_ocp, json_file): name = acados_ocp.model.name # setting up loader and environment json_path = '{cwd}/{json_file}'.format( 
cwd=os.getcwd(), json_file=json_file) if not os.path.exists(json_path): raise Exception('{} not found!'.format(json_path)) template_dir = 'c_generated_code/' ## Render templates in_file = 'main.in.c' out_file = 'main_{}.c'.format(name) render_template(in_file, out_file, template_dir, json_path) in_file = 'acados_solver.in.c' out_file = 'acados_solver_{}.c'.format(name) render_template(in_file, out_file, template_dir, json_path) in_file = 'acados_solver.in.h' out_file = 'acados_solver_{}.h'.format(name) render_template(in_file, out_file, template_dir, json_path) in_file = 'Makefile.in' out_file = 'Makefile' render_template(in_file, out_file, template_dir, json_path) in_file = 'acados_solver_sfun.in.c' out_file = 'acados_solver_sfunction_{}.c'.format(name) render_template(in_file, out_file, template_dir, json_path) in_file = 'make_sfun.in.m' out_file = 'make_sfun.m' render_template(in_file, out_file, template_dir, json_path) in_file = 'acados_sim_solver.in.c' out_file = 'acados_sim_solver_{}.c'.format(name) render_template(in_file, out_file, template_dir, json_path) in_file = 'acados_sim_solver.in.h' out_file = 'acados_sim_solver_{}.h'.format(name) render_template(in_file, out_file, template_dir, json_path) ## folder model template_dir = 'c_generated_code/{}_model/'.format(name) in_file = 'model.in.h' out_file = '{}_model.h'.format(name) render_template(in_file, out_file, template_dir, json_path) # constraints on convex over nonlinear function if acados_ocp.constraints.constr_type == 'BGP' and acados_ocp.dims.nphi > 0: # constraints on outer function template_dir = 'c_generated_code/{}_constraints/'.format(name) in_file = 'phi_constraint.in.h' out_file = '{}_phi_constraint.h'.format(name) render_template(in_file, out_file, template_dir, json_path) # terminal constraints on convex over nonlinear function if acados_ocp.constraints.constr_type_e == 'BGP' and acados_ocp.dims.nphi_e > 0: # terminal constraints on outer function template_dir = 'c_generated_code/{}_constraints/'.format(name) in_file = 'phi_e_constraint.in.h' out_file = '{}_phi_e_constraint.h'.format(name) render_template(in_file, out_file, template_dir, json_path) # nonlinear constraints if acados_ocp.constraints.constr_type == 'BGH' and acados_ocp.dims.nh > 0: template_dir = 'c_generated_code/{}_constraints/'.format(name) in_file = 'h_constraint.in.h' out_file = '{}_h_constraint.h'.format(name) render_template(in_file, out_file, template_dir, json_path) # terminal nonlinear constraints if acados_ocp.constraints.constr_type_e == 'BGH' and acados_ocp.dims.nh_e > 0: template_dir = 'c_generated_code/{}_constraints/'.format(name) in_file = 'h_e_constraint.in.h' out_file = '{}_h_e_constraint.h'.format(name) render_template(in_file, out_file, template_dir, json_path) # nonlinear cost function if acados_ocp.cost.cost_type == 'NONLINEAR_LS': template_dir = 'c_generated_code/{}_cost/'.format(name) in_file = 'cost_y_fun.in.h' out_file = '{}_cost_y_fun.h'.format(name) render_template(in_file, out_file, template_dir, json_path) # terminal nonlinear cost function if acados_ocp.cost.cost_type_e == 'NONLINEAR_LS': template_dir = 'c_generated_code/{}_cost/'.format(name) in_file = 'cost_y_e_fun.in.h' out_file = '{}_cost_y_e_fun.h'.format(name) render_template(in_file, out_file, template_dir, json_path) # external cost if acados_ocp.cost.cost_type == 'EXTERNAL': template_dir = 'c_generated_code/{}_cost/'.format(name) in_file = 'external_cost.in.h' out_file = '{}_external_cost.h'.format(name) render_template(in_file, out_file, template_dir, 
json_path) # external cost - terminal if acados_ocp.cost.cost_type_e == 'EXTERNAL': template_dir = 'c_generated_code/{}_cost/'.format(name) in_file = 'external_cost_e.in.h' out_file = '{}_external_cost_e.h'.format(name) render_template(in_file, out_file, template_dir, json_path) class AcadosOcpSolver: """ class to interact with the acados ocp solver C object """ def __init__(self, acados_ocp, json_file='acados_ocp_nlp.json'): self.solver_created = False model = acados_ocp.model # make dims consistent make_ocp_dims_consistent(acados_ocp) if acados_ocp.solver_options.integrator_type == 'GNSF': set_up_imported_gnsf_model(acados_ocp) # set integrator time automatically acados_ocp.solver_options.Tsim = acados_ocp.solver_options.time_steps[0] # generate external functions ocp_generate_external_functions(acados_ocp, model) # dump to json ocp_formulation_json_dump(acados_ocp, json_file) # render templates ocp_render_templates(acados_ocp, json_file) ## Compile solver os.chdir('c_generated_code') os.system('make clean_ocp_shared_lib') os.system('make ocp_shared_lib') os.chdir('..') self.shared_lib_name = 'c_generated_code/libacados_ocp_solver_' + model.name + '.so' # get self.shared_lib = CDLL(self.shared_lib_name) self.shared_lib.acados_create() self.solver_created = True self.shared_lib.acados_get_nlp_opts.restype = c_void_p self.nlp_opts = self.shared_lib.acados_get_nlp_opts() self.shared_lib.acados_get_nlp_dims.restype = c_void_p self.nlp_dims = self.shared_lib.acados_get_nlp_dims() self.shared_lib.acados_get_nlp_config.restype = c_void_p self.nlp_config = self.shared_lib.acados_get_nlp_config() self.shared_lib.acados_get_nlp_out.restype = c_void_p self.nlp_out = self.shared_lib.acados_get_nlp_out() self.shared_lib.acados_get_nlp_in.restype = c_void_p self.nlp_in = self.shared_lib.acados_get_nlp_in() self.shared_lib.acados_get_nlp_solver.restype = c_void_p self.nlp_solver = self.shared_lib.acados_get_nlp_solver() self.acados_ocp = acados_ocp def solve(self): """ solve the ocp with current input """ status = self.shared_lib.acados_solve() return status def get(self, stage_, field_): """ get the last solution of the solver: :param stage: integer corresponding to shooting node :param field_: string in ['x', 'u', 'z', 'pi', 'lam', 't', 'sl', 'su',] .. note:: regarding lam, t: \n the inequalities are internally organized in the following order: \n [ lbu lbx lg lh lphi ubu ubx ug uh uphi; \n lsbu lsbx lsg lsh lsphi usbu usbx usg ush usphi] .. note:: pi: multipliers for dynamics equality constraints \n lam: multipliers for inequalities \n t: slack variables corresponding to evaluation of all inequalities (at the solution) \n sl: slack variables of soft lower inequality constraints \n su: slack variables of soft upper inequality constraints \n """ out_fields = ['x', 'u', 'z', 'pi', 'lam', 't'] mem_fields = ['sl', 'su'] field = field_ field = field.encode('utf-8') if (field_ not in out_fields + mem_fields): raise Exception('AcadosOcpSolver.get(): {} is an invalid argument.\ \n Possible values are {}. 
Exiting.'.format(field_, out_fields + mem_fields)) self.shared_lib.ocp_nlp_dims_get_from_attr.argtypes = \ [c_void_p, c_void_p, c_void_p, c_int, c_char_p] self.shared_lib.ocp_nlp_dims_get_from_attr.restype = c_int dims = self.shared_lib.ocp_nlp_dims_get_from_attr(self.nlp_config, \ self.nlp_dims, self.nlp_out, stage_, field) out = np.ascontiguousarray(np.zeros((dims,)), dtype=np.float64) out_data = cast(out.ctypes.data, POINTER(c_double)) if (field_ in out_fields): self.shared_lib.ocp_nlp_out_get.argtypes = \ [c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p] self.shared_lib.ocp_nlp_out_get(self.nlp_config, \ self.nlp_dims, self.nlp_out, stage_, field, out_data) elif field_ in mem_fields: self.shared_lib.ocp_nlp_get_at_stage.argtypes = \ [c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p] self.shared_lib.ocp_nlp_get_at_stage(self.nlp_config, \ self.nlp_dims, self.nlp_solver, stage_, field, out_data) return out def print_statistics(self): stat = self.get_stats("statistics") if self.acados_ocp.solver_options.nlp_solver_type == 'SQP': print('\niter\tres_stat\tres_eq\t\tres_ineq\tres_comp\tqp_stat\tqp_iter') if stat.shape[0]>7: print('\tqp_res_stat\tqp_res_eq\tqp_res_ineq\tqp_res_comp') for jj in range(stat.shape[1]): print('{:d}\t{:e}\t{:e}\t{:e}\t{:e}\t{:d}\t{:d}'.format( \ int(stat[0][jj]), stat[1][jj], stat[2][jj], \ stat[3][jj], stat[4][jj], int(stat[5][jj]), int(stat[6][jj]))) if stat.shape[0]>7: print('\t{:e}\t{:e}\t{:e}\t{:e}'.format( \ stat[7][jj], stat[8][jj], stat[9][jj], stat[10][jj])) print('\n') elif self.acados_ocp.solver_options.nlp_solver_type == 'SQP_RTI': print('\niter\tqp_stat\tqp_iter') if stat.shape[0]>3: print('\tqp_res_stat\tqp_res_eq\tqp_res_ineq\tqp_res_comp') for jj in range(stat.shape[1]): print('{:d}\t{:d}\t{:d}'.format( int(stat[0][jj]), int(stat[1][jj]), int(stat[2][jj]))) if stat.shape[0]>3: print('\t{:e}\t{:e}\t{:e}\t{:e}'.format( \ stat[3][jj], stat[4][jj], stat[5][jj], stat[6][jj])) print('\n') return def get_stats(self, field_): """ get the information of the last solver call: :param field_: string in ['statistics', 'time_tot', 'time_lin', 'time_sim', 'time_sim_ad', 'time_sim_la', 'time_qp', 'time_qp_solver_call', 'time_reg', 'sqp_iter'] """ fields = ['time_tot', # total cpu time previous call 'time_lin', # cpu time for linearization 'time_sim', # cpu time for integrator 'time_sim_ad', # cpu time for integrator contribution of external function calls 'time_sim_la', # cpu time for integrator contribution of linear algebra 'time_qp', # cpu time qp solution 'time_qp_solver_call', # cpu time inside qp solver (without converting the QP) 'time_qp_xcond', 'time_reg', # cpu time regularization 'sqp_iter', # number of SQP iterations 'statistics', # table with info about last iteration 'stat_m', 'stat_n', ] field = field_ field = field.encode('utf-8') if (field_ not in fields): raise Exception('AcadosOcpSolver.get_stats(): {} is not a valid argument.\ \n Possible values are {}. 
Exiting.'.format(fields, fields)) if field_ in ['sqp_iter', 'stat_m', 'stat_n']: out = np.ascontiguousarray(np.zeros((1,)), dtype=np.int64) out_data = cast(out.ctypes.data, POINTER(c_int64)) elif field_ == 'statistics': sqp_iter = self.get_stats("sqp_iter") stat_m = self.get_stats("stat_m") stat_n = self.get_stats("stat_n") min_size = min([stat_m, sqp_iter+1]) out = np.ascontiguousarray( np.zeros( (stat_n[0]+1, min_size[0]) ), dtype=np.float64) out_data = cast(out.ctypes.data, POINTER(c_double)) else: out = np.ascontiguousarray(np.zeros((1,)), dtype=np.float64) out_data = cast(out.ctypes.data, POINTER(c_double)) self.shared_lib.ocp_nlp_get.argtypes = [c_void_p, c_void_p, c_char_p, c_void_p] self.shared_lib.ocp_nlp_get(self.nlp_config, self.nlp_solver, field, out_data) return out # Note: this function should not be used anymore, better use cost_set, constraints_set def set(self, stage_, field_, value_): cost_fields = ['y_ref', 'yref'] constraints_fields = ['lbx', 'ubx', 'lbu', 'ubu'] out_fields = ['x', 'u', 'pi', 'lam', 't'] # cast value_ to avoid conversion issues value_ = value_.astype(float) field = field_ field = field.encode('utf-8') stage = c_int(stage_) # treat parameters separately if field_ is 'p': self.shared_lib.acados_update_params.argtypes = [c_int, POINTER(c_double)] self.shared_lib.acados_update_params.restype = c_int value_data = cast(value_.ctypes.data, POINTER(c_double)) self.shared_lib.acados_update_params(stage, value_data, value_.shape[0]) else: if field_ not in constraints_fields + cost_fields + out_fields: raise Exception("AcadosOcpSolver.set(): {} is not a valid argument.\ \nPossible values are {}. Exiting.".format(field, \ constraints_fields + cost_fields + out_fields + ['p'])) self.shared_lib.ocp_nlp_dims_get_from_attr.argtypes = \ [c_void_p, c_void_p, c_void_p, c_int, c_char_p] self.shared_lib.ocp_nlp_dims_get_from_attr.restype = c_int dims = self.shared_lib.ocp_nlp_dims_get_from_attr(self.nlp_config, \ self.nlp_dims, self.nlp_out, stage_, field) if value_.shape[0] != dims: msg = 'AcadosOcpSolver.set(): mismatching dimension for field "{}" '.format(field_) msg += 'with dimension {} (you have {})'.format(dims, value_.shape[0]) raise Exception(msg) value_data = cast(value_.ctypes.data, POINTER(c_double)) value_data_p = cast((value_data), c_void_p) if field_ in constraints_fields: self.shared_lib.ocp_nlp_constraints_model_set.argtypes = \ [c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p] self.shared_lib.ocp_nlp_constraints_model_set(self.nlp_config, \ self.nlp_dims, self.nlp_in, stage, field, value_data_p) elif field_ in cost_fields: self.shared_lib.ocp_nlp_cost_model_set.argtypes = \ [c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p] self.shared_lib.ocp_nlp_cost_model_set(self.nlp_config, \ self.nlp_dims, self.nlp_in, stage, field, value_data_p) elif field_ in out_fields: self.shared_lib.ocp_nlp_out_set.argtypes = \ [c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p] self.shared_lib.ocp_nlp_out_set(self.nlp_config, \ self.nlp_dims, self.nlp_out, stage, field, value_data_p) return def cost_set(self, stage_, field_, value_): """ set numerical data in the cost module of the solver: :param stage_: integer corresponding to shooting node :param field_: string, e.g. 
'yref', 'W', 'ext_cost_num_hess' :param value_: of appropriate size """ # cast value_ to avoid conversion issues value_ = value_.astype(float) field = field_ field = field.encode('utf-8') stage = c_int(stage_) self.shared_lib.ocp_nlp_cost_dims_get_from_attr.argtypes = \ [c_void_p, c_void_p, c_void_p, c_int, c_char_p, POINTER(c_int)] self.shared_lib.ocp_nlp_cost_dims_get_from_attr.restype = c_int dims = np.ascontiguousarray(np.zeros((2,)), dtype=np.intc) dims_data = cast(dims.ctypes.data, POINTER(c_int)) self.shared_lib.ocp_nlp_cost_dims_get_from_attr(self.nlp_config, \ self.nlp_dims, self.nlp_out, stage_, field, dims_data) value_shape = value_.shape if len(value_shape) == 1: value_shape = (value_shape[0], 0) if value_shape != tuple(dims): raise Exception('AcadosOcpSolver.cost_set(): mismatching dimension', \ ' for field "{}" with dimension {} (you have {})'.format( \ field_, tuple(dims), value_shape)) value_data = cast(value_.ctypes.data, POINTER(c_double)) value_data_p = cast((value_data), c_void_p) self.shared_lib.ocp_nlp_cost_model_set.argtypes = \ [c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p] self.shared_lib.ocp_nlp_cost_model_set(self.nlp_config, \ self.nlp_dims, self.nlp_in, stage, field, value_data_p) return def constraints_set(self, stage_, field_, value_): """ set numerical data in the constraint module of the solver: Parameters: :param stage_: integer corresponding to shooting node :param field_: string, e.g. 'lbx' :param value_: of appropriate size """ # cast value_ to avoid conversion issues value_ = value_.astype(float) field = field_ field = field.encode('utf-8') stage = c_int(stage_) self.shared_lib.ocp_nlp_constraint_dims_get_from_attr.argtypes = \ [c_void_p, c_void_p, c_void_p, c_int, c_char_p, POINTER(c_int)] self.shared_lib.ocp_nlp_constraint_dims_get_from_attr.restype = c_int dims = np.ascontiguousarray(np.zeros((2,)), dtype=np.intc) dims_data = cast(dims.ctypes.data, POINTER(c_int)) self.shared_lib.ocp_nlp_constraint_dims_get_from_attr(self.nlp_config, \ self.nlp_dims, self.nlp_out, stage_, field, dims_data) value_shape = value_.shape if len(value_shape) == 1: value_shape = (value_shape[0], 0) if value_shape != tuple(dims): raise Exception('AcadosOcpSolver.constraints_set(): mismatching dimension' \ ' for field "{}" with dimension {} (you have {})'.format(field_, tuple(dims), value_shape)) value_data = cast(value_.ctypes.data, POINTER(c_double)) value_data_p = cast((value_data), c_void_p) self.shared_lib.ocp_nlp_constraints_model_set.argtypes = \ [c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p] self.shared_lib.ocp_nlp_constraints_model_set(self.nlp_config, \ self.nlp_dims, self.nlp_in, stage, field, value_data_p) return def options_set(self, field_, value_): """ set options of the solver: Parameters: :param field_: string, e.g. 'print_level', 'rti_phase', 'initialize_t_slacks', 'step_length' :param value_: of type int, float """ int_fields = ['print_level', 'rti_phase', 'initialize_t_slacks'] double_fields = ['step_length'] string_fields = ['globalization'] if field_ in int_fields: if not isinstance(value_, int): raise Exception('solver option {} must be of type int. You have {}.'.format(field_, type(value_))) else: value_ctypes = c_int(value_) elif field_ in double_fields: if not isinstance(value_, float): raise Exception('solver option {} must be of type float. 
You have {}.'.format(field_, type(value_))) else: value_ctypes = c_double(value_) elif field_ in string_fields: if not isinstance(value_, str): raise Exception('solver option {} must be of type str. You have {}.'.format(field_, type(value_))) else: value_ctypes = value_.encode('utf-8') if field_ == 'rti_phase': if value_ < 0 or value_ > 2: raise Exception('AcadosOcpSolver.solve(): argument \'rti_phase\' can ' 'take only values 0, 1, 2 for SQP-RTI-type solvers') if self.acados_ocp.solver_options.nlp_solver_type != 'SQP_RTI' and value_ > 0: raise Exception('AcadosOcpSolver.solve(): argument \'rti_phase\' can ' 'take only value 0 for SQP-type solvers') field = field_ field = field.encode('utf-8') if field_ in string_fields: self.shared_lib.ocp_nlp_solver_opts_set.argtypes = \ [c_void_p, c_void_p, c_char_p, c_char_p] self.shared_lib.ocp_nlp_solver_opts_set(self.nlp_config, \ self.nlp_opts, field, value_ctypes) else: self.shared_lib.ocp_nlp_solver_opts_set.argtypes = \ [c_void_p, c_void_p, c_char_p, c_void_p] self.shared_lib.ocp_nlp_solver_opts_set(self.nlp_config, \ self.nlp_opts, field, byref(value_ctypes)) return def __del__(self): if self.solver_created: self.shared_lib.acados_free() del self.shared_lib # NOTE: DLL cannot be easily unloaded!!! # see https://stackoverflow.com/questions/359498/how-can-i-unload-a-dll-using-ctypes-in-python # while isLoaded(self.shared_lib_name): # dlclose(handle)
1.148438
1
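A hedged end-to-end sketch of how the AcadosOcpSolver class in the record above is typically driven. The OCP setup itself depends on the application, so build_ocp() is a placeholder the reader must supply; everything else uses only methods shown in the record (constraints_set, solve, print_statistics, get).

import numpy as np

# build_ocp() is a stand-in for user code that fills an AcadosOcp instance
# (model, dims.N, cost, constraints, solver_options), which the constructor
# then validates via make_ocp_dims_consistent.
ocp = build_ocp()
solver = AcadosOcpSolver(ocp, json_file="acados_ocp_nlp.json")

# Pin the initial state by tightening its box constraints at stage 0.
x0 = np.zeros(ocp.dims.nx)      # placeholder initial state
solver.constraints_set(0, "lbx", x0)
solver.constraints_set(0, "ubx", x0)

status = solver.solve()
if status != 0:
    solver.print_statistics()

u0 = solver.get(0, "u")         # first control move
x1 = solver.get(1, "x")         # predicted state at the next shooting node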
noise/estimation/PCA/analyticNoiseEstimation_PCA.py
MaikWischow/Camera-Condition-Monitoring
3
2598
<reponame>MaikWischow/Camera-Condition-Monitoring<gh_stars>1-10 import numpy as np import cv2 import sys import os import glob def im2patch(im, pch_size, stride=1): ''' Transform image to patches. Input: im: 3 x H x W or 1 X H x W image, numpy format pch_size: (int, int) tuple or integer stride: (int, int) tuple or integer ''' if isinstance(pch_size, tuple): pch_H, pch_W = pch_size elif isinstance(pch_size, int): pch_H = pch_W = pch_size else: sys.exit('The input of pch_size must be a integer or a int tuple!') if isinstance(stride, tuple): stride_H, stride_W = stride elif isinstance(stride, int): stride_H = stride_W = stride else: sys.exit('The input of stride must be a integer or a int tuple!') C, H, W = im.shape num_H = len(range(0, H-pch_H+1, stride_H)) num_W = len(range(0, W-pch_W+1, stride_W)) num_pch = num_H * num_W pch = np.zeros((C, pch_H*pch_W, num_pch), dtype=im.dtype) kk = 0 for ii in range(pch_H): for jj in range(pch_W): temp = im[:, ii:H-pch_H+ii+1:stride_H, jj:W-pch_W+jj+1:stride_W] pch[:, kk, :] = temp.reshape((C, num_pch)) kk += 1 return pch.reshape((C, pch_H, pch_W, num_pch)) def noise_estimate(im, pch_size=8): ''' Implement of noise level estimation of the following paper: <NAME> , <NAME> , <NAME> . An Efficient Statistical Method for Image Noise Level Estimation[C]// 2015 IEEE International Conference on Computer Vision (ICCV). IEEE Computer Society, 2015. Input: im: the noise image, H x W x 3 or H x W numpy tensor, range [0,1] pch_size: patch_size Output: noise_level: the estimated noise level ''' if im.ndim == 3: im = im.transpose((2, 0, 1)) else: im = np.expand_dims(im, axis=0) # image to patch pch = im2patch(im, pch_size, 3) # C x pch_size x pch_size x num_pch tensor num_pch = pch.shape[3] pch = pch.reshape((-1, num_pch)) # d x num_pch matrix d = pch.shape[0] mu = pch.mean(axis=1, keepdims=True) # d x 1 X = pch - mu sigma_X = np.matmul(X, X.transpose()) / num_pch sig_value, _ = np.linalg.eigh(sigma_X) sig_value.sort() for ii in range(-1, -d-1, -1): tau = np.mean(sig_value[:ii]) if np.sum(sig_value[:ii]>tau) == np.sum(sig_value[:ii] < tau): return np.sqrt(tau) def run(imgPath, patchSize, internalNumPatches, dirOut, saveResults=True): """ Estimates the standard deviation of (additive white gaussian) noise of image patches. The noise is estimated patch by patch. Based on: "An Efficient Statistical Method for Image Noise Level Estimation" (2015) :param imgPath: Path to the input image. :param patchSize: Image patch size. :param internalNumPatches: Internal number of sub-image-patches. :param dirOut: Directory where to save the noise estimation results. :param saveResults: Whether to save the estimation results or not. 
:return: None """ # Load image img = np.array(cv2.imread(imgPath)) try: img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) img = img / 255.0 h, w = img.shape psize = min(min(patchSize, h), w) psize -= psize % 2 patch_step = psize shift_factor = 2 # Result array estimatedNoiseMap = np.zeros([h, w], dtype=np.int8) rangex = range(0, w, patch_step) rangey = range(0, h, patch_step) for start_x in rangex: for start_y in rangey: end_x = start_x + psize end_y = start_y + psize if end_x > w: end_x = w end_x = shift_factor * ((end_x) // shift_factor) start_x = end_x - psize if end_y > h: end_y = h end_y = shift_factor * ((end_y) // shift_factor) start_y = end_y - psize tileM = img[start_y:end_y, start_x:end_x] h_, w_ = tileM.shape sigma = noise_estimate(tileM, internalNumPatches) * 255.0 estimatedNoiseMap[start_y :start_y + h_, start_x : start_x + w_] = sigma if saveResults: if dirOut is not None: imgName = imgPath.split(os.sep)[-1].split(".")[0] dirOut = os.path.join(dirOut) if not os.path.exists(dirOut): os.makedirs(dirOut) noiseMapPath = os.path.join(dirOut, imgName + ".npz") if not os.path.exists(noiseMapPath): np.savez_compressed(noiseMapPath, estimatedNoiseMap) return estimatedNoiseMap except: return None # Example # if __name__ == '__main__': # dirIn = r"../../../data/udacity/img/GT" # dirOut = r"../../../data/udacity/labels_noise_patchwise/PCA" # imgFileEnding = ".jpg" # for imgPath in glob.glob(os.path.join(dirIn, "*" + imgFileEnding)): # run(imgPath, 128, 8, dirOut)
2.515625
3
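A quick self-check for the noise_estimate function in the PCA record above: add Gaussian noise of known strength to a flat synthetic image and see whether the estimate comes back close to the true sigma. The image size, seed, and tolerance wording are illustrative choices, not values from the project.

import numpy as np

rng = np.random.default_rng(0)
clean = np.full((256, 256), 0.5)                     # flat gray image in [0, 1]
sigma_true = 10.0 / 255.0
noisy = np.clip(clean + rng.normal(0.0, sigma_true, clean.shape), 0.0, 1.0)

sigma_est = noise_estimate(noisy, pch_size=8)        # function defined in the record above
print(sigma_est * 255.0)                             # should land near 10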
src/biotite/application/application.py
claudejrogers/biotite
0
2599
# This source code is part of the Biotite package and is distributed # under the 3-Clause BSD License. Please see 'LICENSE.rst' for further # information. __name__ = "biotite.application" __author__ = "<NAME>" __all__ = ["Application", "AppStateError", "TimeoutError", "VersionError", "AppState", "requires_state"] import abc import time from functools import wraps from enum import Flag, auto class AppState(Flag): """ This enum type represents the app states of an application. """ CREATED = auto() RUNNING = auto() FINISHED = auto() JOINED = auto() CANCELLED = auto() def requires_state(app_state): """ A decorator for methods of :class:`Application` subclasses that raises an :class:`AppStateError` in case the method is called, when the :class:`Application` is not in the specified :class:`AppState` `app_state`. Parameters ---------- app_state : AppState The required app state. Examples -------- Raises :class:`AppStateError` when `function` is called, if :class:`Application` is not in one of the specified states: >>> @requires_state(AppState.RUNNING | AppState.FINISHED) ... def function(self): ... pass """ def decorator(func): @wraps(func) def wrapper(*args, **kwargs): # First parameter of method is always 'self' instance = args[0] if not instance._state & app_state: raise AppStateError( f"The application is in {instance.get_app_state()} state, " f"but {app_state} state is required" ) return func(*args, **kwargs) return wrapper return decorator class Application(metaclass=abc.ABCMeta): """ This class is a wrapper around an external piece of runnable software in any sense. Subclasses of this abstract base class specify the respective kind of software and the way of interacting with it. Every :class:`Application` runs through a different app states (instances of enum :class:`AppState`) from its creation until its termination: Directly after its instantiation the app is in the *CREATED* state. In this state further parameters can be set for the application run. After the user calls the :func:`start()` method, the app state is set to *RUNNING* and the :class:`Application` type specific :func:`run()` method is called. When the application finishes the AppState changes to *FINISHED*. This is checked via the :class:`Application` type specific :func:`is_finished()` method. The user can now call the :func:`join()` method, concluding the application in the *JOINED* state and making the results of the application accessible by executing the :class:`Application` type specific :func:`evaluate()` method. Furthermore this executes the :class:`Application` type specific :func:`clean_up()` method. :func:`join()` can even be called in the *RUNNING* state: This will constantly check :func:`is_finished()` and will directly go into the *JOINED* state as soon as the application reaches the *FINISHED* state. Calling the :func:`cancel()` method while the application is *RUNNING* or *FINISHED* leaves the application in the *CANCELLED* state. This triggers the :func:`clean_up()` method, too, but there are no accessible results. If a method is called in an unsuitable app state, an :class:`AppStateError` is called. The application run behaves like an additional thread: Between the call of :func:`start()` and :func:`join()` other Python code can be executed, while the application runs in the background. """ def __init__(self): self._state = AppState.CREATED @requires_state(AppState.CREATED) def start(self): """ Start the application run and set its state to *RUNNING*. This can only be done from the *CREATED* state. 
""" self.run() self._start_time = time.time() self._state = AppState.RUNNING @requires_state(AppState.RUNNING | AppState.FINISHED) def join(self, timeout=None): """ Conclude the application run and set its state to *JOINED*. This can only be done from the *RUNNING* or *FINISHED* state. If the application is *FINISHED* the joining process happens immediately, if otherwise the application is *RUNNING*, this method waits until the application is *FINISHED*. Parameters ---------- timeout : float, optional If this parameter is specified, the :class:`Application` only waits for finishing until this value (in seconds) runs out. After this time is exceeded a :class:`TimeoutError` is raised and the application is cancelled. Raises ------ TimeoutError If the joining process exceeds the `timeout` value. """ time.sleep(self.wait_interval()) while self.get_app_state() != AppState.FINISHED: if timeout is not None and time.time()-self._start_time > timeout: self.cancel() raise TimeoutError( f"The application expired its timeout " f"({timeout:.1f} s)" ) else: time.sleep(self.wait_interval()) time.sleep(self.wait_interval()) try: self.evaluate() except AppStateError: raise except: self._state = AppState.CANCELLED raise else: self._state = AppState.JOINED self.clean_up() @requires_state(AppState.RUNNING | AppState.FINISHED) def cancel(self): """ Cancel the application when in *RUNNING* or *FINISHED* state. """ self._state = AppState.CANCELLED self.clean_up() def get_app_state(self): """ Get the current app state. Returns ------- app_state : AppState The current app state. """ if self._state == AppState.RUNNING: if self.is_finished(): self._state = AppState.FINISHED return self._state @abc.abstractmethod def run(self): """ Commence the application run. Called in :func:`start()`. PROTECTED: Override when inheriting. """ pass @abc.abstractmethod def is_finished(self): """ Check if the application has finished. PROTECTED: Override when inheriting. Returns ------- finished : bool True of the application has finished, false otherwise """ pass @abc.abstractmethod def wait_interval(self): """ The time interval of :func:`is_finished()` calls in the joining process. PROTECTED: Override when inheriting. Returns ------- interval : float Time (in seconds) between calls of :func:`is_finished()` in :func:`join()` """ pass @abc.abstractmethod def evaluate(self): """ Evaluate application results. Called in :func:`join()`. PROTECTED: Override when inheriting. """ pass def clean_up(self): """ Do clean up work after the application terminates. PROTECTED: Optionally override when inheriting. """ pass class AppStateError(Exception): """ Indicate that the application lifecycle was violated. """ pass class TimeoutError(Exception): """ Indicate that the application's timeout expired. """ pass class VersionError(Exception): """ Indicate that the application's version is invalid. """ pass
2.625
3
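A minimal concrete subclass of the biotite Application base class in the record above, showing which abstract methods must be implemented. The "external tool" here is simulated with a timestamp, so the example is runnable on its own given the classes from the record.

import time

class SleepApp(Application):
    """Toy Application that simply waits a fixed number of seconds."""

    def __init__(self, seconds=1.0):
        super().__init__()
        self._seconds = seconds

    def run(self):
        # Pretend to launch an external tool by recording when it would finish.
        self._end = time.time() + self._seconds

    def is_finished(self):
        return time.time() >= self._end

    def wait_interval(self):
        return 0.1

    def evaluate(self):
        self._result = "done"

    @requires_state(AppState.JOINED)
    def get_result(self):
        return self._result

app = SleepApp(0.5)
app.start()          # CREATED -> RUNNING
app.join()           # waits until FINISHED, then evaluates and moves to JOINED
print(app.get_result())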