max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content | score | int_score
---|---|---|---|---|---|---
python/paddle/tensor/attribute.py | douch/Paddle | 1 | 2600 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from ..framework import core
from ..fluid.layer_helper import LayerHelper
from ..fluid.data_feeder import check_variable_and_dtype
# TODO: define functions to get tensor attributes
from ..fluid.layers import rank # noqa: F401
from ..fluid.layers import shape # noqa: F401
import paddle
from paddle import _C_ops
from paddle.static import Variable
from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode
__all__ = []
def _complex_to_real_dtype(dtype):
if dtype == core.VarDesc.VarType.COMPLEX64:
return core.VarDesc.VarType.FP32
elif dtype == core.VarDesc.VarType.COMPLEX128:
return core.VarDesc.VarType.FP64
else:
return dtype
def _real_to_complex_dtype(dtype):
if dtype == core.VarDesc.VarType.FP32:
return core.VarDesc.VarType.COMPLEX64
elif dtype == core.VarDesc.VarType.FP64:
return core.VarDesc.VarType.COMPLEX128
else:
return dtype
def is_complex(x):
"""Return whether x is a tensor of complex data type(complex64 or complex128).
Args:
x (Tensor): The input tensor.
Returns:
bool: True if the data type of the input is complex data type, otherwise false.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1 + 2j, 3 + 4j])
print(paddle.is_complex(x))
# True
x = paddle.to_tensor([1.1, 1.2])
print(paddle.is_complex(x))
# False
x = paddle.to_tensor([1, 2, 3])
print(paddle.is_complex(x))
# False
"""
if not isinstance(x, (paddle.Tensor, paddle.static.Variable)):
raise TypeError("Expected Tensor, but received type of x: {}".format(
type(x)))
dtype = x.dtype
is_complex_dtype = (dtype == core.VarDesc.VarType.COMPLEX64 or
dtype == core.VarDesc.VarType.COMPLEX128)
return is_complex_dtype
def is_floating_point(x):
"""
Returns whether the dtype of `x` is one of paddle.float64, paddle.float32, paddle.float16, and paddle.bfloat16.
Args:
x (Tensor): The input tensor.
Returns:
bool: True if the dtype of `x` is floating type, otherwise false.
Examples:
.. code-block:: python
import paddle
x = paddle.arange(1., 5., dtype='float32')
y = paddle.arange(1, 5, dtype='int32')
print(paddle.is_floating_point(x))
# True
print(paddle.is_floating_point(y))
# False
"""
if not isinstance(x, (paddle.Tensor, paddle.static.Variable)):
raise TypeError("Expected Tensor, but received type of x: {}".format(
type(x)))
dtype = x.dtype
is_fp_dtype = (dtype == core.VarDesc.VarType.FP32 or
dtype == core.VarDesc.VarType.FP64 or
dtype == core.VarDesc.VarType.FP16 or
dtype == core.VarDesc.VarType.BF16)
return is_fp_dtype
def is_integer(x):
"""Return whether x is a tensor of integeral data type.
Args:
x (Tensor): The input tensor.
Returns:
bool: True if the data type of the input is integer data type, otherwise false.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1 + 2j, 3 + 4j])
print(paddle.is_integer(x))
# False
x = paddle.to_tensor([1.1, 1.2])
print(paddle.is_integer(x))
# False
x = paddle.to_tensor([1, 2, 3])
print(paddle.is_integer(x))
# True
"""
if not isinstance(x, (paddle.Tensor, paddle.static.Variable)):
raise TypeError("Expected Tensor, but received type of x: {}".format(
type(x)))
dtype = x.dtype
is_int_dtype = (dtype == core.VarDesc.VarType.UINT8 or
dtype == core.VarDesc.VarType.INT8 or
dtype == core.VarDesc.VarType.INT16 or
dtype == core.VarDesc.VarType.INT32 or
dtype == core.VarDesc.VarType.INT64)
return is_int_dtype
def real(x, name=None):
"""
Returns a new tensor containing real values of the input tensor.
Args:
x (Tensor): the input tensor, its data type could be complex64 or complex128.
name (str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
Tensor: a tensor containing real values of the input tensor.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor(
[[1 + 6j, 2 + 5j, 3 + 4j], [4 + 3j, 5 + 2j, 6 + 1j]])
# Tensor(shape=[2, 3], dtype=complex64, place=CUDAPlace(0), stop_gradient=True,
# [[(1+6j), (2+5j), (3+4j)],
# [(4+3j), (5+2j), (6+1j)]])
real_res = paddle.real(x)
# Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [[1., 2., 3.],
# [4., 5., 6.]])
real_t = x.real()
# Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [[1., 2., 3.],
# [4., 5., 6.]])
"""
if in_dygraph_mode():
return _C_ops.final_state_real(x)
if _in_legacy_dygraph():
return _C_ops.real(x)
check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'real')
helper = LayerHelper('real', **locals())
out = helper.create_variable_for_type_inference(
dtype=_complex_to_real_dtype(helper.input_dtype()))
helper.append_op(type='real', inputs={'X': x}, outputs={'Out': out})
return out
def imag(x, name=None):
"""
Returns a new tensor containing imaginary values of input tensor.
Args:
x (Tensor): the input tensor, its data type could be complex64 or complex128.
name (str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
Tensor: a tensor containing imaginary values of the input tensor.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor(
[[1 + 6j, 2 + 5j, 3 + 4j], [4 + 3j, 5 + 2j, 6 + 1j]])
# Tensor(shape=[2, 3], dtype=complex64, place=CUDAPlace(0), stop_gradient=True,
# [[(1+6j), (2+5j), (3+4j)],
# [(4+3j), (5+2j), (6+1j)]])
imag_res = paddle.imag(x)
# Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [[6., 5., 4.],
# [3., 2., 1.]])
imag_t = x.imag()
# Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [[6., 5., 4.],
# [3., 2., 1.]])
"""
if in_dygraph_mode():
return _C_ops.final_state_imag(x)
if _in_legacy_dygraph():
return _C_ops.imag(x)
check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'imag')
helper = LayerHelper('imag', **locals())
out = helper.create_variable_for_type_inference(
dtype=_complex_to_real_dtype(helper.input_dtype()))
helper.append_op(type='imag', inputs={'X': x}, outputs={'Out': out})
return out
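# Note on the dispatch pattern above (descriptive, added for clarity): both
# `real` and `imag` try the new eager-mode C++ op first, fall back to the
# legacy dygraph op, and otherwise build a static-graph op:
#
#   if in_dygraph_mode():      # new eager mode  -> _C_ops.final_state_*
#   if _in_legacy_dygraph():   # legacy dygraph  -> _C_ops.*
#   # else: static graph       -> LayerHelper.append_op(...)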
| 2.046875 | 2 |
ocdsmerge/exceptions.py | open-contracting/ocds-merge | 4 | 2601 |
class OCDSMergeError(Exception):
"""Base class for exceptions from within this package"""
class MissingDateKeyError(OCDSMergeError, KeyError):
"""Raised when a release is missing a 'date' key"""
def __init__(self, key, message):
self.key = key
self.message = message
def __str__(self):
return str(self.message)
class NonObjectReleaseError(OCDSMergeError, TypeError):
"""Raised when a release is not an object"""
class NullDateValueError(OCDSMergeError, TypeError):
"""Raised when a release has a null 'date' value"""
class NonStringDateValueError(OCDSMergeError, TypeError):
"""Raised when a release has a non-string 'date' value"""
class InconsistentTypeError(OCDSMergeError, TypeError):
"""Raised when a path is a literal and an object in different releases"""
class OCDSMergeWarning(UserWarning):
"""Base class for warnings from within this package"""
class DuplicateIdValueWarning(OCDSMergeWarning):
"""Used when at least two objects in the same array have the same value for the 'id' field"""
def __init__(self, path, id, message):
self.path = path
self.id = id
self.message = message
def __str__(self):
return str(self.message)
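# Minimal usage sketch (illustrative, not part of the package): because
# MissingDateKeyError subclasses both OCDSMergeError and KeyError, callers
# can catch it under either base class.
#
#   try:
#       raise MissingDateKeyError('date', "release is missing a 'date' key")
#   except KeyError as e:       # also matched by `except OCDSMergeError`
#       print(e)                # release is missing a 'date' key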
| 2.46875 | 2 |
appcodec.py | guardhunt/TelemterRC | 0 | 2602 |
import evdev
import time
import struct
class appcodec():
def __init__(self):
self.device = evdev.InputDevice("/dev/input/event2")
self.capabilities = self.device.capabilities(verbose=True)
self.capaRAW = self.device.capabilities(absinfo=False)
self.config = {}
self.state = {}
def build(self):
"""build state dictionary for controller"""
#build config dictionary by code and name
for key, value in self.capabilities.items():
for element in value:
if type(element[0]) is tuple:
self.config[element[0][1]] = element[0][0]
elif type(element[0]) is list:
self.config[element[1]] = element[0][0]
elif ("SYN" in str(element[0])) or ("FF" in str(element[0])):
pass
else:
self.config[element[1]] = element[0]
#build state dictionary from raw codes
for code in self.capaRAW[1]:
self.state[self.config[code]] = 0
for code in self.capaRAW[3]:
self.state[self.config[code]] = 0
print("waiting for event")
for event in self.device.read_loop():
if event.type == evdev.ecodes.EV_KEY or event.type == evdev.ecodes.EV_ABS:
return(self.update_state(event))
def update_state(self, event):
self.state[self.config[event.code]] = event.value
buttons1_state = 0
buttons1_state = buttons1_state | self.state["BTN_A"]
buttons1_state = buttons1_state | self.state["BTN_B"] << 1
buttons1_state = buttons1_state | self.state["BTN_NORTH"] << 2
buttons1_state = buttons1_state | self.state["BTN_WEST"] << 3
buttons2_state = 0
buttons2_state = buttons2_state | self.state["BTN_START"]
buttons2_state = buttons2_state | self.state["BTN_MODE"] << 1
buttons2_state = buttons2_state | self.state["BTN_SELECT"] << 2
buttons2_state = buttons2_state | self.state["BTN_TR"] << 3
buttons2_state = buttons2_state | self.state["BTN_TL"] << 4
packet = struct.pack('6h2c', self.state["ABS_X"], self.state["ABS_Y"], self.state["ABS_RX"], self.state["ABS_RY"], self.state["ABS_HAT0X"], self.state["ABS_HAT0Y"], buttons1_state.to_bytes(1, byteorder="big"), buttons2_state.to_bytes(1, byteorder="big"))
return packet
def decode(self, packet):
buttons = []
state = packet[14:30]
state = struct.unpack('6h2B2c', state)
buttons1 = state[8]
buttons2 = state[9]
holder1 = '{0:06b}'.format(int.from_bytes(buttons1, byteorder="big"))
holder2 = '{0:05b}'.format(int.from_bytes(buttons2, byteorder="big"))
for i in holder1:
buttons.append(int(i))
for i in holder2:
buttons.append(int(i))
        # keep the first 7 numeric fields and append the decoded button bits
        state = list(state[:7]) + buttons
return state
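if __name__ == "__main__":
    # Minimal sketch (illustrative, hypothetical values): the payload built by
    # update_state packs six signed shorts followed by two single-byte button
    # bitmasks, i.e. '6h2c' == 6*2 + 2 == 14 bytes.
    demo = struct.pack('6h2c', 100, -200, 0, 0, 1, -1,
                       (0b0101).to_bytes(1, byteorder="big"),
                       (0b00011).to_bytes(1, byteorder="big"))
    print(len(demo))                    # 14
    print(struct.unpack('6h2c', demo))  # axes as ints, buttons as raw bytes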
| 2.828125 | 3 |
scripts/examples/OpenMV/16-Codes/find_barcodes.py | jiskra/openmv | 1,761 | 2603 | # Barcode Example
#
# This example shows off how easy it is to detect bar codes using the
# OpenMV Cam M7. Barcode detection does not work on the M4 Camera.
import sensor, image, time, math
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.VGA) # High Res!
sensor.set_windowing((640, 80)) # V Res of 80 == less work (40 for 2X the speed).
sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False) # must turn this off to prevent image washout...
sensor.set_auto_whitebal(False) # must turn this off to prevent image washout...
clock = time.clock()
# Barcode detection can run at the full 640x480 resolution of your OpenMV Cam's
# OV7725 camera module. Barcode detection will also work in RGB565 mode but at
# a lower resolution. That said, barcode detection requires a higher resolution
# to work well so it should always be run at 640x480 in grayscale...
def barcode_name(code):
if(code.type() == image.EAN2):
return "EAN2"
if(code.type() == image.EAN5):
return "EAN5"
if(code.type() == image.EAN8):
return "EAN8"
if(code.type() == image.UPCE):
return "UPCE"
if(code.type() == image.ISBN10):
return "ISBN10"
if(code.type() == image.UPCA):
return "UPCA"
if(code.type() == image.EAN13):
return "EAN13"
if(code.type() == image.ISBN13):
return "ISBN13"
if(code.type() == image.I25):
return "I25"
if(code.type() == image.DATABAR):
return "DATABAR"
if(code.type() == image.DATABAR_EXP):
return "DATABAR_EXP"
if(code.type() == image.CODABAR):
return "CODABAR"
if(code.type() == image.CODE39):
return "CODE39"
if(code.type() == image.PDF417):
return "PDF417"
if(code.type() == image.CODE93):
return "CODE93"
if(code.type() == image.CODE128):
return "CODE128"
while(True):
clock.tick()
img = sensor.snapshot()
codes = img.find_barcodes()
for code in codes:
img.draw_rectangle(code.rect())
print_args = (barcode_name(code), code.payload(), (180 * code.rotation()) / math.pi, code.quality(), clock.fps())
print("Barcode %s, Payload \"%s\", rotation %f (degrees), quality %d, FPS %f" % print_args)
if not codes:
print("FPS %f" % clock.fps())
| 3.265625 | 3 |
Python/factorial.py | devaslooper/Code-Overflow | 0 | 2604 |
n=int(input("Enter number "))
fact=1
for i in range(1,n+1):
fact=fact*i
print("Factorial is ",fact)
| 3.796875 | 4 |
mundo 3/099.py | thiagofreitascarneiro/Curso-de-Python---Curso-em-Video | 1 | 2605 |
import time
# The * unpacks the parameter: it lets the function receive any number of arguments.
def maior(*num):
contador = maior = 0
    print('Analyzing the given values...')
for v in num:
contador = contador + 1
print(f'{v} ', end='', flush=True)
time.sleep(0.3)
if contador == 1:
maior = v
else:
if v > maior:
maior = v
    print(f'A total of {len(num)} values were given')
    print(f'The largest value given was {maior}')
print(30 * '-')
maior(2, 1, 7)
maior(5, 4, 7, 9, 2)
maior(1, 4, 7, 20, 2)
maior(0)
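# A minimal alternative sketch using only built-ins (`largest_builtin` is a
# hypothetical helper; same totals, without the per-value animation above):
def largest_builtin(*num):
    print(f'A total of {len(num)} values were given')
    print(f'The largest value given was {max(num)}')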
| 3.734375 | 4 |
tests/test_packed_to_padded.py | theycallmepeter/pytorch3d_PBR | 0 | 2606 |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from common_testing import TestCaseMixin, get_random_cuda_device
from pytorch3d.ops import packed_to_padded, padded_to_packed
from pytorch3d.structures.meshes import Meshes
class TestPackedToPadded(TestCaseMixin, unittest.TestCase):
def setUp(self) -> None:
super().setUp()
torch.manual_seed(1)
@staticmethod
def init_meshes(
num_meshes: int = 10,
num_verts: int = 1000,
num_faces: int = 3000,
device: str = "cpu",
):
device = torch.device(device)
verts_list = []
faces_list = []
for _ in range(num_meshes):
verts = torch.rand((num_verts, 3), dtype=torch.float32, device=device)
faces = torch.randint(
num_verts, size=(num_faces, 3), dtype=torch.int64, device=device
)
verts_list.append(verts)
faces_list.append(faces)
meshes = Meshes(verts_list, faces_list)
return meshes
@staticmethod
def packed_to_padded_python(inputs, first_idxs, max_size, device):
"""
PyTorch implementation of packed_to_padded function.
"""
num_meshes = first_idxs.size(0)
D = inputs.shape[1] if inputs.dim() == 2 else 0
if D == 0:
inputs_padded = torch.zeros((num_meshes, max_size), device=device)
else:
inputs_padded = torch.zeros((num_meshes, max_size, D), device=device)
for m in range(num_meshes):
s = first_idxs[m]
if m == num_meshes - 1:
f = inputs.shape[0]
else:
f = first_idxs[m + 1]
inputs_padded[m, :f] = inputs[s:f]
return inputs_padded
@staticmethod
def padded_to_packed_python(inputs, first_idxs, num_inputs, device):
"""
PyTorch implementation of padded_to_packed function.
"""
num_meshes = inputs.size(0)
D = inputs.shape[2] if inputs.dim() == 3 else 0
if D == 0:
inputs_packed = torch.zeros((num_inputs,), device=device)
else:
inputs_packed = torch.zeros((num_inputs, D), device=device)
for m in range(num_meshes):
s = first_idxs[m]
if m == num_meshes - 1:
f = num_inputs
else:
f = first_idxs[m + 1]
inputs_packed[s:f] = inputs[m, :f]
return inputs_packed
def _test_packed_to_padded_helper(self, D, device):
"""
Check the results from packed_to_padded and PyTorch implementations
are the same.
"""
meshes = self.init_meshes(16, 100, 300, device=device)
faces = meshes.faces_packed()
mesh_to_faces_packed_first_idx = meshes.mesh_to_faces_packed_first_idx()
max_faces = meshes.num_faces_per_mesh().max().item()
if D == 0:
values = torch.rand((faces.shape[0],), device=device, requires_grad=True)
else:
values = torch.rand((faces.shape[0], D), device=device, requires_grad=True)
values_torch = values.detach().clone()
values_torch.requires_grad = True
values_padded = packed_to_padded(
values, mesh_to_faces_packed_first_idx, max_faces
)
values_padded_torch = TestPackedToPadded.packed_to_padded_python(
values_torch, mesh_to_faces_packed_first_idx, max_faces, device
)
# check forward
self.assertClose(values_padded, values_padded_torch)
# check backward
if D == 0:
grad_inputs = torch.rand((len(meshes), max_faces), device=device)
else:
grad_inputs = torch.rand((len(meshes), max_faces, D), device=device)
values_padded.backward(grad_inputs)
grad_outputs = values.grad
values_padded_torch.backward(grad_inputs)
grad_outputs_torch1 = values_torch.grad
grad_outputs_torch2 = TestPackedToPadded.padded_to_packed_python(
grad_inputs, mesh_to_faces_packed_first_idx, values.size(0), device=device
)
self.assertClose(grad_outputs, grad_outputs_torch1)
self.assertClose(grad_outputs, grad_outputs_torch2)
def test_packed_to_padded_flat_cpu(self):
self._test_packed_to_padded_helper(0, "cpu")
def test_packed_to_padded_D1_cpu(self):
self._test_packed_to_padded_helper(1, "cpu")
def test_packed_to_padded_D16_cpu(self):
self._test_packed_to_padded_helper(16, "cpu")
def test_packed_to_padded_flat_cuda(self):
device = get_random_cuda_device()
self._test_packed_to_padded_helper(0, device)
def test_packed_to_padded_D1_cuda(self):
device = get_random_cuda_device()
self._test_packed_to_padded_helper(1, device)
def test_packed_to_padded_D16_cuda(self):
device = get_random_cuda_device()
self._test_packed_to_padded_helper(16, device)
def _test_padded_to_packed_helper(self, D, device):
"""
Check the results from packed_to_padded and PyTorch implementations
are the same.
"""
meshes = self.init_meshes(16, 100, 300, device=device)
mesh_to_faces_packed_first_idx = meshes.mesh_to_faces_packed_first_idx()
num_faces_per_mesh = meshes.num_faces_per_mesh()
max_faces = num_faces_per_mesh.max().item()
if D == 0:
values = torch.rand((len(meshes), max_faces), device=device)
else:
values = torch.rand((len(meshes), max_faces, D), device=device)
for i, num in enumerate(num_faces_per_mesh):
values[i, num:] = 0
values.requires_grad = True
values_torch = values.detach().clone()
values_torch.requires_grad = True
values_packed = padded_to_packed(
values, mesh_to_faces_packed_first_idx, num_faces_per_mesh.sum().item()
)
values_packed_torch = TestPackedToPadded.padded_to_packed_python(
values_torch,
mesh_to_faces_packed_first_idx,
num_faces_per_mesh.sum().item(),
device,
)
# check forward
self.assertClose(values_packed, values_packed_torch)
# check backward
if D == 0:
grad_inputs = torch.rand((num_faces_per_mesh.sum().item()), device=device)
else:
grad_inputs = torch.rand(
(num_faces_per_mesh.sum().item(), D), device=device
)
values_packed.backward(grad_inputs)
grad_outputs = values.grad
values_packed_torch.backward(grad_inputs)
grad_outputs_torch1 = values_torch.grad
grad_outputs_torch2 = TestPackedToPadded.packed_to_padded_python(
grad_inputs, mesh_to_faces_packed_first_idx, values.size(1), device=device
)
self.assertClose(grad_outputs, grad_outputs_torch1)
self.assertClose(grad_outputs, grad_outputs_torch2)
def test_padded_to_packed_flat_cpu(self):
self._test_padded_to_packed_helper(0, "cpu")
def test_padded_to_packed_D1_cpu(self):
self._test_padded_to_packed_helper(1, "cpu")
def test_padded_to_packed_D16_cpu(self):
self._test_padded_to_packed_helper(16, "cpu")
def test_padded_to_packed_flat_cuda(self):
device = get_random_cuda_device()
self._test_padded_to_packed_helper(0, device)
def test_padded_to_packed_D1_cuda(self):
device = get_random_cuda_device()
self._test_padded_to_packed_helper(1, device)
def test_padded_to_packed_D16_cuda(self):
device = get_random_cuda_device()
self._test_padded_to_packed_helper(16, device)
def test_invalid_inputs_shapes(self, device="cuda:0"):
with self.assertRaisesRegex(ValueError, "input can only be 2-dimensional."):
values = torch.rand((100, 50, 2), device=device)
first_idxs = torch.tensor([0, 80], dtype=torch.int64, device=device)
packed_to_padded(values, first_idxs, 100)
with self.assertRaisesRegex(ValueError, "input can only be 3-dimensional."):
values = torch.rand((100,), device=device)
first_idxs = torch.tensor([0, 80], dtype=torch.int64, device=device)
padded_to_packed(values, first_idxs, 20)
with self.assertRaisesRegex(ValueError, "input can only be 3-dimensional."):
values = torch.rand((100, 50, 2, 2), device=device)
first_idxs = torch.tensor([0, 80], dtype=torch.int64, device=device)
padded_to_packed(values, first_idxs, 20)
@staticmethod
def packed_to_padded_with_init(
num_meshes: int, num_verts: int, num_faces: int, num_d: int, device: str = "cpu"
):
meshes = TestPackedToPadded.init_meshes(
num_meshes, num_verts, num_faces, device
)
faces = meshes.faces_packed()
mesh_to_faces_packed_first_idx = meshes.mesh_to_faces_packed_first_idx()
max_faces = meshes.num_faces_per_mesh().max().item()
if num_d == 0:
values = torch.rand((faces.shape[0],), device=meshes.device)
else:
values = torch.rand((faces.shape[0], num_d), device=meshes.device)
torch.cuda.synchronize()
def out():
packed_to_padded(values, mesh_to_faces_packed_first_idx, max_faces)
torch.cuda.synchronize()
return out
@staticmethod
def packed_to_padded_with_init_torch(
num_meshes: int, num_verts: int, num_faces: int, num_d: int, device: str = "cpu"
):
meshes = TestPackedToPadded.init_meshes(
num_meshes, num_verts, num_faces, device
)
faces = meshes.faces_packed()
mesh_to_faces_packed_first_idx = meshes.mesh_to_faces_packed_first_idx()
max_faces = meshes.num_faces_per_mesh().max().item()
if num_d == 0:
values = torch.rand((faces.shape[0],), device=meshes.device)
else:
values = torch.rand((faces.shape[0], num_d), device=meshes.device)
torch.cuda.synchronize()
def out():
TestPackedToPadded.packed_to_padded_python(
values, mesh_to_faces_packed_first_idx, max_faces, device
)
torch.cuda.synchronize()
return out
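# Illustrative layout sketch (hypothetical values, not an actual test):
# packed_to_padded scatters a flat per-face tensor into per-mesh rows,
# zero-padding meshes with fewer faces.
#
#   packed     = tensor([0., 1., 2., 3., 4.])   # two meshes: 3 faces + 2 faces
#   first_idxs = tensor([0, 3])
#   packed_to_padded(packed, first_idxs, 3)
#   # -> tensor([[0., 1., 2.],
#   #            [3., 4., 0.]])                 # second row zero-padded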
| 2.21875 | 2 |
easyric/tests/test_io_geotiff.py | HowcanoeWang/EasyRIC | 12 | 2607 |
import pyproj
import pytest
import numpy as np
from easyric.io import geotiff, shp
from skimage.io import imread
from skimage.color import rgb2gray
import matplotlib.pyplot as plt
def test_prase_header_string_width():
out_dict = geotiff._prase_header_string("* 256 image_width (1H) 13503")
assert out_dict['width'] == 13503
def test_prase_header_string_length():
out_dict = geotiff._prase_header_string("* 257 image_length (1H) 19866")
assert out_dict['length'] == 19866
def test_prase_header_string_scale():
in_str = "* 33550 model_pixel_scale (3d) (0.0029700000000000004, 0.0029700000000000004, 0"
out_dict = geotiff._prase_header_string(in_str)
assert out_dict['scale'] == (0.0029700000000000004, 0.0029700000000000004)
def test_prase_header_string_tie_point():
in_str = "* 33922 model_tie_point (6d) (0.0, 0.0, 0.0, 368090.77975000005, 3956071.13823,"
out_dict = geotiff._prase_header_string(in_str)
assert out_dict['tie_point'] == (368090.77975000005, 3956071.13823)
in_str = "* 33922 model_tie_point (6d) (0.0, 0.0, 0.0, 368090.77975000005, 3956071.13823, 0"
out_dict = geotiff._prase_header_string(in_str)
assert out_dict['tie_point'] == (368090.77975000005, 3956071.13823)
def test_prase_header_string_nodata():
out_dict = geotiff._prase_header_string("* 42113 gdal_nodata (7s) b'-10000'")
assert out_dict['nodata'] == -10000
def test_prase_header_string_proj_normal(capsys):
in_str = "* 34737 geo_ascii_params (30s) b'WGS 84 / UTM zone 54N|WGS 84|'"
out_dict = geotiff._prase_header_string(in_str)
captured = capsys.readouterr()
assert f"[io][geotiff][GeoCorrd] Comprehense [{in_str}]" in captured.out
assert out_dict['proj'] == pyproj.CRS.from_epsg(32654)
def test_prase_header_string_proj_error(capsys):
# should raise error because WGS 84 / UTM ... should be full
out_dict = geotiff._prase_header_string("* 34737 geo_ascii_params (30s) b'UTM zone 54N|WGS 84|'")
captured = capsys.readouterr()
assert '[io][geotiff][GeoCorrd] Generation failed, because [Input is not a CRS: UTM zone 54N]' in captured.out
assert out_dict['proj'] == None
def test_get_imarray_without_header(capsys):
pass
def test_get_imarray_with_header(capsys):
pass
def test_point_query_one_point():
point = (368023.004, 3955500.669)
out = geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif', point)
np.testing.assert_almost_equal(out, np.float32(97.45558), decimal=3)
def test_point_query_numpy_points():
points = np.asarray([[368022.581, 3955501.054], [368024.032, 3955500.465]])
out = geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif', points)
expected = np.asarray([97.624344, 97.59617])
np.testing.assert_almost_equal(out, expected, decimal=3)
def test_point_query_list_numpy_points():
points = np.asarray([[368022.581, 3955501.054], [368024.032, 3955500.465]])
point = np.asarray([[368023.004, 3955500.669]])
p_list = [point, points]
expected = [np.asarray([97.45558]), np.asarray([97.624344, 97.59617])]
out = geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif', p_list)
assert type(expected) == type(out)
np.testing.assert_almost_equal(expected[0], out[0], decimal=3)
np.testing.assert_almost_equal(expected[1], out[1], decimal=3)
def test_point_query_wrong_types():
# [TODO]
pass
def test_point_query_input_ndarray():
# [Todo]
pass
def test_mean_values(capsys):
mean_ht = geotiff.mean_values(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif')
captured = capsys.readouterr()
# When not convert to float, mean_values = 97.562584
# assert mean_ht == np.float32(97.562584)
np.testing.assert_almost_equal(mean_ht, np.float32(97.562584), decimal=3)
# another case that not working in previous version:
# Cannot convert np.nan to int, fixed by astype(float)
mean_ht = geotiff.mean_values(r'file/tiff_test/2_12.tif')
captured = capsys.readouterr()
    np.testing.assert_almost_equal(mean_ht, float(72.31657466298653), decimal=3)
def test_gis2pixel2gis():
geo_head_txt = """
TIFF file: 200423_G_M600pro_transparent_mosaic_group1.tif, 411 MiB, little endian, bigtiff
Series 0: 31255x19436x4, uint8, YXS, 1 pages, not mem-mappable
Page 0: 31255x19436x4, uint8, 8 bit, rgb, lzw
* 256 image_width (1H) 19436
* 257 image_length (1H) 31255
* 258 bits_per_sample (4H) (8, 8, 8, 8)
* 259 compression (1H) 5
* 262 photometric (1H) 2
* 273 strip_offsets (31255Q) (500650, 501114, 501578, 502042, 502506, 502970, 5
* 277 samples_per_pixel (1H) 4
* 278 rows_per_strip (1H) 1
* 279 strip_byte_counts (31255Q) (464, 464, 464, 464, 464, 464, 464, 464, 464,
* 284 planar_configuration (1H) 1
* 305 software (12s) b'pix4dmapper'
* 317 predictor (1H) 2
* 338 extra_samples (1H) 2
* 339 sample_format (4H) (1, 1, 1, 1)
* 33550 model_pixel_scale (3d) (0.001, 0.001, 0.0)
* 33922 model_tie_point (6d) (0.0, 0.0, 0.0, 484576.70205, 3862285.5109300003,
* 34735 geo_key_directory (32H) (1, 1, 0, 7, 1024, 0, 1, 1, 1025, 0, 1, 1, 1026
* 34737 geo_ascii_params (30s) b'WGS 84 / UTM zone 53N|WGS 84|'
"""
gis_coord = np.asarray([[ 484593.67474654, 3862259.42413431],
[ 484593.41064743, 3862259.92582402],
[ 484593.64841806, 3862260.06515117],
[ 484593.93077419, 3862259.55455913],
[ 484593.67474654, 3862259.42413431]])
header = geotiff._prase_header_string(geo_head_txt)
expected_pixel = np.asarray([[16972, 26086],
[16708, 25585],
[16946, 25445],
[17228, 25956],
[16972, 26086]])
pixel_coord = geotiff.geo2pixel(gis_coord, header)
np.testing.assert_almost_equal(pixel_coord, expected_pixel)
gis_revert = geotiff.pixel2geo(pixel_coord, header)
np.testing.assert_almost_equal(gis_revert, gis_coord, decimal=3)
def test_is_roi_type():
roi1 = np.asarray([[123, 456], [456, 789]])
roi2 = [roi1, roi1]
roi_wrong_1 = (123, 345)
roi_wrong_2 = [123, 456]
roi_wrong_3 = [[123, 345], [456, 789]]
roi1_out = geotiff._is_roi_type(roi1)
assert roi1_out == [roi1]
roi2_out = geotiff._is_roi_type(roi2)
assert roi2_out == roi2
with pytest.raises(TypeError) as errinfo:
roi_w1_out = geotiff._is_roi_type(roi_wrong_1)
assert 'Only numpy.ndarray points and list contains numpy.ndarray points are supported' in str(errinfo.value)
with pytest.raises(TypeError) as errinfo:
roi_w2_out = geotiff._is_roi_type(roi_wrong_2)
assert 'Only list contains numpy.ndarray points are supported' in str(errinfo.value)
with pytest.raises(TypeError) as errinfo:
roi_w3_out = geotiff._is_roi_type(roi_wrong_3)
assert 'Only list contains numpy.ndarray points are supported' in str(errinfo.value)
def test_imarray_clip_2d_rgb_rgba():
photo_path = 'file/pix4d.diy/photos/DJI_0174.JPG'
roi = np.asarray([[2251, 1223], [2270, 1270], [2227, 1263], [2251, 1223]])
fig, ax = plt.subplots(1,3, figsize=(12,4))
# -----------------------------------------------
imarray_rgb = imread(photo_path)
assert imarray_rgb.shape == (3456, 4608, 3)
im_out_rgb, offsets_rgb = geotiff.imarray_clip(imarray_rgb, roi)
ax[1].imshow(im_out_rgb / 255)
ax[1].set_title('rgb')
# -----------------------------------------------
imarray_2d = rgb2gray(imarray_rgb)
assert imarray_2d.shape == (3456, 4608)
im_out_2d, offsets_2d = geotiff.imarray_clip(imarray_2d, roi)
ax[0].imshow(im_out_2d, cmap='gray')
ax[0].set_title('gray')
# -----------------------------------------------
imarray_rgba = np.dstack((imarray_rgb, np.ones((3456, 4608)) * 255))
assert imarray_rgba.shape == (3456, 4608, 4)
im_out_rgba, offsets = geotiff.imarray_clip(imarray_rgba, roi)
ax[2].imshow(im_out_rgba/255)
ax[2].set_title('rgba')
plt.show()
def test_clip_roi_pixel():
poly = shp.read_shp2d('file/shp_test/test.shp')
poly_pixel = geotiff.geo2pixel(poly['0'], geotiff.get_header('file/tiff_test/2_12.tif'))
imarray, offset = geotiff.clip_roi(poly_pixel, 'file/tiff_test/2_12.tif', is_geo=False)
assert len(imarray) == 1
def test_clip_roi_geo():
poly = shp.read_shp2d('file/shp_test/test.shp')
imarray, offset = geotiff.clip_roi(poly['0'], 'file/tiff_test/2_12.tif', is_geo=True)
    assert len(imarray) == 1
| 2.234375 | 2 |
src/ebay_rest/api/buy_marketplace_insights/models/item_location.py | matecsaj/ebay_rest | 3 | 2608 | # coding: utf-8
"""
Marketplace Insights API
    <a href=\"https://developer.ebay.com/api-docs/static/versioning.html#limited\" target=\"_blank\"> <img src=\"/cms/img/docs/partners-api.svg\" class=\"legend-icon partners-icon\" title=\"Limited Release\" alt=\"Limited Release\" />(Limited Release)</a> The Marketplace Insights API provides the ability to search for sold items on eBay by keyword, GTIN, category, and product and returns the sales history of those items. # noqa: E501
OpenAPI spec version: v1_beta.2.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ItemLocation(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'address_line1': 'str',
'address_line2': 'str',
'city': 'str',
'country': 'str',
'county': 'str',
'postal_code': 'str',
'state_or_province': 'str'
}
attribute_map = {
'address_line1': 'addressLine1',
'address_line2': 'addressLine2',
'city': 'city',
'country': 'country',
'county': 'county',
'postal_code': 'postalCode',
'state_or_province': 'stateOrProvince'
}
def __init__(self, address_line1=None, address_line2=None, city=None, country=None, county=None, postal_code=None, state_or_province=None): # noqa: E501
"""ItemLocation - a model defined in Swagger""" # noqa: E501
self._address_line1 = None
self._address_line2 = None
self._city = None
self._country = None
self._county = None
self._postal_code = None
self._state_or_province = None
self.discriminator = None
if address_line1 is not None:
self.address_line1 = address_line1
if address_line2 is not None:
self.address_line2 = address_line2
if city is not None:
self.city = city
if country is not None:
self.country = country
if county is not None:
self.county = county
if postal_code is not None:
self.postal_code = postal_code
if state_or_province is not None:
self.state_or_province = state_or_province
@property
def address_line1(self):
"""Gets the address_line1 of this ItemLocation. # noqa: E501
The first line of the street address. # noqa: E501
:return: The address_line1 of this ItemLocation. # noqa: E501
:rtype: str
"""
return self._address_line1
@address_line1.setter
def address_line1(self, address_line1):
"""Sets the address_line1 of this ItemLocation.
The first line of the street address. # noqa: E501
:param address_line1: The address_line1 of this ItemLocation. # noqa: E501
:type: str
"""
self._address_line1 = address_line1
@property
def address_line2(self):
"""Gets the address_line2 of this ItemLocation. # noqa: E501
The second line of the street address. This field may contain such values as an apartment or suite number. # noqa: E501
:return: The address_line2 of this ItemLocation. # noqa: E501
:rtype: str
"""
return self._address_line2
@address_line2.setter
def address_line2(self, address_line2):
"""Sets the address_line2 of this ItemLocation.
The second line of the street address. This field may contain such values as an apartment or suite number. # noqa: E501
:param address_line2: The address_line2 of this ItemLocation. # noqa: E501
:type: str
"""
self._address_line2 = address_line2
@property
def city(self):
"""Gets the city of this ItemLocation. # noqa: E501
The city in which the item is located. # noqa: E501
:return: The city of this ItemLocation. # noqa: E501
:rtype: str
"""
return self._city
@city.setter
def city(self, city):
"""Sets the city of this ItemLocation.
The city in which the item is located. # noqa: E501
:param city: The city of this ItemLocation. # noqa: E501
:type: str
"""
self._city = city
@property
def country(self):
"""Gets the country of this ItemLocation. # noqa: E501
The two-letter <a href=\"https://www.iso.org/iso-3166-country-codes.html\">ISO 3166</a> standard code that indicates the country in which the item is located. For implementation help, refer to <a href='https://developer.ebay.com/api-docs/buy/marketplace_insights/types/ba:CountryCodeEnum'>eBay API documentation</a> # noqa: E501
:return: The country of this ItemLocation. # noqa: E501
:rtype: str
"""
return self._country
@country.setter
def country(self, country):
"""Sets the country of this ItemLocation.
The two-letter <a href=\"https://www.iso.org/iso-3166-country-codes.html\">ISO 3166</a> standard code that indicates the country in which the item is located. For implementation help, refer to <a href='https://developer.ebay.com/api-docs/buy/marketplace_insights/types/ba:CountryCodeEnum'>eBay API documentation</a> # noqa: E501
:param country: The country of this ItemLocation. # noqa: E501
:type: str
"""
self._country = country
@property
def county(self):
"""Gets the county of this ItemLocation. # noqa: E501
The county in which the item is located. # noqa: E501
:return: The county of this ItemLocation. # noqa: E501
:rtype: str
"""
return self._county
@county.setter
def county(self, county):
"""Sets the county of this ItemLocation.
The county in which the item is located. # noqa: E501
:param county: The county of this ItemLocation. # noqa: E501
:type: str
"""
self._county = county
@property
def postal_code(self):
"""Gets the postal_code of this ItemLocation. # noqa: E501
The postal code (or zip code in US) where the item is located.<br /> <br /><span class=\"tablenote\"> <b> Note: </b>Beginning in late January 2020, the displayed postal code will be masked to all users. Different countries will mask postal/zip codes in slightly different ways, but an example would be <code>951**</code>.</span> # noqa: E501
:return: The postal_code of this ItemLocation. # noqa: E501
:rtype: str
"""
return self._postal_code
@postal_code.setter
def postal_code(self, postal_code):
"""Sets the postal_code of this ItemLocation.
The postal code (or zip code in US) where the item is located.<br /> <br /><span class=\"tablenote\"> <b> Note: </b>Beginning in late January 2020, the displayed postal code will be masked to all users. Different countries will mask postal/zip codes in slightly different ways, but an example would be <code>951**</code>.</span> # noqa: E501
:param postal_code: The postal_code of this ItemLocation. # noqa: E501
:type: str
"""
self._postal_code = postal_code
@property
def state_or_province(self):
"""Gets the state_or_province of this ItemLocation. # noqa: E501
The state or province in which the item is located. # noqa: E501
:return: The state_or_province of this ItemLocation. # noqa: E501
:rtype: str
"""
return self._state_or_province
@state_or_province.setter
def state_or_province(self, state_or_province):
"""Sets the state_or_province of this ItemLocation.
The state or province in which the item is located. # noqa: E501
:param state_or_province: The state_or_province of this ItemLocation. # noqa: E501
:type: str
"""
self._state_or_province = state_or_province
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ItemLocation, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ItemLocation):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
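# Minimal usage sketch (illustrative): fields left unset stay None in to_dict().
#
#   loc = ItemLocation(city='San Jose', country='US', postal_code='951**')
#   loc.to_dict()   # {'address_line1': None, ..., 'city': 'San Jose',
#                   #  'country': 'US', 'postal_code': '951**', ...}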
| 2.171875 | 2 |
fractionalKnapsack.py | aadishgoel2013/Algos-with-Python | 6 | 2609 |
# Fractional Knapsack
wt = [40,50,30,10,10,40,30]
pro = [30,20,20,25,5,35,15]
n = len(wt)
data = [ (i,pro[i],wt[i]) for i in range(n) ]
bag = 100
data.sort(key=lambda x: x[1]/x[2], reverse=True)
profit=0
ans=[]
i=0
while i<n:
if data[i][2]<=bag:
bag-=data[i][2]
ans.append(data[i][0])
profit+=data[i][1]
i+=1
else:
break
if i<n:
ans.append(data[i][0])
profit += (bag*data[i][1])/data[i][2]
print(profit,ans)
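# A reusable function form of the same greedy strategy (a minimal sketch;
# `fractional_knapsack` is a hypothetical helper; item indices in `taken`
# follow descending profit/weight ratio, matching the loop above):
def fractional_knapsack(weights, profits, capacity):
    order = sorted(range(len(weights)),
                   key=lambda i: profits[i] / weights[i], reverse=True)
    total, taken = 0, []
    for i in order:
        if weights[i] <= capacity:
            capacity -= weights[i]
            total += profits[i]
            taken.append(i)
        else:
            # take a fraction of the first item that does not fit, then stop
            total += profits[i] * capacity / weights[i]
            taken.append(i)
            break
    return total, taken

print(fractional_knapsack(wt, pro, 100))   # matches the result printed above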
| 2.859375 | 3 |
pysrc/classifier.py | CrackerCat/xed | 1,261 | 2610 | #!/usr/bin/env python
# -*- python -*-
#BEGIN_LEGAL
#
#Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#END_LEGAL
from __future__ import print_function
import re
import genutil
import codegen
def _emit_function(fe, isa_sets, name):
fo = codegen.function_object_t('xed_classify_{}'.format(name))
fo.add_arg('const xed_decoded_inst_t* d')
fo.add_code_eol(' const xed_isa_set_enum_t isa_set = xed_decoded_inst_get_isa_set(d)')
# FIXME: 2017-07-14 optimization: could use a static array for faster checking, smaller code
switch = codegen.c_switch_generator_t('isa_set', fo)
isa_sets_sorted = sorted(isa_sets)
for c in isa_sets_sorted:
switch.add_case('XED_ISA_SET_{}'.format(c.upper()),[],do_break=False)
if len(isa_sets) > 0:
switch.add('return 1;')
switch.add_default(['return 0;'], do_break=False)
switch.finish()
fo.emit_file_emitter(fe)
def work(agi):
sse_isa_sets = set([])
avx_isa_sets = set([])
avx512_isa_sets = set([])
avx512_kmask_op = set([])
for generator in agi.generator_list:
for ii in generator.parser_output.instructions:
if genutil.field_check(ii, 'iclass'):
if re.search('AVX512',ii.isa_set):
avx512_isa_sets.add(ii.isa_set)
if re.search('KOP',ii.isa_set):
avx512_kmask_op.add(ii.isa_set)
elif re.search('AVX',ii.isa_set) or ii.isa_set in ['F16C', 'FMA']:
avx_isa_sets.add(ii.isa_set)
elif re.search('SSE',ii.isa_set) or ii.isa_set in ['AES','PCLMULQDQ']:
# Exclude MMX instructions that come in with SSE2 &
# SSSE3. The several purely MMX instr in SSE are
# "SSE-opcodes" with memop operands. One can look for
# those with SSE2MMX and SSSE3MMX xed isa_sets.
#
# Also exclude the SSE_PREFETCH operations; Those are
# just memops.
if (not re.search('MMX',ii.isa_set) and not re.search('PREFETCH',ii.isa_set)
and not re.search('X87',ii.isa_set) and not re.search('MWAIT',ii.isa_set)):
sse_isa_sets.add(ii.isa_set)
fe = agi.open_file('xed-classifiers.c') # xed_file_emitter_t
_emit_function(fe, avx512_isa_sets, 'avx512')
_emit_function(fe, avx512_kmask_op, 'avx512_maskop')
_emit_function(fe, avx_isa_sets, 'avx')
_emit_function(fe, sse_isa_sets, 'sse')
fe.close()
return
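# Illustrative sketch of the kind of C function _emit_function produces
# (hypothetical output shape, not verbatim codegen output):
#
#   xed_bool_t xed_classify_avx512(const xed_decoded_inst_t* d) {
#       const xed_isa_set_enum_t isa_set = xed_decoded_inst_get_isa_set(d);
#       switch(isa_set) {
#           case XED_ISA_SET_AVX512F_512: /* ... one case per isa_set ... */
#               return 1;
#           default:
#               return 0;
#       }
#   }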
| 1.914063 | 2 |
tests/integration/api/test_target_groups.py | lanz/Tenable.io-SDK-for-Python | 90 | 2611 |
import pytest
from tenable_io.api.target_groups import TargetListEditRequest
from tenable_io.api.models import TargetGroup, TargetGroupList
@pytest.mark.vcr()
def test_target_groups_create(new_target_group):
assert isinstance(new_target_group, TargetGroup), u'The `create` method did not return type `TargetGroup`.'
@pytest.mark.vcr()
def test_target_groups_details(client, new_target_group):
target_group = new_target_group
details = client.target_groups_api.details(target_group.id)
assert isinstance(details, TargetGroup), u'The `details` method did not return type `TargetGroup`.'
assert details.id == target_group.id, u'Expected the `details` response to match the requested target group.'
@pytest.mark.vcr()
def test_target_groups_list(client):
target_groups = client.target_groups_api.list()
assert isinstance(target_groups, TargetGroupList), u'The `details` method did not return type `TargetGroup`.'
for group in target_groups.target_groups:
assert isinstance(group, TargetGroup), u'Expected a list of type `TargetGroup`.'
@pytest.mark.vcr()
def test_target_groups_delete(client, new_target_group):
assert client.target_groups_api.delete(new_target_group.id), u'The target group was not deleted.'
@pytest.mark.vcr()
def test_target_groups_edit(client, new_target_group):
target_group = new_target_group
edited_name = 'test_target_group_edit'
edited_group = client.target_groups_api.edit(TargetListEditRequest(name=edited_name), target_group.id)
assert isinstance(edited_group, TargetGroup), u'The `edit` method did not return type `TargetGroup`.'
assert edited_group.id == target_group.id, u'Expected the edited target group to match the requested target group.'
assert edited_group.name == edited_name, u'Expected the name to be updated.'
| 2.1875 | 2 |
Installation/nnAudio/Spectrogram.py | tasercake/nnAudio | 0 | 2612 | """
Module containing all the spectrogram classes
"""
# 0.2.0
import torch
import torch.nn as nn
from torch.nn.functional import conv1d, conv2d, fold
import numpy as np
from time import time
from nnAudio.librosa_functions import *
from nnAudio.utils import *
sz_float = 4 # size of a float
epsilon = 10e-8 # fudge factor for normalization
### --------------------------- Spectrogram Classes ---------------------------###
class STFT(torch.nn.Module):
"""This function is to calculate the short-time Fourier transform (STFT) of the input signal.
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
The correct shape will be inferred automatically if the input follows these 3 shapes.
Most of the arguments follow the convention from librosa.
This class inherits from ``torch.nn.Module``, therefore, the usage is same as ``torch.nn.Module``.
Parameters
----------
n_fft : int
The window size. Default value is 2048.
freq_bins : int
Number of frequency bins. Default is ``None``, which means ``n_fft//2+1`` bins.
hop_length : int
The hop (or stride) size. Default value is ``None`` which is equivalent to ``n_fft//4``.
window : str
The windowing function for STFT. It uses ``scipy.signal.get_window``, please refer to
scipy documentation for possible windowing functions. The default value is 'hann'.
freq_scale : 'linear', 'log', or 'no'
Determine the spacing between each frequency bin. When `linear` or `log` is used,
the bin spacing can be controlled by ``fmin`` and ``fmax``. If 'no' is used, the bin will
start at 0Hz and end at Nyquist frequency with linear spacing.
center : bool
        Putting the STFT kernel at the center of the time-step or not. If ``False``, the time
        index is the beginning of the STFT kernel; if ``True``, the time index is the center of
        the STFT kernel. Default value is ``True``.
pad_mode : str
The padding method. Default value is 'reflect'.
    iSTFT : bool
To activate the iSTFT module or not. By default, it is False to save GPU memory.
fmin : int
The starting frequency for the lowest frequency bin. If freq_scale is ``no``, this argument
does nothing.
fmax : int
The ending frequency for the highest frequency bin. If freq_scale is ``no``, this argument
does nothing.
sr : int
        The sampling rate for the input audio. It is used to calculate the correct ``fmin`` and ``fmax``.
Setting the correct sampling rate is very important for calculating the correct frequency.
trainable : bool
        Determine if the STFT kernels are trainable or not. If ``True``, the gradients for STFT
        kernels will also be calculated and the STFT kernels will be updated during model training.
Default value is ``False``
output_format : str
Control the spectrogram output type, either ``Magnitude``, ``Complex``, or ``Phase``.
The output_format can also be changed during the ``forward`` method.
verbose : bool
If ``True``, it shows layer information. If ``False``, it suppresses all prints
device : str
Choose which device to initialize this layer. Default value is 'cpu'
Returns
-------
spectrogram : torch.tensor
It returns a tensor of spectrograms.
``shape = (num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``;
``shape = (num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex' or 'Phase'``;
Examples
--------
>>> spec_layer = Spectrogram.STFT()
>>> specs = spec_layer(x)
"""
def __init__(self, n_fft=2048, win_length=None, freq_bins=None, hop_length=None, window='hann',
freq_scale='no', center=True, pad_mode='reflect', iSTFT=False,
fmin=50, fmax=6000, sr=22050, trainable=False,
output_format="Complex", verbose=True):
super().__init__()
# Trying to make the default setting same as librosa
if win_length==None: win_length = n_fft
if hop_length==None: hop_length = int(win_length // 4)
self.output_format = output_format
self.trainable = trainable
self.stride = hop_length
self.center = center
self.pad_mode = pad_mode
self.n_fft = n_fft
self.freq_bins = freq_bins
self.trainable = trainable
self.pad_amount = self.n_fft // 2
self.window = window
self.win_length = win_length
self.iSTFT = iSTFT
self.trainable = trainable
start = time()
# Create filter windows for stft
kernel_sin, kernel_cos, self.bins2freq, self.bin_list, window_mask = create_fourier_kernels(n_fft,
win_length=win_length,
freq_bins=freq_bins,
window=window,
freq_scale=freq_scale,
fmin=fmin,
fmax=fmax,
sr=sr,
verbose=verbose)
kernel_sin = torch.tensor(kernel_sin, dtype=torch.float)
kernel_cos = torch.tensor(kernel_cos, dtype=torch.float)
# In this way, the inverse kernel and the forward kernel do not share the same memory...
kernel_sin_inv = torch.cat((kernel_sin, -kernel_sin[1:-1].flip(0)), 0)
kernel_cos_inv = torch.cat((kernel_cos, kernel_cos[1:-1].flip(0)), 0)
if iSTFT:
self.register_buffer('kernel_sin_inv', kernel_sin_inv.unsqueeze(-1))
self.register_buffer('kernel_cos_inv', kernel_cos_inv.unsqueeze(-1))
# Making all these variables nn.Parameter, so that the model can be used with nn.Parallel
# self.kernel_sin = torch.nn.Parameter(self.kernel_sin, requires_grad=self.trainable)
# self.kernel_cos = torch.nn.Parameter(self.kernel_cos, requires_grad=self.trainable)
# Applying window functions to the Fourier kernels
window_mask = torch.tensor(window_mask)
wsin = kernel_sin * window_mask
wcos = kernel_cos * window_mask
if self.trainable==False:
self.register_buffer('wsin', wsin)
self.register_buffer('wcos', wcos)
if self.trainable==True:
wsin = torch.nn.Parameter(wsin, requires_grad=self.trainable)
wcos = torch.nn.Parameter(wcos, requires_grad=self.trainable)
self.register_parameter('wsin', wsin)
self.register_parameter('wcos', wcos)
# Prepare the shape of window mask so that it can be used later in inverse
self.register_buffer('window_mask', window_mask.unsqueeze(0).unsqueeze(-1))
if verbose==True:
print("STFT kernels created, time used = {:.4f} seconds".format(time()-start))
else:
pass
def forward(self, x, output_format=None):
"""
Convert a batch of waveforms to spectrograms.
Parameters
----------
x : torch tensor
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
It will be automatically broadcast to the right shape
output_format : str
Control the type of spectrogram to be return. Can be either ``Magnitude`` or ``Complex`` or ``Phase``.
Default value is ``Complex``.
"""
output_format = output_format or self.output_format
self.num_samples = x.shape[-1]
x = broadcast_dim(x)
if self.center:
if self.pad_mode == 'constant':
padding = nn.ConstantPad1d(self.pad_amount, 0)
elif self.pad_mode == 'reflect':
if self.num_samples < self.pad_amount:
raise AssertionError("Signal length shorter than reflect padding length (n_fft // 2).")
padding = nn.ReflectionPad1d(self.pad_amount)
x = padding(x)
spec_imag = conv1d(x, self.wsin, stride=self.stride)
spec_real = conv1d(x, self.wcos, stride=self.stride) # Doing STFT by using conv1d
# remove redundant parts
spec_real = spec_real[:, :self.freq_bins, :]
spec_imag = spec_imag[:, :self.freq_bins, :]
if output_format=='Magnitude':
spec = spec_real.pow(2) + spec_imag.pow(2)
if self.trainable==True:
return torch.sqrt(spec+1e-8) # prevent Nan gradient when sqrt(0) due to output=0
else:
return torch.sqrt(spec)
elif output_format=='Complex':
return torch.stack((spec_real,-spec_imag), -1) # Remember the minus sign for imaginary part
elif output_format=='Phase':
return torch.atan2(-spec_imag+0.0,spec_real) # +0.0 removes -0.0 elements, which leads to error in calculating phase
def inverse(self, X, onesided=True, length=None, refresh_win=True):
"""
This function is same as the :func:`~nnAudio.Spectrogram.iSTFT` class,
which is to convert spectrograms back to waveforms.
It only works for the complex value spectrograms. If you have the magnitude spectrograms,
please use :func:`~nnAudio.Spectrogram.Griffin_Lim`.
Parameters
----------
onesided : bool
If your spectrograms only have ``n_fft//2+1`` frequency bins, please use ``onesided=True``,
else use ``onesided=False``
length : int
To make sure the inverse STFT has the same output length of the original waveform, please
set `length` as your intended waveform length. By default, ``length=None``,
which will remove ``n_fft//2`` samples from the start and the end of the output.
refresh_win : bool
            Recalculating the window sum square. If you have an input with a fixed number of timesteps,
you can increase the speed by setting ``refresh_win=False``. Else please keep ``refresh_win=True``
"""
if (hasattr(self, 'kernel_sin_inv') != True) or (hasattr(self, 'kernel_cos_inv') != True):
raise NameError("Please activate the iSTFT module by setting `iSTFT=True` if you want to use `inverse`")
assert X.dim()==4 , "Inverse iSTFT only works for complex number," \
"make sure our tensor is in the shape of (batch, freq_bins, timesteps, 2)."\
"\nIf you have a magnitude spectrogram, please consider using Griffin-Lim."
if onesided:
X = extend_fbins(X) # extend freq
X_real, X_imag = X[:, :, :, 0], X[:, :, :, 1]
# broadcast dimensions to support 2D convolution
X_real_bc = X_real.unsqueeze(1)
X_imag_bc = X_imag.unsqueeze(1)
a1 = conv2d(X_real_bc, self.kernel_cos_inv, stride=(1,1))
b2 = conv2d(X_imag_bc, self.kernel_sin_inv, stride=(1,1))
# compute real and imag part. signal lies in the real part
real = a1 - b2
real = real.squeeze(-2)*self.window_mask
# Normalize the amplitude with n_fft
real /= (self.n_fft)
# Overlap and Add algorithm to connect all the frames
real = overlap_add(real, self.stride)
# Prepare the window sumsqure for division
# Only need to create this window once to save time
# Unless the input spectrograms have different time steps
if hasattr(self, 'w_sum')==False or refresh_win==True:
self.w_sum = torch_window_sumsquare(self.window_mask.flatten(), X.shape[2], self.stride, self.n_fft).flatten()
self.nonzero_indices = (self.w_sum>1e-10)
else:
pass
real[:, self.nonzero_indices] = real[:,self.nonzero_indices].div(self.w_sum[self.nonzero_indices])
# Remove padding
if length is None:
if self.center:
real = real[:, self.pad_amount:-self.pad_amount]
else:
if self.center:
real = real[:, self.pad_amount:self.pad_amount + length]
else:
real = real[:, :length]
return real
def extra_repr(self) -> str:
return 'n_fft={}, Fourier Kernel size={}, iSTFT={}, trainable={}'.format(
self.n_fft, (*self.wsin.shape,), self.iSTFT, self.trainable
)
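# --- Usage sketch (illustrative, not part of the library) --------------------
# A minimal sanity check comparing this conv1d-based STFT against torch.stft,
# assuming default settings (hann window, center/reflect padding) and a recent
# PyTorch; the tolerance is loose because the kernels are float32:
#
#   x = torch.randn(1, 22050)
#   layer = STFT(n_fft=2048, hop_length=512, output_format='Magnitude',
#                verbose=False)
#   mag_conv = layer(x)                                   # (1, 1025, 44)
#   mag_ref = torch.stft(x, 2048, hop_length=512,
#                        window=torch.hann_window(2048),
#                        return_complex=True).abs()       # (1, 1025, 44)
#   assert torch.allclose(mag_conv, mag_ref, atol=1e-2)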
class MelSpectrogram(torch.nn.Module):
"""This function is to calculate the Melspectrogram of the input signal.
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
The correct shape will be inferred automatically if the input follows these 3 shapes.
Most of the arguments follow the convention from librosa.
This class inherits from ``torch.nn.Module``, therefore, the usage is same as ``torch.nn.Module``.
Parameters
----------
sr : int
The sampling rate for the input audio.
It is used to calculate the correct ``fmin`` and ``fmax``.
Setting the correct sampling rate is very important for calculating the correct frequency.
n_fft : int
The window size for the STFT. Default value is 2048
n_mels : int
        The number of Mel filter banks, which map the ``n_fft`` frequency bins to Mel bins.
Default value is 128.
hop_length : int
The hop (or stride) size. Default value is 512.
window : str
The windowing function for STFT. It uses ``scipy.signal.get_window``, please refer to
scipy documentation for possible windowing functions. The default value is 'hann'.
center : bool
        Putting the STFT kernel at the center of the time-step or not. If ``False``,
        the time index is the beginning of the STFT kernel; if ``True``, the time index is the
        center of the STFT kernel. Default value is ``True``.
pad_mode : str
The padding method. Default value is 'reflect'.
htk : bool
When ``False`` is used, the Mel scale is quasi-logarithmic. When ``True`` is used, the
Mel scale is logarithmic. The default value is ``False``.
fmin : int
The starting frequency for the lowest Mel filter bank.
fmax : int
The ending frequency for the highest Mel filter bank.
trainable_mel : bool
Determine if the Mel filter banks are trainable or not. If ``True``, the gradients for Mel
filter banks will also be calculated and the Mel filter banks will be updated during model
training. Default value is ``False``.
trainable_STFT : bool
        Determine if the STFT kernels are trainable or not. If ``True``, the gradients for STFT
        kernels will also be calculated and the STFT kernels will be updated during model training.
Default value is ``False``.
verbose : bool
If ``True``, it shows layer information. If ``False``, it suppresses all prints.
device : str
Choose which device to initialize this layer. Default value is 'cpu'.
Returns
-------
spectrogram : torch.tensor
It returns a tensor of spectrograms. shape = ``(num_samples, freq_bins,time_steps)``.
Examples
--------
>>> spec_layer = Spectrogram.MelSpectrogram()
>>> specs = spec_layer(x)
"""
def __init__(self, sr=22050, n_fft=2048, n_mels=128, hop_length=512,
window='hann', center=True, pad_mode='reflect', power=2.0, htk=False,
fmin=0.0, fmax=None, norm=1, trainable_mel=False, trainable_STFT=False,
verbose=True, **kwargs):
super().__init__()
self.stride = hop_length
self.center = center
self.pad_mode = pad_mode
self.n_fft = n_fft
self.power = power
self.trainable_mel = trainable_mel
self.trainable_STFT = trainable_STFT
self.verbose = verbose
# Preparing for the stft layer. No need for center
self.stft = STFT(n_fft=n_fft, freq_bins=None, hop_length=hop_length, window=window,
freq_scale='no', center=center, pad_mode=pad_mode, sr=sr, trainable=trainable_STFT,
output_format="Magnitude", verbose=verbose, **kwargs)
        # Creating kernels for the Mel spectrogram
        start = time()
        mel_basis = mel(sr, n_fft, n_mels, fmin, fmax, htk=htk, norm=norm)
        mel_basis = torch.tensor(mel_basis)
        if verbose:
            print("Mel filter created, time used = {:.4f} seconds".format(time()-start))
if trainable_mel:
# Making everything nn.Parameter, so that this model can support nn.DataParallel
mel_basis = torch.nn.Parameter(mel_basis, requires_grad=trainable_mel)
self.register_parameter('mel_basis', mel_basis)
else:
self.register_buffer('mel_basis', mel_basis)
# if trainable_mel==True:
# self.mel_basis = torch.nn.Parameter(self.mel_basis)
# if trainable_STFT==True:
# self.wsin = torch.nn.Parameter(self.wsin)
# self.wcos = torch.nn.Parameter(self.wcos)
def forward(self, x):
"""
Convert a batch of waveforms to Mel spectrograms.
Parameters
----------
x : torch tensor
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
It will be automatically broadcast to the right shape
"""
x = broadcast_dim(x)
spec = self.stft(x, output_format='Magnitude')**self.power
melspec = torch.matmul(self.mel_basis, spec)
return melspec
def extra_repr(self) -> str:
        return 'Mel filter banks size = {}, trainable_mel={}, trainable_STFT={}'.format(
            (*self.mel_basis.shape,), self.trainable_mel, self.trainable_STFT
        )
def to_stft(self, melspec, max_steps=1000, loss_threshold=1e-8, grad_threshold=1e-7, random_start=False, sgd_kwargs=None, eps=1e-12, return_extras=False, verbose=None):
"""
Best-attempt spectrogram inversion
"""
def loss_fn(pred, target):
pred = pred.unsqueeze(1) if pred.ndim == 3 else pred
target = target.unsqueeze(1) if target.ndim == 3 else target
loss = (pred - target).pow(2).sum(-2).mean()
return loss
verbose = verbose or self.verbose
# SGD arguments
default_sgd_kwargs = dict(lr=1e3, momentum=0.9)
if sgd_kwargs:
default_sgd_kwargs.update(sgd_kwargs)
sgd_kwargs = default_sgd_kwargs
mel_basis = self.mel_basis.detach()
shape = melspec.shape
batch_size, n_mels, time = shape[0], shape[-2], shape[-1]
_, n_freq = mel_basis.shape
melspec = melspec.detach().view(-1, n_mels, time)
if random_start:
pred_stft_shape = (batch_size, n_freq, time)
pred_stft = torch.zeros(*pred_stft_shape, dtype=torch.float32, device=mel_basis.device).normal_().clamp_(eps)
else:
pred_stft = (torch.pinverse(mel_basis) @ melspec).clamp(eps)
pred_stft = nn.Parameter(pred_stft, requires_grad=True)
sgd_kwargs["lr"] = sgd_kwargs["lr"] * batch_size
optimizer = torch.optim.SGD([pred_stft], **sgd_kwargs)
losses = []
for i in range(max_steps):
optimizer.zero_grad()
pred_mel = mel_basis @ pred_stft
loss = loss_fn(pred_mel, melspec)
losses.append(loss.item())
loss.backward()
optimizer.step()
# Check conditions
if not loss.isfinite():
raise OverflowError("Overflow encountered in Mel -> STFT optimization")
if loss_threshold and loss < loss_threshold:
if verbose:
print(f"Target error of {loss_threshold} reached. Stopping optimization.")
break
if grad_threshold and pred_stft.grad.max() < grad_threshold:
if verbose:
print(f"Target max gradient of {grad_threshold} reached. Stopping optimization.")
break
pred_stft = pred_stft.detach().clamp(eps) ** 0.5
pred_stft = pred_stft.view((*shape[:-2], n_freq, time))
if return_extras:
return pred_stft, pred_mel.detach(), losses
return pred_stft
def inverse(self, melspec, mel_inversion_params=None, stft_inversion_params=None):
default_mel_inversion_params = {}
default_stft_inversion_params = {}
mel_inversion_params = mel_inversion_params or {}
stft_inversion_params = stft_inversion_params or {}
if mel_inversion_params:
mel_inversion_params = {**default_mel_inversion_params, **mel_inversion_params}
if stft_inversion_params:
stft_inversion_params = {**default_stft_inversion_params, **stft_inversion_params}
recon_stft = self.to_stft(melspec, **mel_inversion_params)
recon_audio = self.stft.inverse(recon_stft, **stft_inversion_params)
return recon_audio
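# A minimal, hedged example of the two-stage Mel inversion implemented above:
# `to_stft` recovers a magnitude STFT via SGD, then the internal STFT layer turns
# it back into audio. `x` is an assumed waveform tensor; the kwargs are illustrative.
# >>> mel_layer = MelSpectrogram(sr=22050, n_fft=2048, hop_length=512)
# >>> mel = mel_layer(x)                                            # (1, n_mels, time_steps)
# >>> x_hat = mel_layer.inverse(mel, mel_inversion_params={'max_steps': 500})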
class MFCC(torch.nn.Module):
"""This function is to calculate the Mel-frequency cepstral coefficients (MFCCs) of the input signal.
This algorithm first extracts Mel spectrograms from the audio clips,
    then the discrete cosine transform is calculated to obtain the final MFCCs.
Therefore, the Mel spectrogram part can be made trainable using
``trainable_mel`` and ``trainable_STFT``.
    It only supports type-II DCT at the moment. Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
    The correct shape will be inferred automatically if the input follows these 3 shapes.
Most of the arguments follow the convention from librosa.
    This class inherits from ``torch.nn.Module``, therefore, the usage is the same as ``torch.nn.Module``.
Parameters
----------
sr : int
The sampling rate for the input audio. It is used to calculate the correct ``fmin`` and ``fmax``.
Setting the correct sampling rate is very important for calculating the correct frequency.
n_mfcc : int
The number of Mel-frequency cepstral coefficients
norm : string
        Normalization for the DCT basis. The default value is 'ortho'.
**kwargs
Other arguments for Melspectrogram such as n_fft, n_mels, hop_length, and window
Returns
-------
MFCCs : torch.tensor
It returns a tensor of MFCCs. shape = ``(num_samples, n_mfcc, time_steps)``.
Examples
--------
>>> spec_layer = Spectrogram.MFCC()
>>> mfcc = spec_layer(x)
"""
def __init__(self, sr=22050, n_mfcc=20, norm='ortho', verbose=True, ref=1.0, amin=1e-10, top_db=80.0, **kwargs):
super().__init__()
self.melspec_layer = MelSpectrogram(sr=sr, verbose=verbose, **kwargs)
self.m_mfcc = n_mfcc
# attributes that will be used for _power_to_db
if amin <= 0:
raise ParameterError('amin must be strictly positive')
amin = torch.tensor([amin])
ref = torch.abs(torch.tensor([ref]))
self.register_buffer('amin', amin)
self.register_buffer('ref', ref)
self.top_db = top_db
self.n_mfcc = n_mfcc
def _power_to_db(self, S):
'''
Refer to https://librosa.github.io/librosa/_modules/librosa/core/spectrum.html#power_to_db
        for the original implementation.
'''
log_spec = 10.0 * torch.log10(torch.max(S, self.amin))
log_spec -= 10.0 * torch.log10(torch.max(self.amin, self.ref))
if self.top_db is not None:
if self.top_db < 0:
raise ParameterError('top_db must be non-negative')
# make the dim same as log_spec so that it can be broadcasted
batch_wise_max = log_spec.flatten(1).max(1)[0].unsqueeze(1).unsqueeze(1)
log_spec = torch.max(log_spec, batch_wise_max - self.top_db)
return log_spec
def _dct(self, x, norm=None):
'''
        Refer to https://github.com/zh217/torch-dct for the original implementation.
'''
x = x.permute(0,2,1) # make freq the last axis, since dct applies to the frequency axis
x_shape = x.shape
N = x_shape[-1]
v = torch.cat([x[:, :, ::2], x[:, :, 1::2].flip([2])], dim=2)
        # torch.rfft was removed in recent PyTorch; torch.fft with a real view is equivalent
        Vc = torch.view_as_real(torch.fft.fft(v, dim=-1))
# TODO: Can make the W_r and W_i trainable here
k = - torch.arange(N, dtype=x.dtype, device=x.device)[None, :] * np.pi / (2 * N)
W_r = torch.cos(k)
W_i = torch.sin(k)
V = Vc[:, :, :, 0] * W_r - Vc[:, :, :, 1] * W_i
if norm == 'ortho':
V[:, :, 0] /= np.sqrt(N) * 2
V[:, :, 1:] /= np.sqrt(N / 2) * 2
V = 2 * V
return V.permute(0,2,1) # swapping back the time axis and freq axis
def forward(self, x):
"""
Convert a batch of waveforms to MFCC.
Parameters
----------
x : torch tensor
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
It will be automatically broadcast to the right shape
"""
x = self.melspec_layer(x)
x = self._power_to_db(x)
x = self._dct(x, norm='ortho')[:,:self.m_mfcc,:]
return x
def extra_repr(self) -> str:
return 'n_mfcc = {}'.format(
(self.n_mfcc)
)
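# Sanity-check sketch (an illustration, not part of the original tests): the
# `_dct` helper above implements a type-II DCT along the Mel axis, so its output
# should agree with `scipy.fftpack.dct(..., type=2, norm='ortho')` on that axis.
# >>> import numpy as np
# >>> from scipy.fftpack import dct
# >>> s = torch.randn(1, 128, 10)                  # (batch, n_mels, time_steps)
# >>> ours = MFCC()._dct(s, norm='ortho').numpy()
# >>> ref = dct(s.numpy(), type=2, axis=1, norm='ortho')
# >>> np.allclose(ours, ref, atol=1e-5)
# True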
class CQT1992(torch.nn.Module):
"""
    This algorithm uses the method proposed in [1]. Please refer to :func:`~nnAudio.Spectrogram.CQT1992v2` for a more
    computationally and memory efficient version.
[1] Brown, <NAME>. and <NAME>. “An efficient algorithm for the calculation of a
constant Q transform.” (1992).
This function is to calculate the CQT of the input signal.
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
    The correct shape will be inferred automatically if the input follows these 3 shapes.
Most of the arguments follow the convention from librosa.
    This class inherits from ``torch.nn.Module``, therefore, the usage is the same as ``torch.nn.Module``.
Parameters
----------
sr : int
        The sampling rate for the input audio. It is used to calculate the correct ``fmin`` and ``fmax``.
Setting the correct sampling rate is very important for calculating the correct frequency.
hop_length : int
The hop (or stride) size. Default value is 512.
fmin : float
        The frequency for the lowest CQT bin. Default is 220Hz.
fmax : float
        The frequency for the highest CQT bin. Default is ``None``, therefore the highest CQT bin is
inferred from the ``n_bins`` and ``bins_per_octave``.
If ``fmax`` is not ``None``, then the argument ``n_bins`` will be ignored and ``n_bins``
will be calculated automatically. Default is ``None``
n_bins : int
The total numbers of CQT bins. Default is 84. Will be ignored if ``fmax`` is not ``None``.
bins_per_octave : int
Number of bins per octave. Default is 12.
trainable_STFT : bool
Determine if the time to frequency domain transformation kernel for the input audio is trainable or not.
Default is ``False``
trainable_CQT : bool
Determine if the frequency domain CQT kernel is trainable or not.
Default is ``False``
norm : int
Normalization for the CQT kernels. ``1`` means L1 normalization, and ``2`` means L2 normalization.
Default is ``1``, which is same as the normalization used in librosa.
window : str
The windowing function for CQT. It uses ``scipy.signal.get_window``, please refer to
scipy documentation for possible windowing functions. The default value is 'hann'.
center : bool
        Putting the CQT kernel at the center of the time-step or not. If ``False``, the time index is
        the beginning of the CQT kernel, if ``True``, the time index is the center of the CQT kernel.
        Default value is ``True``.
pad_mode : str
The padding method. Default value is 'reflect'.
trainable : bool
Determine if the CQT kernels are trainable or not. If ``True``, the gradients for CQT kernels
        will also be calculated and the CQT kernels will be updated during model training.
Default value is ``False``.
output_format : str
Determine the return type.
``Magnitude`` will return the magnitude of the STFT result, shape = ``(num_samples, freq_bins,time_steps)``;
``Complex`` will return the STFT result in complex number, shape = ``(num_samples, freq_bins,time_steps, 2)``;
        ``Phase`` will return the phase of the STFT result, shape = ``(num_samples, freq_bins,time_steps, 2)``.
The complex number is stored as ``(real, imag)`` in the last axis. Default value is 'Magnitude'.
verbose : bool
If ``True``, it shows layer information. If ``False``, it suppresses all prints
Returns
-------
spectrogram : torch.tensor
It returns a tensor of spectrograms.
shape = ``(num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``;
shape = ``(num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex' or 'Phase'``;
Examples
--------
    >>> spec_layer = Spectrogram.CQT1992()
>>> specs = spec_layer(x)
"""
def __init__(self, sr=22050, hop_length=512, fmin=220, fmax=None, n_bins=84,
trainable_STFT=False, trainable_CQT=False, bins_per_octave=12,
output_format='Complex', norm=1, window='hann', center=True, pad_mode='reflect'):
super().__init__()
# norm arg is not functioning
self.hop_length = hop_length
self.center = center
self.pad_mode = pad_mode
self.norm = norm
self.output_format = output_format
# creating kernels for CQT
Q = 1/(2**(1/bins_per_octave)-1)
print("Creating CQT kernels ...", end='\r')
start = time()
cqt_kernels, self.kernel_width, lenghts = create_cqt_kernels(Q,
sr,
fmin,
n_bins,
bins_per_octave,
norm,
window,
fmax)
self.register_buffer('lenghts', lenghts)
cqt_kernels = fft(cqt_kernels)[:,:self.kernel_width//2+1]
print("CQT kernels created, time used = {:.4f} seconds".format(time()-start))
# creating kernels for stft
# self.cqt_kernels_real*=lenghts.unsqueeze(1)/self.kernel_width # Trying to normalize as librosa
# self.cqt_kernels_imag*=lenghts.unsqueeze(1)/self.kernel_width
print("Creating STFT kernels ...", end='\r')
start = time()
kernel_sin, kernel_cos, self.bins2freq, _, window = create_fourier_kernels(self.kernel_width,
window='ones',
freq_scale='no')
# Converting kernels from numpy arrays to torch tensors
wsin = torch.tensor(kernel_sin * window)
wcos = torch.tensor(kernel_cos * window)
cqt_kernels_real = torch.tensor(cqt_kernels.real.astype(np.float32))
cqt_kernels_imag = torch.tensor(cqt_kernels.imag.astype(np.float32))
if trainable_STFT:
            wsin = torch.nn.Parameter(wsin, requires_grad=trainable_STFT)
            wcos = torch.nn.Parameter(wcos, requires_grad=trainable_STFT)
self.register_parameter('wsin', wsin)
self.register_parameter('wcos', wcos)
else:
self.register_buffer('wsin', wsin)
self.register_buffer('wcos', wcos)
if trainable_CQT:
            cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable_CQT)
            cqt_kernels_imag = torch.nn.Parameter(cqt_kernels_imag, requires_grad=trainable_CQT)
self.register_parameter('cqt_kernels_real', cqt_kernels_real)
self.register_parameter('cqt_kernels_imag', cqt_kernels_imag)
else:
self.register_buffer('cqt_kernels_real', cqt_kernels_real)
self.register_buffer('cqt_kernels_imag', cqt_kernels_imag)
print("STFT kernels created, time used = {:.4f} seconds".format(time()-start))
def forward(self, x, output_format=None):
"""
Convert a batch of waveforms to CQT spectrograms.
Parameters
----------
x : torch tensor
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
It will be automatically broadcast to the right shape
"""
output_format = output_format or self.output_format
x = broadcast_dim(x)
if self.center:
if self.pad_mode == 'constant':
padding = nn.ConstantPad1d(self.kernel_width//2, 0)
elif self.pad_mode == 'reflect':
padding = nn.ReflectionPad1d(self.kernel_width//2)
x = padding(x)
# STFT
fourier_real = conv1d(x, self.wcos, stride=self.hop_length)
fourier_imag = conv1d(x, self.wsin, stride=self.hop_length)
# CQT
CQT_real, CQT_imag = complex_mul((self.cqt_kernels_real, self.cqt_kernels_imag),
(fourier_real, fourier_imag))
CQT = torch.stack((CQT_real,-CQT_imag),-1)
if self.norm:
CQT = CQT/self.kernel_width*torch.sqrt(self.lenghts.view(-1,1,1))
else:
CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1))
if output_format=='Magnitude':
# Getting CQT Amplitude
return torch.sqrt(CQT.pow(2).sum(-1))
elif output_format=='Complex':
return CQT
elif output_format=='Phase':
phase_real = torch.cos(torch.atan2(CQT_imag,CQT_real))
phase_imag = torch.sin(torch.atan2(CQT_imag,CQT_real))
return torch.stack((phase_real,phase_imag), -1)
def extra_repr(self) -> str:
return 'STFT kernel size = {}, CQT kernel size = {}'.format(
(*self.wcos.shape,), (*self.cqt_kernels_real.shape,)
)
class CQT2010(torch.nn.Module):
"""
This algorithm is using the resampling method proposed in [1].
Instead of convoluting the STFT results with a gigantic CQT kernel covering the full frequency
spectrum, we make a small CQT kernel covering only the top octave.
    Then we keep downsampling the input audio by a factor of 2 and convolving it with the
    small CQT kernel. Every time the input audio is downsampled, the CQT relative to the downsampled
    input is equivalent to the next lower octave.
    The kernel creation process is still the same as in the 1992 algorithm. Therefore, we can reuse the code
    from the 1992 algorithm [2].
[1] <NAME>. “CONSTANT-Q TRANSFORM TOOLBOX FOR MUSIC PROCESSING.” (2010).
[2] Brown, <NAME>. and <NAME>. “An efficient algorithm for the calculation of a
constant Q transform.” (1992).
    The early downsampling factor is used to downsample the input audio to reduce the CQT kernel size.
    The results with and without early downsampling are more or less the same except in the very low
    frequency region where freq < 40Hz.
"""
def __init__(self, sr=22050, hop_length=512, fmin=32.70, fmax=None, n_bins=84, bins_per_octave=12,
norm=True, basis_norm=1, window='hann', pad_mode='reflect', trainable_STFT=False,
trainable_CQT=False, output_format='Complex', earlydownsample=True, verbose=True):
super().__init__()
self.norm = norm # Now norm is used to normalize the final CQT result by dividing n_fft
# basis_norm is for normalizing basis
self.hop_length = hop_length
self.pad_mode = pad_mode
self.n_bins = n_bins
self.output_format = output_format
self.earlydownsample = earlydownsample # TODO: activate early downsampling later if possible
# This will be used to calculate filter_cutoff and creating CQT kernels
Q = 1/(2**(1/bins_per_octave)-1)
# Creating lowpass filter and make it a torch tensor
        if verbose:
print("Creating low pass filter ...", end='\r')
start = time()
lowpass_filter = torch.tensor(create_lowpass_filter(
band_center = 0.5,
kernelLength=256,
transitionBandwidth=0.001
)
)
# Broadcast the tensor to the shape that fits conv1d
self.register_buffer('lowpass_filter', lowpass_filter[None,None,:])
        if verbose:
print("Low pass filter created, time used = {:.4f} seconds".format(time()-start))
        # Calculate the number of filters required for the kernel
        # n_octaves determines how many resampling steps are needed for the CQT
n_filters = min(bins_per_octave, n_bins)
self.n_octaves = int(np.ceil(float(n_bins) / bins_per_octave))
# print("n_octaves = ", self.n_octaves)
# Calculate the lowest frequency bin for the top octave kernel
self.fmin_t = fmin*2**(self.n_octaves-1)
remainder = n_bins % bins_per_octave
# print("remainder = ", remainder)
if remainder==0:
# Calculate the top bin frequency
fmax_t = self.fmin_t*2**((bins_per_octave-1)/bins_per_octave)
else:
# Calculate the top bin frequency
fmax_t = self.fmin_t*2**((remainder-1)/bins_per_octave)
        self.fmin_t = fmax_t/2**(1-1/bins_per_octave) # Adjusting the minimum frequency of the top octave
if fmax_t > sr/2:
raise ValueError('The top bin {}Hz has exceeded the Nyquist frequency, \
please reduce the n_bins'.format(fmax_t))
        if self.earlydownsample: # Do early downsampling if this argument is True
            if verbose:
print("Creating early downsampling filter ...", end='\r')
start = time()
sr, self.hop_length, self.downsample_factor, early_downsample_filter, \
self.earlydownsample = get_early_downsample_params(sr,
hop_length,
fmax_t,
Q,
self.n_octaves,
verbose)
self.register_buffer('early_downsample_filter', early_downsample_filter)
            if verbose:
print("Early downsampling filter created, \
time used = {:.4f} seconds".format(time()-start))
else:
self.downsample_factor=1.
# Preparing CQT kernels
        if verbose:
print("Creating CQT kernels ...", end='\r')
start = time()
# print("Q = {}, fmin_t = {}, n_filters = {}".format(Q, self.fmin_t, n_filters))
basis, self.n_fft, _ = create_cqt_kernels(Q,
sr,
self.fmin_t,
n_filters,
bins_per_octave,
norm=basis_norm,
topbin_check=False)
# This is for the normalization in the end
        freqs = fmin * 2.0 ** (np.r_[0:n_bins] / float(bins_per_octave))
lenghts = np.ceil(Q * sr / freqs)
lenghts = torch.tensor(lenghts).float()
self.register_buffer('lenghts', lenghts)
self.basis=basis
fft_basis = fft(basis)[:,:self.n_fft//2+1] # Convert CQT kenral from time domain to freq domain
# These cqt_kernel is already in the frequency domain
cqt_kernels_real = torch.tensor(fft_basis.real.astype(np.float32))
cqt_kernels_imag = torch.tensor(fft_basis.imag.astype(np.float32))
        if verbose:
print("CQT kernels created, time used = {:.4f} seconds".format(time()-start))
# print("Getting cqt kernel done, n_fft = ",self.n_fft)
# Preparing kernels for Short-Time Fourier Transform (STFT)
# We set the frequency range in the CQT filter instead of here.
        if verbose:
print("Creating STFT kernels ...", end='\r')
start = time()
kernel_sin, kernel_cos, self.bins2freq, _, window = create_fourier_kernels(self.n_fft, window='ones', freq_scale='no')
wsin = kernel_sin * window
wcos = kernel_cos * window
wsin = torch.tensor(wsin)
wcos = torch.tensor(wcos)
        if verbose:
print("STFT kernels created, time used = {:.4f} seconds".format(time()-start))
if trainable_STFT:
            wsin = torch.nn.Parameter(wsin, requires_grad=trainable_STFT)
            wcos = torch.nn.Parameter(wcos, requires_grad=trainable_STFT)
self.register_parameter('wsin', wsin)
self.register_parameter('wcos', wcos)
else:
self.register_buffer('wsin', wsin)
self.register_buffer('wcos', wcos)
if trainable_CQT:
            cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable_CQT)
            cqt_kernels_imag = torch.nn.Parameter(cqt_kernels_imag, requires_grad=trainable_CQT)
self.register_parameter('cqt_kernels_real', cqt_kernels_real)
self.register_parameter('cqt_kernels_imag', cqt_kernels_imag)
else:
self.register_buffer('cqt_kernels_real', cqt_kernels_real)
self.register_buffer('cqt_kernels_imag', cqt_kernels_imag)
# If center==True, the STFT window will be put in the middle, and paddings at the beginning
# and ending are required.
if self.pad_mode == 'constant':
self.padding = nn.ConstantPad1d(self.n_fft//2, 0)
elif self.pad_mode == 'reflect':
self.padding = nn.ReflectionPad1d(self.n_fft//2)
def forward(self,x, output_format=None):
"""
Convert a batch of waveforms to CQT spectrograms.
Parameters
----------
x : torch tensor
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
It will be automatically broadcast to the right shape
"""
output_format = output_format or self.output_format
x = broadcast_dim(x)
        if self.earlydownsample:
x = downsampling_by_n(x, self.early_downsample_filter, self.downsample_factor)
hop = self.hop_length
CQT = get_cqt_complex(x, self.wcos, self.wsin, hop, self.padding) # Getting the top octave CQT
x_down = x # Preparing a new variable for downsampling
for i in range(self.n_octaves-1):
hop = hop//2
x_down = downsampling_by_2(x_down, self.lowpass_filter)
CQT1 = get_cqt_complex(x_down, self.wcos, self.wsin, hop, self.padding)
CQT = torch.cat((CQT1, CQT),1)
        CQT = CQT[:,-self.n_bins:,:] # Removing unwanted bottom bins
if self.norm:
CQT = CQT/self.n_fft*torch.sqrt(self.lenghts.view(-1,1,1))
else:
CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1))
        # Normalizing the output with the downsampling factor; 2**(self.n_octaves-1)
        # makes it the same magnitude as the 1992 version
CQT = CQT*self.downsample_factor
if output_format=='Magnitude':
# Getting CQT Amplitude
return torch.sqrt(CQT.pow(2).sum(-1))
elif output_format=='Complex':
return CQT
elif output_format=='Phase':
phase_real = torch.cos(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0]))
phase_imag = torch.sin(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0]))
return torch.stack((phase_real,phase_imag), -1)
def extra_repr(self) -> str:
return 'STFT kernel size = {}, CQT kernel size = {}'.format(
(*self.wcos.shape,), (*self.cqt_kernels_real.shape,)
)
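# Worked numbers (illustrative) for the bookkeeping in CQT2010.__init__ above:
# with the defaults n_bins=84 and bins_per_octave=12, n_octaves = ceil(84/12) = 7,
# so the small kernel covers only the top octave starting at
# fmin_t = 32.70 * 2**(7-1) ≈ 2092.8 Hz, and forward() downsamples the audio by 2
# six times, reusing that same kernel one octave lower on each pass.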
class CQT1992v2(torch.nn.Module):
"""This function is to calculate the CQT of the input signal.
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
    The correct shape will be inferred automatically if the input follows these 3 shapes.
Most of the arguments follow the convention from librosa.
    This class inherits from ``torch.nn.Module``, therefore, the usage is the same as ``torch.nn.Module``.
    This algorithm uses the method proposed in [1]. I slightly modified it so that it runs faster
    than the original 1992 algorithm, that is why I call it version 2.
[1] Brown, <NAME>. and <NAME>. “An efficient algorithm for the calculation of a
constant Q transform.” (1992).
Parameters
----------
sr : int
        The sampling rate for the input audio. It is used to calculate the correct ``fmin`` and ``fmax``.
Setting the correct sampling rate is very important for calculating the correct frequency.
hop_length : int
The hop (or stride) size. Default value is 512.
fmin : float
        The frequency for the lowest CQT bin. Default is 32.70Hz, which corresponds to the note C1.
fmax : float
        The frequency for the highest CQT bin. Default is ``None``, therefore the highest CQT bin is
inferred from the ``n_bins`` and ``bins_per_octave``.
If ``fmax`` is not ``None``, then the argument ``n_bins`` will be ignored and ``n_bins``
will be calculated automatically. Default is ``None``
n_bins : int
The total numbers of CQT bins. Default is 84. Will be ignored if ``fmax`` is not ``None``.
bins_per_octave : int
Number of bins per octave. Default is 12.
norm : int
Normalization for the CQT kernels. ``1`` means L1 normalization, and ``2`` means L2 normalization.
Default is ``1``, which is same as the normalization used in librosa.
window : str
The windowing function for CQT. It uses ``scipy.signal.get_window``, please refer to
scipy documentation for possible windowing functions. The default value is 'hann'.
center : bool
        Putting the CQT kernel at the center of the time-step or not. If ``False``, the time index is
        the beginning of the CQT kernel, if ``True``, the time index is the center of the CQT kernel.
        Default value is ``True``.
pad_mode : str
The padding method. Default value is 'reflect'.
trainable : bool
Determine if the CQT kernels are trainable or not. If ``True``, the gradients for CQT kernels
        will also be calculated and the CQT kernels will be updated during model training.
Default value is ``False``.
output_format : str
Determine the return type.
``Magnitude`` will return the magnitude of the STFT result, shape = ``(num_samples, freq_bins,time_steps)``;
``Complex`` will return the STFT result in complex number, shape = ``(num_samples, freq_bins,time_steps, 2)``;
        ``Phase`` will return the phase of the STFT result, shape = ``(num_samples, freq_bins,time_steps, 2)``.
The complex number is stored as ``(real, imag)`` in the last axis. Default value is 'Magnitude'.
verbose : bool
If ``True``, it shows layer information. If ``False``, it suppresses all prints
Returns
-------
spectrogram : torch.tensor
It returns a tensor of spectrograms.
shape = ``(num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``;
shape = ``(num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex' or 'Phase'``;
Examples
--------
>>> spec_layer = Spectrogram.CQT1992v2()
>>> specs = spec_layer(x)
"""
def __init__(self, sr=22050, hop_length=512, fmin=32.70, fmax=None, n_bins=84,
bins_per_octave=12, norm=1, window='hann', center=True, pad_mode='reflect',
trainable=False, output_format='Magnitude', verbose=True):
super().__init__()
# norm arg is not functioning
self.trainable = trainable
self.hop_length = hop_length
self.center = center
self.pad_mode = pad_mode
self.output_format = output_format
# creating kernels for CQT
Q = 1/(2**(1/bins_per_octave)-1)
        if verbose:
print("Creating CQT kernels ...", end='\r')
start = time()
cqt_kernels, self.kernel_width, lenghts = create_cqt_kernels(Q,
sr,
fmin,
n_bins,
bins_per_octave,
norm,
window,
fmax)
self.register_buffer('lenghts', lenghts)
cqt_kernels_real = torch.tensor(cqt_kernels.real).unsqueeze(1)
cqt_kernels_imag = torch.tensor(cqt_kernels.imag).unsqueeze(1)
if trainable:
            cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable)
            cqt_kernels_imag = torch.nn.Parameter(cqt_kernels_imag, requires_grad=trainable)
self.register_parameter('cqt_kernels_real', cqt_kernels_real)
self.register_parameter('cqt_kernels_imag', cqt_kernels_imag)
else:
self.register_buffer('cqt_kernels_real', cqt_kernels_real)
self.register_buffer('cqt_kernels_imag', cqt_kernels_imag)
        if verbose:
print("CQT kernels created, time used = {:.4f} seconds".format(time()-start))
def forward(self,x, output_format=None):
"""
Convert a batch of waveforms to CQT spectrograms.
Parameters
----------
x : torch tensor
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
It will be automatically broadcast to the right shape
"""
output_format = output_format or self.output_format
x = broadcast_dim(x)
if self.center:
if self.pad_mode == 'constant':
padding = nn.ConstantPad1d(self.kernel_width//2, 0)
elif self.pad_mode == 'reflect':
padding = nn.ReflectionPad1d(self.kernel_width//2)
x = padding(x)
# CQT
CQT_real = conv1d(x, self.cqt_kernels_real, stride=self.hop_length) * \
torch.sqrt(self.lenghts.view(-1,1))
CQT_imag = -conv1d(x, self.cqt_kernels_imag, stride=self.hop_length) * \
torch.sqrt(self.lenghts.view(-1,1))
if output_format=='Magnitude':
            if not self.trainable:
                # Getting CQT Amplitude
                CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2))
            else:
                CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2)+1e-8)
return CQT
elif output_format=='Complex':
return torch.stack((CQT_real,CQT_imag),-1)
elif output_format=='Phase':
phase_real = torch.cos(torch.atan2(CQT_imag,CQT_real))
phase_imag = torch.sin(torch.atan2(CQT_imag,CQT_real))
return torch.stack((phase_real,phase_imag), -1)
def forward_manual(self,x):
"""
Method for debugging
"""
x = broadcast_dim(x)
if self.center:
if self.pad_mode == 'constant':
padding = nn.ConstantPad1d(self.kernel_width//2, 0)
elif self.pad_mode == 'reflect':
padding = nn.ReflectionPad1d(self.kernel_width//2)
x = padding(x)
# CQT
CQT_real = conv1d(x, self.cqt_kernels_real, stride=self.hop_length)
CQT_imag = conv1d(x, self.cqt_kernels_imag, stride=self.hop_length)
# Getting CQT Amplitude
CQT = torch.sqrt(CQT_real.pow(2)+CQT_imag.pow(2))
return CQT*torch.sqrt(self.lenghts.view(-1,1))
class CQT2010v2(torch.nn.Module):
"""This function is to calculate the CQT of the input signal.
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
    The correct shape will be inferred automatically if the input follows these 3 shapes.
Most of the arguments follow the convention from librosa.
    This class inherits from ``torch.nn.Module``, therefore, the usage is the same as ``torch.nn.Module``.
    This algorithm uses the resampling method proposed in [1].
Instead of convoluting the STFT results with a gigantic CQT kernel covering the full frequency
    spectrum, we make a small CQT kernel covering only the top octave. Then we keep downsampling the
    input audio by a factor of 2 and convolving it with the small CQT kernel.
    Every time the input audio is downsampled, the CQT relative to the downsampled input is equivalent
    to the next lower octave.
    The kernel creation process is still the same as in the 1992 algorithm. Therefore, we can reuse the
    code from the 1992 algorithm [2].
[1] Schörkhuber, Christian. “CONSTANT-Q TRANSFORM TOOLBOX FOR MUSIC PROCESSING.” (2010).
[2] Brown, <NAME>. and <NAME>. “An efficient algorithm for the calculation of a
constant Q transform.” (1992).
    The early downsampling factor is used to downsample the input audio to reduce the CQT kernel size.
    The results with and without early downsampling are more or less the same except in the very low
    frequency region where freq < 40Hz.
Parameters
----------
sr : int
        The sampling rate for the input audio. It is used to calculate the correct ``fmin`` and ``fmax``.
Setting the correct sampling rate is very important for calculating the correct frequency.
hop_length : int
The hop (or stride) size. Default value is 512.
fmin : float
        The frequency for the lowest CQT bin. Default is 32.70Hz, which corresponds to the note C1.
fmax : float
        The frequency for the highest CQT bin. Default is ``None``, therefore the highest CQT bin is
inferred from the ``n_bins`` and ``bins_per_octave``. If ``fmax`` is not ``None``, then the
argument ``n_bins`` will be ignored and ``n_bins`` will be calculated automatically.
Default is ``None``
n_bins : int
The total numbers of CQT bins. Default is 84. Will be ignored if ``fmax`` is not ``None``.
bins_per_octave : int
Number of bins per octave. Default is 12.
norm : bool
Normalization for the CQT result.
basis_norm : int
Normalization for the CQT kernels. ``1`` means L1 normalization, and ``2`` means L2 normalization.
Default is ``1``, which is same as the normalization used in librosa.
window : str
The windowing function for CQT. It uses ``scipy.signal.get_window``, please refer to
scipy documentation for possible windowing functions. The default value is 'hann'
pad_mode : str
The padding method. Default value is 'reflect'.
trainable : bool
Determine if the CQT kernels are trainable or not. If ``True``, the gradients for CQT kernels
        will also be calculated and the CQT kernels will be updated during model training.
Default value is ``False``
output_format : str
Determine the return type.
'Magnitude' will return the magnitude of the STFT result, shape = ``(num_samples, freq_bins, time_steps)``;
'Complex' will return the STFT result in complex number, shape = ``(num_samples, freq_bins, time_steps, 2)``;
        'Phase' will return the phase of the STFT result, shape = ``(num_samples, freq_bins,time_steps, 2)``.
The complex number is stored as ``(real, imag)`` in the last axis. Default value is 'Magnitude'.
verbose : bool
If ``True``, it shows layer information. If ``False``, it suppresses all prints.
device : str
Choose which device to initialize this layer. Default value is 'cpu'.
Returns
-------
spectrogram : torch.tensor
It returns a tensor of spectrograms.
shape = ``(num_samples, freq_bins,time_steps)`` if ``output_format='Magnitude'``;
shape = ``(num_samples, freq_bins,time_steps, 2)`` if ``output_format='Complex' or 'Phase'``;
Examples
--------
>>> spec_layer = Spectrogram.CQT2010v2()
>>> specs = spec_layer(x)
"""
    # TODO:
# need to deal with the filter and other tensors
def __init__(self, sr=22050, hop_length=512, fmin=32.70, fmax=None, n_bins=84,
bins_per_octave=12, norm=True, basis_norm=1, window='hann', pad_mode='reflect',
earlydownsample=True, trainable=False, output_format='Magnitude', verbose=True):
super().__init__()
self.norm = norm # Now norm is used to normalize the final CQT result by dividing n_fft
# basis_norm is for normalizing basis
self.hop_length = hop_length
self.pad_mode = pad_mode
self.n_bins = n_bins
self.earlydownsample = earlydownsample # We will activate early downsampling later if possible
self.trainable = trainable
self.output_format = output_format
# It will be used to calculate filter_cutoff and creating CQT kernels
Q = 1/(2**(1/bins_per_octave)-1)
# Creating lowpass filter and make it a torch tensor
        if verbose:
print("Creating low pass filter ...", end='\r')
start = time()
# self.lowpass_filter = torch.tensor(
# create_lowpass_filter(
# band_center = 0.50,
# kernelLength=256,
# transitionBandwidth=0.001))
lowpass_filter = torch.tensor(create_lowpass_filter(
band_center = 0.50,
kernelLength=256,
transitionBandwidth=0.001)
)
# Broadcast the tensor to the shape that fits conv1d
self.register_buffer('lowpass_filter', lowpass_filter[None,None,:])
        if verbose:
print("Low pass filter created, time used = {:.4f} seconds".format(time()-start))
        # Calculate the number of filters required for the kernel
        # n_octaves determines how many resampling steps are needed for the CQT
n_filters = min(bins_per_octave, n_bins)
self.n_octaves = int(np.ceil(float(n_bins) / bins_per_octave))
        if verbose:
print("num_octave = ", self.n_octaves)
# Calculate the lowest frequency bin for the top octave kernel
self.fmin_t = fmin*2**(self.n_octaves-1)
remainder = n_bins % bins_per_octave
# print("remainder = ", remainder)
if remainder==0:
# Calculate the top bin frequency
fmax_t = self.fmin_t*2**((bins_per_octave-1)/bins_per_octave)
else:
# Calculate the top bin frequency
fmax_t = self.fmin_t*2**((remainder-1)/bins_per_octave)
        self.fmin_t = fmax_t/2**(1-1/bins_per_octave) # Adjusting the minimum frequency of the top octave
if fmax_t > sr/2:
raise ValueError('The top bin {}Hz has exceeded the Nyquist frequency, \
please reduce the n_bins'.format(fmax_t))
        if self.earlydownsample: # Do early downsampling if this argument is True
            if verbose:
print("Creating early downsampling filter ...", end='\r')
start = time()
sr, self.hop_length, self.downsample_factor, early_downsample_filter, \
self.earlydownsample = get_early_downsample_params(sr,
hop_length,
fmax_t,
Q,
self.n_octaves,
verbose)
self.register_buffer('early_downsample_filter', early_downsample_filter)
            if verbose:
print("Early downsampling filter created, \
time used = {:.4f} seconds".format(time()-start))
else:
self.downsample_factor=1.
# Preparing CQT kernels
        if verbose:
print("Creating CQT kernels ...", end='\r')
start = time()
basis, self.n_fft, lenghts = create_cqt_kernels(Q,
sr,
self.fmin_t,
n_filters,
bins_per_octave,
norm=basis_norm,
topbin_check=False)
# For normalization in the end
        freqs = fmin * 2.0 ** (np.r_[0:n_bins] / float(bins_per_octave))
lenghts = np.ceil(Q * sr / freqs)
lenghts = torch.tensor(lenghts).float()
self.register_buffer('lenghts', lenghts)
self.basis = basis
# These cqt_kernel is already in the frequency domain
cqt_kernels_real = torch.tensor(basis.real.astype(np.float32)).unsqueeze(1)
cqt_kernels_imag = torch.tensor(basis.imag.astype(np.float32)).unsqueeze(1)
if trainable:
            cqt_kernels_real = torch.nn.Parameter(cqt_kernels_real, requires_grad=trainable)
            cqt_kernels_imag = torch.nn.Parameter(cqt_kernels_imag, requires_grad=trainable)
self.register_parameter('cqt_kernels_real', cqt_kernels_real)
self.register_parameter('cqt_kernels_imag', cqt_kernels_imag)
else:
self.register_buffer('cqt_kernels_real', cqt_kernels_real)
self.register_buffer('cqt_kernels_imag', cqt_kernels_imag)
        if verbose:
print("CQT kernels created, time used = {:.4f} seconds".format(time()-start))
# print("Getting cqt kernel done, n_fft = ",self.n_fft)
# If center==True, the STFT window will be put in the middle, and paddings at the beginning
# and ending are required.
if self.pad_mode == 'constant':
self.padding = nn.ConstantPad1d(self.n_fft//2, 0)
elif self.pad_mode == 'reflect':
self.padding = nn.ReflectionPad1d(self.n_fft//2)
def forward(self,x,output_format=None):
"""
Convert a batch of waveforms to CQT spectrograms.
Parameters
----------
x : torch tensor
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
It will be automatically broadcast to the right shape
"""
output_format = output_format or self.output_format
x = broadcast_dim(x)
        if self.earlydownsample:
x = downsampling_by_n(x, self.early_downsample_filter, self.downsample_factor)
hop = self.hop_length
CQT = get_cqt_complex(x, self.cqt_kernels_real, self.cqt_kernels_imag, hop, self.padding) # Getting the top octave CQT
x_down = x # Preparing a new variable for downsampling
for i in range(self.n_octaves-1):
hop = hop//2
x_down = downsampling_by_2(x_down, self.lowpass_filter)
CQT1 = get_cqt_complex(x_down, self.cqt_kernels_real, self.cqt_kernels_imag, hop, self.padding)
CQT = torch.cat((CQT1, CQT),1)
CQT = CQT[:,-self.n_bins:,:] # Removing unwanted bottom bins
# print("downsample_factor = ",self.downsample_factor)
# print(CQT.shape)
# print(self.lenghts.view(-1,1).shape)
        # Normalizing the output with the downsampling factor; 2**(self.n_octaves-1) makes it
        # the same magnitude as the 1992 version
CQT = CQT*self.downsample_factor
# Normalize again to get same result as librosa
CQT = CQT*torch.sqrt(self.lenghts.view(-1,1,1))
if output_format=='Magnitude':
            if not self.trainable:
                # Getting CQT Amplitude
                return torch.sqrt(CQT.pow(2).sum(-1))
            else:
                return torch.sqrt(CQT.pow(2).sum(-1)+1e-8)
elif output_format=='Complex':
return CQT
elif output_format=='Phase':
phase_real = torch.cos(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0]))
phase_imag = torch.sin(torch.atan2(CQT[:,:,:,1],CQT[:,:,:,0]))
return torch.stack((phase_real,phase_imag), -1)
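# A hedged comparison sketch (illustrative, not a guaranteed tolerance): for the
# same settings the 1992v2 and 2010v2 magnitudes should roughly agree, with the
# 2010 version trading a little low-frequency accuracy for speed and memory.
# >>> cqt_a = CQT1992v2(sr=22050, n_bins=84, output_format='Magnitude')
# >>> cqt_b = CQT2010v2(sr=22050, n_bins=84, output_format='Magnitude')
# >>> spec_a, spec_b = cqt_a(x), cqt_b(x)   # same (1, 84, time_steps) shape,
# >>> # with close values except in the lowest bins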
class CQT(CQT1992v2):
"""An abbreviation for :func:`~nnAudio.Spectrogram.CQT1992v2`. Please refer to the :func:`~nnAudio.Spectrogram.CQT1992v2` documentation"""
pass
# The section below is for developing purpose
# Please don't use the following classes
#
class DFT(torch.nn.Module):
"""
    Experimental feature before `torch.fft` was made available.
    The inverse function only works for a single frame, i.e. input shape = (batch, n_fft, 1)
"""
def __init__(self, n_fft=2048, freq_bins=None, hop_length=512,
window='hann', freq_scale='no', center=True, pad_mode='reflect',
fmin=50, fmax=6000, sr=22050):
super().__init__()
self.stride = hop_length
self.center = center
self.pad_mode = pad_mode
self.n_fft = n_fft
# Create filter windows for stft
        wsin, wcos, self.bins2freq, _, _ = create_fourier_kernels(n_fft=n_fft,
freq_bins=n_fft,
window=window,
freq_scale=freq_scale,
fmin=fmin,
fmax=fmax,
sr=sr)
self.wsin = torch.tensor(wsin, dtype=torch.float)
self.wcos = torch.tensor(wcos, dtype=torch.float)
def forward(self,x):
"""
        Convert a batch of waveforms to spectra.
Parameters
----------
x : torch tensor
Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
It will be automatically broadcast to the right shape
"""
x = broadcast_dim(x)
if self.center:
if self.pad_mode == 'constant':
padding = nn.ConstantPad1d(self.n_fft//2, 0)
elif self.pad_mode == 'reflect':
padding = nn.ReflectionPad1d(self.n_fft//2)
x = padding(x)
imag = conv1d(x, self.wsin, stride=self.stride)
real = conv1d(x, self.wcos, stride=self.stride)
return (real, -imag)
def inverse(self,x_real,x_imag):
"""
        Convert a batch of spectra back to waveforms.
Parameters
----------
x_real : torch tensor
Real part of the signal.
x_imag : torch tensor
Imaginary part of the signal.
"""
x_real = broadcast_dim(x_real)
x_imag = broadcast_dim(x_imag)
x_real.transpose_(1,2) # Prepare the right shape to do inverse
x_imag.transpose_(1,2) # Prepare the right shape to do inverse
# if self.center:
# if self.pad_mode == 'constant':
# padding = nn.ConstantPad1d(self.n_fft//2, 0)
# elif self.pad_mode == 'reflect':
# padding = nn.ReflectionPad1d(self.n_fft//2)
# x_real = padding(x_real)
# x_imag = padding(x_imag)
# Watch out for the positive and negative signs
# ifft = e^(+2\pi*j)*X
# ifft(X_real) = (a1, a2)
# ifft(X_imag)*1j = (b1, b2)*1j
# = (-b2, b1)
a1 = conv1d(x_real, self.wcos, stride=self.stride)
a2 = conv1d(x_real, self.wsin, stride=self.stride)
b1 = conv1d(x_imag, self.wcos, stride=self.stride)
b2 = conv1d(x_imag, self.wsin, stride=self.stride)
imag = a2+b1
real = a1-b2
return (real/self.n_fft, imag/self.n_fft)
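# Round-trip sketch for the experimental DFT above (assumed usage): the inverse
# only supports a single frame, i.e. spectra of shape (batch, n_fft, 1).
# >>> dft = DFT(n_fft=2048, hop_length=2048, center=False)
# >>> frame = torch.randn(1, 1, 2048)
# >>> real, imag = dft(frame)                 # each of shape (1, n_fft, 1)
# >>> rec_real, rec_imag = dft.inverse(real, imag)
# >>> # rec_real holds the reconstructed frame (up to shape bookkeeping);
# >>> # rec_imag should be close to zero for real-valued input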
class iSTFT(torch.nn.Module):
"""This class is to convert spectrograms back to waveforms. It only works for the complex value spectrograms.
If you have the magnitude spectrograms, please use :func:`~nnAudio.Spectrogram.Griffin_Lim`.
The parameters (e.g. n_fft, window) need to be the same as the STFT in order to obtain the correct inverse.
If trainability is not required, it is recommended to use the ``inverse`` method under the ``STFT`` class
to save GPU/RAM memory.
When ``trainable=True`` and ``freq_scale!='no'``, there is no guarantee that the inverse is perfect, please
use with extra care.
Parameters
----------
n_fft : int
The window size. Default value is 2048.
freq_bins : int
Number of frequency bins. Default is ``None``, which means ``n_fft//2+1`` bins
Please make sure the value is the same as the forward STFT.
hop_length : int
The hop (or stride) size. Default value is ``None`` which is equivalent to ``n_fft//4``.
Please make sure the value is the same as the forward STFT.
window : str
The windowing function for iSTFT. It uses ``scipy.signal.get_window``, please refer to
scipy documentation for possible windowing functions. The default value is 'hann'.
Please make sure the value is the same as the forward STFT.
freq_scale : 'linear', 'log', or 'no'
Determine the spacing between each frequency bin. When `linear` or `log` is used,
the bin spacing can be controlled by ``fmin`` and ``fmax``. If 'no' is used, the bin will
start at 0Hz and end at Nyquist frequency with linear spacing.
Please make sure the value is the same as the forward STFT.
center : bool
        Putting the iSTFT kernel at the center of the time-step or not. If ``False``, the time
        index is the beginning of the iSTFT kernel, if ``True``, the time index is the center of
        the iSTFT kernel. Default value is ``True``.
Please make sure the value is the same as the forward STFT.
fmin : int
The starting frequency for the lowest frequency bin. If freq_scale is ``no``, this argument
does nothing. Please make sure the value is the same as the forward STFT.
fmax : int
The ending frequency for the highest frequency bin. If freq_scale is ``no``, this argument
does nothing. Please make sure the value is the same as the forward STFT.
sr : int
        The sampling rate for the input audio. It is used to calculate the correct ``fmin`` and ``fmax``.
Setting the correct sampling rate is very important for calculating the correct frequency.
trainable_kernels : bool
        Determine if the STFT kernels are trainable or not. If ``True``, the gradients for STFT
        kernels will also be calculated and the STFT kernels will be updated during model training.
Default value is ``False``.
trainable_window : bool
Determine if the window function is trainable or not.
Default value is ``False``.
verbose : bool
If ``True``, it shows layer information. If ``False``, it suppresses all prints.
device : str
Choose which device to initialize this layer. Default value is 'cpu'.
Returns
-------
    waveform : torch.tensor
        It returns a batch of waveforms.
Examples
--------
    >>> istft_layer = Spectrogram.iSTFT()
    >>> waveforms = istft_layer(specs)
"""
def __init__(self, n_fft=2048, win_length=None, freq_bins=None, hop_length=None, window='hann',
freq_scale='no', center=True, fmin=50, fmax=6000, sr=22050, trainable_kernels=False,
trainable_window=False, verbose=True, refresh_win=True):
super().__init__()
# Trying to make the default setting same as librosa
        if win_length is None: win_length = n_fft
        if hop_length is None: hop_length = int(win_length // 4)
self.n_fft = n_fft
self.win_length = win_length
self.stride = hop_length
self.center = center
self.pad_amount = self.n_fft // 2
self.refresh_win = refresh_win
start = time()
# Create the window function and prepare the shape for batch-wise-time-wise multiplication
# Create filter windows for inverse
kernel_sin, kernel_cos, _, _, window_mask = create_fourier_kernels(n_fft,
win_length=win_length,
freq_bins=n_fft,
window=window,
freq_scale=freq_scale,
fmin=fmin,
fmax=fmax,
sr=sr,
verbose=False)
window_mask = get_window(window,int(win_length), fftbins=True)
# For inverse, the Fourier kernels do not need to be windowed
window_mask = torch.tensor(window_mask).unsqueeze(0).unsqueeze(-1)
# kernel_sin and kernel_cos have the shape (freq_bins, 1, n_fft, 1) to support 2D Conv
kernel_sin = torch.tensor(kernel_sin, dtype=torch.float).unsqueeze(-1)
kernel_cos = torch.tensor(kernel_cos, dtype=torch.float).unsqueeze(-1)
# Decide if the Fourier kernels are trainable
if trainable_kernels:
# Making all these variables trainable
kernel_sin = torch.nn.Parameter(kernel_sin, requires_grad=trainable_kernels)
kernel_cos = torch.nn.Parameter(kernel_cos, requires_grad=trainable_kernels)
self.register_parameter('kernel_sin', kernel_sin)
self.register_parameter('kernel_cos', kernel_cos)
else:
self.register_buffer('kernel_sin', kernel_sin)
self.register_buffer('kernel_cos', kernel_cos)
# Decide if the window function is trainable
if trainable_window:
window_mask = torch.nn.Parameter(window_mask, requires_grad=trainable_window)
self.register_parameter('window_mask', window_mask)
else:
self.register_buffer('window_mask', window_mask)
        if verbose:
            print("iSTFT kernels created, time used = {:.4f} seconds".format(time()-start))
def forward(self, X, onesided=False, length=None, refresh_win=None):
"""
If your spectrograms only have ``n_fft//2+1`` frequency bins, please use ``onesided=True``,
else use ``onesided=False``
To make sure the inverse STFT has the same output length of the original waveform, please
set `length` as your intended waveform length. By default, ``length=None``,
which will remove ``n_fft//2`` samples from the start and the end of the output.
        If your input spectrograms X are of the same length, please use ``refresh_win=False`` to increase
        computational speed.
"""
        if refresh_win is None:
            refresh_win = self.refresh_win
        assert X.dim()==4 , "Inverse iSTFT only works for complex numbers; " \
            "make sure your tensor is in the shape of (batch, freq_bins, timesteps, 2)"
# If the input spectrogram contains only half of the n_fft
# Use extend_fbins function to get back another half
if onesided:
X = extend_fbins(X) # extend freq
X_real, X_imag = X[:, :, :, 0], X[:, :, :, 1]
# broadcast dimensions to support 2D convolution
X_real_bc = X_real.unsqueeze(1)
X_imag_bc = X_imag.unsqueeze(1)
a1 = conv2d(X_real_bc, self.kernel_cos, stride=(1,1))
b2 = conv2d(X_imag_bc, self.kernel_sin, stride=(1,1))
# compute real and imag part. signal lies in the real part
real = a1 - b2
real = real.squeeze(-2)*self.window_mask
# Normalize the amplitude with n_fft
real /= (self.n_fft)
# Overlap and Add algorithm to connect all the frames
real = overlap_add(real, self.stride)
# Prepare the window sumsqure for division
# Only need to create this window once to save time
# Unless the input spectrograms have different time steps
        if not hasattr(self, 'w_sum') or refresh_win:
            self.w_sum = torch_window_sumsquare(self.window_mask.flatten(), X.shape[2], self.stride, self.n_fft).flatten()
            self.nonzero_indices = (self.w_sum>1e-10)
real[:, self.nonzero_indices] = real[:,self.nonzero_indices].div(self.w_sum[self.nonzero_indices])
# Remove padding
if length is None:
if self.center:
real = real[:, self.pad_amount:-self.pad_amount]
else:
if self.center:
real = real[:, self.pad_amount:self.pad_amount + length]
else:
real = real[:, :length]
return real
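# Hedged usage sketch for the iSTFT layer above, paired with a one-sided forward
# STFT: `spec` is assumed to have shape (batch, n_fft//2+1, time_steps, 2).
# >>> istft_layer = iSTFT(n_fft=2048, hop_length=512)
# >>> wav = istft_layer(spec, onesided=True, length=original_len)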
class Griffin_Lim(torch.nn.Module):
"""
Converting Magnitude spectrograms back to waveforms based on the "fast Griffin-Lim"[1].
This Griffin Lim is a direct clone from librosa.griffinlim.
[1] <NAME>., <NAME>., & <NAME>. “A fast Griffin-Lim algorithm,”
IEEE Workshop on Applications of Signal Processing to Audio and Acoustics (pp. 1-4), Oct. 2013.
Parameters
----------
n_fft : int
The window size. Default value is 2048.
    n_iter : int
        The number of iterations for Griffin-Lim. The default value is ``32``.
hop_length : int
The hop (or stride) size. Default value is ``None`` which is equivalent to ``n_fft//4``.
Please make sure the value is the same as the forward STFT.
window : str
The windowing function for iSTFT. It uses ``scipy.signal.get_window``, please refer to
scipy documentation for possible windowing functions. The default value is 'hann'.
Please make sure the value is the same as the forward STFT.
center : bool
        Putting the iSTFT kernel at the center of the time-step or not. If ``False``, the time
        index is the beginning of the iSTFT kernel, if ``True``, the time index is the center of
        the iSTFT kernel. Default value is ``True``.
Please make sure the value is the same as the forward STFT.
momentum : float
The momentum for the update rule. The default value is ``0.99``.
device : str
Choose which device to initialize this layer. Default value is 'cpu'
"""
def __init__(self,
n_fft,
n_iter=32,
hop_length=None,
win_length=None,
window='hann',
center=True,
pad_mode='reflect',
momentum=0.99,
device='cpu'):
super().__init__()
self.n_fft = n_fft
self.win_length = win_length
self.n_iter = n_iter
self.center = center
self.pad_mode = pad_mode
self.momentum = momentum
self.device = device
        if win_length is None:
            self.win_length=n_fft
        else:
            self.win_length=win_length
        if hop_length is None:
            self.hop_length = n_fft//4
        else:
            self.hop_length = hop_length
# Creating window function for stft and istft later
self.w = torch.tensor(get_window(window,
int(self.win_length),
fftbins=True),
device=device).float()
def forward(self, S):
"""
Convert a batch of magnitude spectrograms to waveforms.
Parameters
----------
S : torch tensor
Spectrogram of the shape ``(batch, n_fft//2+1, timesteps)``
"""
assert S.dim()==3 , "Please make sure your input is in the shape of (batch, freq_bins, timesteps)"
# Initializing Random Phase
rand_phase = torch.randn(*S.shape, device=self.device)
angles = torch.empty((*S.shape,2), device=self.device)
angles[:, :,:,0] = torch.cos(2 * np.pi * rand_phase)
angles[:,:,:,1] = torch.sin(2 * np.pi * rand_phase)
# Initializing the rebuilt magnitude spectrogram
rebuilt = torch.zeros(*angles.shape, device=self.device)
for _ in range(self.n_iter):
tprev = rebuilt # Saving previous rebuilt magnitude spec
# spec2wav conversion
# print(f'win_length={self.win_length}\tw={self.w.shape}')
inverse = torch.istft(S.unsqueeze(-1) * angles,
self.n_fft,
self.hop_length,
win_length=self.win_length,
window=self.w,
center=self.center)
# wav2spec conversion
rebuilt = torch.stft(inverse,
self.n_fft,
self.hop_length,
win_length=self.win_length,
window=self.w,
pad_mode=self.pad_mode)
# Phase update rule
angles[:,:,:] = rebuilt[:,:,:] - (self.momentum / (1 + self.momentum)) * tprev[:,:,:]
# Phase normalization
angles = angles.div(torch.sqrt(angles.pow(2).sum(-1)).unsqueeze(-1) + 1e-16) # normalizing the phase
# Using the final phase to reconstruct the waveforms
inverse = torch.istft(S.unsqueeze(-1) * angles,
self.n_fft,
self.hop_length,
win_length=self.win_length,
window=self.w,
center=self.center)
return inverse
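# Hedged usage sketch: recovering audio from a magnitude spectrogram with the
# fast Griffin-Lim loop above. `mag` is assumed to be (batch, n_fft//2+1, time_steps).
# >>> gl = Griffin_Lim(n_fft=2048, n_iter=32, hop_length=512)
# >>> wav = gl(mag)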
| 2.609375 | 3 |
train.py | hui-won/KoBART_Project | 13 | 2613 | <gh_stars>10-100
import argparse
import logging
import os
import numpy as np
import pandas as pd
import pytorch_lightning as pl
import torch
from pytorch_lightning import loggers as pl_loggers
from torch.utils.data import DataLoader, Dataset
from dataset import KoBARTSummaryDataset
from transformers import BartForConditionalGeneration, PreTrainedTokenizerFast
from transformers.optimization import AdamW, get_cosine_schedule_with_warmup
from kobart import get_pytorch_kobart_model, get_kobart_tokenizer
parser = argparse.ArgumentParser(description='KoBART translation')
parser.add_argument('--checkpoint_path',
type=str,
help='checkpoint path')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class ArgsBase():
@staticmethod
def add_model_specific_args(parent_parser):
parser = argparse.ArgumentParser(
parents=[parent_parser], add_help=False)
parser.add_argument('--train_file',
type=str,
default='data/train.tsv',
help='train file')
parser.add_argument('--test_file',
type=str,
default='data/test.tsv',
help='test file')
parser.add_argument('--batch_size',
type=int,
default=28,
help='')
parser.add_argument('--max_len',
type=int,
default=512,
help='max seq len')
return parser
class KobartSummaryModule(pl.LightningDataModule):
def __init__(self, train_file,
test_file, tok,
max_len=512,
batch_size=8,
num_workers=5):
super().__init__()
self.batch_size = batch_size
self.max_len = max_len
self.train_file_path = train_file
self.test_file_path = test_file
if tok is None:
self.tok = get_kobart_tokenizer()
else:
self.tok = tok
self.num_workers = num_workers
@staticmethod
def add_model_specific_args(parent_parser):
parser = argparse.ArgumentParser(
parents=[parent_parser], add_help=False)
parser.add_argument('--num_workers',
type=int,
default=5,
help='num of worker for dataloader')
return parser
# OPTIONAL, called for every GPU/machine (assigning state is OK)
def setup(self, stage):
# split dataset
self.train = KoBARTSummaryDataset(self.train_file_path,
self.tok,
self.max_len)
self.test = KoBARTSummaryDataset(self.test_file_path,
self.tok,
self.max_len)
def train_dataloader(self):
train = DataLoader(self.train,
batch_size=self.batch_size,
num_workers=self.num_workers, shuffle=True)
return train
def val_dataloader(self):
val = DataLoader(self.test,
batch_size=self.batch_size,
num_workers=self.num_workers, shuffle=False)
return val
def test_dataloader(self):
test = DataLoader(self.test,
batch_size=self.batch_size,
num_workers=self.num_workers, shuffle=False)
return test
class Base(pl.LightningModule):
def __init__(self, hparams, **kwargs) -> None:
super(Base, self).__init__()
self.hparams = hparams
@staticmethod
def add_model_specific_args(parent_parser):
# add model specific args
parser = argparse.ArgumentParser(
parents=[parent_parser], add_help=False)
parser.add_argument('--batch-size',
type=int,
default=14,
                            help='batch size for training (default: 14)')
parser.add_argument('--lr',
type=float,
default=3e-5,
help='The initial learning rate')
parser.add_argument('--warmup_ratio',
type=float,
default=0.1,
help='warmup ratio')
parser.add_argument('--model_path',
type=str,
default=None,
help='kobart model path')
return parser
def configure_optimizers(self):
# Prepare optimizer
param_optimizer = list(self.model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(
nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(
nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters,
lr=self.hparams.lr, correct_bias=False)
# warm up lr
num_workers = (self.hparams.gpus if self.hparams.gpus is not None else 1) * (self.hparams.num_nodes if self.hparams.num_nodes is not None else 1)
data_len = len(self.train_dataloader().dataset)
logging.info(f'number of workers {num_workers}, data length {data_len}')
num_train_steps = int(data_len / (self.hparams.batch_size * num_workers) * self.hparams.max_epochs)
logging.info(f'num_train_steps : {num_train_steps}')
num_warmup_steps = int(num_train_steps * self.hparams.warmup_ratio)
logging.info(f'num_warmup_steps : {num_warmup_steps}')
scheduler = get_cosine_schedule_with_warmup(
optimizer,
num_warmup_steps=num_warmup_steps, num_training_steps=num_train_steps)
lr_scheduler = {'scheduler': scheduler,
'monitor': 'loss', 'interval': 'step',
'frequency': 1}
return [optimizer], [lr_scheduler]
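# The step budget above reduces to
#   num_train_steps = data_len / (batch_size * world_size) * max_epochs
# with warmup covering ``warmup_ratio`` of those steps. A self-contained sketch
# of the same arithmetic (the concrete numbers are assumptions for illustration):
def _schedule_budget_sketch(data_len=100_000, batch_size=28, world_size=1,
                            max_epochs=3, warmup_ratio=0.1):
    # mirrors the computation in Base.configure_optimizers
    num_train_steps = int(data_len / (batch_size * world_size) * max_epochs)
    num_warmup_steps = int(num_train_steps * warmup_ratio)
    return num_train_steps, num_warmup_steps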
class KoBARTConditionalGeneration(Base):
def __init__(self, hparams, **kwargs):
super(KoBARTConditionalGeneration, self).__init__(hparams, **kwargs)
self.model = BartForConditionalGeneration.from_pretrained(get_pytorch_kobart_model())
self.model.train()
self.bos_token = '<s>'
self.eos_token = '</s>'
self.pad_token_id = 0
self.tokenizer = get_kobart_tokenizer()
def forward(self, inputs):
attention_mask = inputs['input_ids'].ne(self.pad_token_id).float()
decoder_attention_mask = inputs['decoder_input_ids'].ne(self.pad_token_id).float()
return self.model(input_ids=inputs['input_ids'],
attention_mask=attention_mask,
decoder_input_ids=inputs['decoder_input_ids'],
decoder_attention_mask=decoder_attention_mask,
labels=inputs['labels'], return_dict=True)
def training_step(self, batch, batch_idx):
outs = self(batch)
loss = outs.loss
self.log('train_loss', loss, prog_bar=True)
return loss
def validation_step(self, batch, batch_idx):
outs = self(batch)
loss = outs['loss']
return (loss)
def validation_epoch_end(self, outputs):
losses = []
for loss in outputs:
losses.append(loss)
self.log('val_loss', torch.stack(losses).mean(), prog_bar=True)
if __name__ == '__main__':
parser = Base.add_model_specific_args(parser)
parser = ArgsBase.add_model_specific_args(parser)
parser = KobartSummaryModule.add_model_specific_args(parser)
parser = pl.Trainer.add_argparse_args(parser)
args = parser.parse_args()
logging.info(args)
model = KoBARTConditionalGeneration(args)
dm = KobartSummaryModule(args.train_file,
args.test_file,
None,
max_len=args.max_len,
batch_size=args.batch_size,
num_workers=args.num_workers)
checkpoint_callback = pl.callbacks.ModelCheckpoint(monitor='val_loss',
dirpath=args.default_root_dir,
filename='model_chp/{epoch:02d}-{val_loss:.3f}',
verbose=True,
save_last=True,
mode='min',
save_top_k=-1,
prefix='kobart_translation')
tb_logger = pl_loggers.TensorBoardLogger(os.path.join(args.default_root_dir, 'tb_logs'))
lr_logger = pl.callbacks.LearningRateMonitor()
trainer = pl.Trainer.from_argparse_args(args, logger=tb_logger,
callbacks=[checkpoint_callback, lr_logger])
trainer.fit(model, dm)
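    # A typical invocation sketch; the flags are real argparse options defined
    # above or added by pl.Trainer, but the values are assumptions:
    #
    #   python train.py --gradient_clip_val 1.0 --max_epochs 3 --gpus 1 \
    #                   --default_root_dir logs --batch_size 8 --num_workers 4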
| 1.976563 | 2 |
homeassistant/components/shelly/sensor.py | RavensburgOP/core | 1 | 2614 |
"""Sensor for Shelly."""
from __future__ import annotations
from datetime import timedelta
import logging
from typing import Final, cast
import aioshelly
from homeassistant.components import sensor
from homeassistant.components.sensor import SensorEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONCENTRATION_PARTS_PER_MILLION,
DEGREE,
ELECTRIC_CURRENT_AMPERE,
ELECTRIC_POTENTIAL_VOLT,
ENERGY_KILO_WATT_HOUR,
LIGHT_LUX,
PERCENTAGE,
POWER_WATT,
SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import StateType
from homeassistant.util import dt
from . import ShellyDeviceWrapper
from .const import LAST_RESET_NEVER, LAST_RESET_UPTIME, SHAIR_MAX_WORK_HOURS
from .entity import (
BlockAttributeDescription,
RestAttributeDescription,
ShellyBlockAttributeEntity,
ShellyRestAttributeEntity,
ShellySleepingBlockAttributeEntity,
async_setup_entry_attribute_entities,
async_setup_entry_rest,
)
from .utils import get_device_uptime, temperature_unit
_LOGGER: Final = logging.getLogger(__name__)
SENSORS: Final = {
("device", "battery"): BlockAttributeDescription(
name="Battery",
unit=PERCENTAGE,
device_class=sensor.DEVICE_CLASS_BATTERY,
state_class=sensor.STATE_CLASS_MEASUREMENT,
removal_condition=lambda settings, _: settings.get("external_power") == 1,
),
("device", "deviceTemp"): BlockAttributeDescription(
name="Device Temperature",
unit=temperature_unit,
value=lambda value: round(value, 1),
device_class=sensor.DEVICE_CLASS_TEMPERATURE,
state_class=sensor.STATE_CLASS_MEASUREMENT,
default_enabled=False,
),
("emeter", "current"): BlockAttributeDescription(
name="Current",
unit=ELECTRIC_CURRENT_AMPERE,
value=lambda value: value,
device_class=sensor.DEVICE_CLASS_CURRENT,
state_class=sensor.STATE_CLASS_MEASUREMENT,
),
("light", "power"): BlockAttributeDescription(
name="Power",
unit=POWER_WATT,
value=lambda value: round(value, 1),
device_class=sensor.DEVICE_CLASS_POWER,
state_class=sensor.STATE_CLASS_MEASUREMENT,
default_enabled=False,
),
("device", "power"): BlockAttributeDescription(
name="Power",
unit=POWER_WATT,
value=lambda value: round(value, 1),
device_class=sensor.DEVICE_CLASS_POWER,
state_class=sensor.STATE_CLASS_MEASUREMENT,
),
("emeter", "power"): BlockAttributeDescription(
name="Power",
unit=POWER_WATT,
value=lambda value: round(value, 1),
device_class=sensor.DEVICE_CLASS_POWER,
state_class=sensor.STATE_CLASS_MEASUREMENT,
),
("emeter", "voltage"): BlockAttributeDescription(
name="Voltage",
unit=ELECTRIC_POTENTIAL_VOLT,
value=lambda value: round(value, 1),
device_class=sensor.DEVICE_CLASS_VOLTAGE,
state_class=sensor.STATE_CLASS_MEASUREMENT,
),
("emeter", "powerFactor"): BlockAttributeDescription(
name="Power Factor",
unit=PERCENTAGE,
value=lambda value: round(value * 100, 1),
device_class=sensor.DEVICE_CLASS_POWER_FACTOR,
state_class=sensor.STATE_CLASS_MEASUREMENT,
),
("relay", "power"): BlockAttributeDescription(
name="Power",
unit=POWER_WATT,
value=lambda value: round(value, 1),
device_class=sensor.DEVICE_CLASS_POWER,
state_class=sensor.STATE_CLASS_MEASUREMENT,
),
("roller", "rollerPower"): BlockAttributeDescription(
name="Power",
unit=POWER_WATT,
value=lambda value: round(value, 1),
device_class=sensor.DEVICE_CLASS_POWER,
state_class=sensor.STATE_CLASS_MEASUREMENT,
),
("device", "energy"): BlockAttributeDescription(
name="Energy",
unit=ENERGY_KILO_WATT_HOUR,
value=lambda value: round(value / 60 / 1000, 2),
device_class=sensor.DEVICE_CLASS_ENERGY,
state_class=sensor.STATE_CLASS_MEASUREMENT,
last_reset=LAST_RESET_UPTIME,
),
("emeter", "energy"): BlockAttributeDescription(
name="Energy",
unit=ENERGY_KILO_WATT_HOUR,
value=lambda value: round(value / 1000, 2),
device_class=sensor.DEVICE_CLASS_ENERGY,
state_class=sensor.STATE_CLASS_MEASUREMENT,
last_reset=LAST_RESET_NEVER,
),
("emeter", "energyReturned"): BlockAttributeDescription(
name="Energy Returned",
unit=ENERGY_KILO_WATT_HOUR,
value=lambda value: round(value / 1000, 2),
device_class=sensor.DEVICE_CLASS_ENERGY,
state_class=sensor.STATE_CLASS_MEASUREMENT,
last_reset=LAST_RESET_NEVER,
),
("light", "energy"): BlockAttributeDescription(
name="Energy",
unit=ENERGY_KILO_WATT_HOUR,
value=lambda value: round(value / 60 / 1000, 2),
device_class=sensor.DEVICE_CLASS_ENERGY,
state_class=sensor.STATE_CLASS_MEASUREMENT,
default_enabled=False,
last_reset=LAST_RESET_UPTIME,
),
("relay", "energy"): BlockAttributeDescription(
name="Energy",
unit=ENERGY_KILO_WATT_HOUR,
value=lambda value: round(value / 60 / 1000, 2),
device_class=sensor.DEVICE_CLASS_ENERGY,
state_class=sensor.STATE_CLASS_MEASUREMENT,
last_reset=LAST_RESET_UPTIME,
),
("roller", "rollerEnergy"): BlockAttributeDescription(
name="Energy",
unit=ENERGY_KILO_WATT_HOUR,
value=lambda value: round(value / 60 / 1000, 2),
device_class=sensor.DEVICE_CLASS_ENERGY,
state_class=sensor.STATE_CLASS_MEASUREMENT,
last_reset=LAST_RESET_UPTIME,
),
("sensor", "concentration"): BlockAttributeDescription(
name="Gas Concentration",
unit=CONCENTRATION_PARTS_PER_MILLION,
icon="mdi:gauge",
state_class=sensor.STATE_CLASS_MEASUREMENT,
),
("sensor", "extTemp"): BlockAttributeDescription(
name="Temperature",
unit=temperature_unit,
value=lambda value: round(value, 1),
device_class=sensor.DEVICE_CLASS_TEMPERATURE,
state_class=sensor.STATE_CLASS_MEASUREMENT,
available=lambda block: cast(bool, block.extTemp != 999),
),
("sensor", "humidity"): BlockAttributeDescription(
name="Humidity",
unit=PERCENTAGE,
value=lambda value: round(value, 1),
device_class=sensor.DEVICE_CLASS_HUMIDITY,
state_class=sensor.STATE_CLASS_MEASUREMENT,
available=lambda block: cast(bool, block.extTemp != 999),
),
("sensor", "luminosity"): BlockAttributeDescription(
name="Luminosity",
unit=LIGHT_LUX,
device_class=sensor.DEVICE_CLASS_ILLUMINANCE,
state_class=sensor.STATE_CLASS_MEASUREMENT,
),
("sensor", "tilt"): BlockAttributeDescription(
name="Tilt",
unit=DEGREE,
icon="mdi:angle-acute",
state_class=sensor.STATE_CLASS_MEASUREMENT,
),
("relay", "totalWorkTime"): BlockAttributeDescription(
name="Lamp Life",
unit=PERCENTAGE,
icon="mdi:progress-wrench",
value=lambda value: round(100 - (value / 3600 / SHAIR_MAX_WORK_HOURS), 1),
extra_state_attributes=lambda block: {
"Operational hours": round(block.totalWorkTime / 3600, 1)
},
),
("adc", "adc"): BlockAttributeDescription(
name="ADC",
unit=ELECTRIC_POTENTIAL_VOLT,
value=lambda value: round(value, 1),
device_class=sensor.DEVICE_CLASS_VOLTAGE,
state_class=sensor.STATE_CLASS_MEASUREMENT,
),
("sensor", "sensorOp"): BlockAttributeDescription(
name="Operation",
icon="mdi:cog-transfer",
value=lambda value: value,
extra_state_attributes=lambda block: {"self_test": block.selfTest},
),
}
REST_SENSORS: Final = {
"rssi": RestAttributeDescription(
name="RSSI",
unit=SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
value=lambda status, _: status["wifi_sta"]["rssi"],
device_class=sensor.DEVICE_CLASS_SIGNAL_STRENGTH,
state_class=sensor.STATE_CLASS_MEASUREMENT,
default_enabled=False,
),
"uptime": RestAttributeDescription(
name="Uptime",
value=get_device_uptime,
device_class=sensor.DEVICE_CLASS_TIMESTAMP,
default_enabled=False,
),
}
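# Minimal sketch of how a ``RestAttributeDescription.value`` callable resolves a
# reading, assuming the description object exposes ``value`` as an attribute and
# a status payload shaped after Shelly's /status JSON (both are assumptions):
#
#   _status = {"wifi_sta": {"rssi": -62}}
#   REST_SENSORS["rssi"].value(_status, None)   # -> -62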
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up sensors for device."""
if config_entry.data["sleep_period"]:
await async_setup_entry_attribute_entities(
hass, config_entry, async_add_entities, SENSORS, ShellySleepingSensor
)
else:
await async_setup_entry_attribute_entities(
hass, config_entry, async_add_entities, SENSORS, ShellySensor
)
await async_setup_entry_rest(
hass, config_entry, async_add_entities, REST_SENSORS, ShellyRestSensor
)
class ShellySensor(ShellyBlockAttributeEntity, SensorEntity):
"""Represent a shelly sensor."""
def __init__(
self,
wrapper: ShellyDeviceWrapper,
block: aioshelly.Block,
attribute: str,
description: BlockAttributeDescription,
) -> None:
"""Initialize sensor."""
super().__init__(wrapper, block, attribute, description)
self._last_value: float | None = None
if description.last_reset == LAST_RESET_NEVER:
self._attr_last_reset = dt.utc_from_timestamp(0)
elif description.last_reset == LAST_RESET_UPTIME:
self._attr_last_reset = (
dt.utcnow() - timedelta(seconds=wrapper.device.status["uptime"])
).replace(second=0, microsecond=0)
@property
def state(self) -> StateType:
"""Return value of sensor."""
if (
self.description.last_reset == LAST_RESET_UPTIME
and self.attribute_value is not None
):
value = cast(float, self.attribute_value)
if self._last_value and self._last_value > value:
self._attr_last_reset = dt.utcnow().replace(second=0, microsecond=0)
_LOGGER.info("Energy reset detected for entity %s", self.name)
self._last_value = value
return self.attribute_value
@property
def state_class(self) -> str | None:
"""State class of sensor."""
return self.description.state_class
@property
def unit_of_measurement(self) -> str | None:
"""Return unit of sensor."""
return cast(str, self._unit)
class ShellyRestSensor(ShellyRestAttributeEntity, SensorEntity):
"""Represent a shelly REST sensor."""
@property
def state(self) -> StateType:
"""Return value of sensor."""
return self.attribute_value
@property
def state_class(self) -> str | None:
"""State class of sensor."""
return self.description.state_class
@property
def unit_of_measurement(self) -> str | None:
"""Return unit of sensor."""
return self.description.unit
class ShellySleepingSensor(ShellySleepingBlockAttributeEntity, SensorEntity):
"""Represent a shelly sleeping sensor."""
@property
def state(self) -> StateType:
"""Return value of sensor."""
if self.block is not None:
return self.attribute_value
return self.last_state
@property
def state_class(self) -> str | None:
"""State class of sensor."""
return self.description.state_class
@property
def unit_of_measurement(self) -> str | None:
"""Return unit of sensor."""
return cast(str, self._unit)
| 1.851563 | 2 |
tests/web/config.py | zcqian/biothings.api | 0 | 2615 |
"""
Web settings to override for testing.
"""
import os
from biothings.web.settings.default import QUERY_KWARGS
# *****************************************************************************
# Elasticsearch Variables
# *****************************************************************************
ES_INDEX = 'bts_test'
ES_DOC_TYPE = 'gene'
ES_SCROLL_SIZE = 60
# *****************************************************************************
# User Input Control
# *****************************************************************************
# use a smaller size for testing
QUERY_KWARGS['GET']['facet_size']['default'] = 3
QUERY_KWARGS['GET']['facet_size']['max'] = 5
QUERY_KWARGS['POST']['q']['jsoninput'] = True
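# With the overrides above, a request like ``/query?facet_size=10`` should be
# capped at 5, and an omitted facet_size should default to 3. A sketch of the
# clamping idea (the actual enforcement lives in biothings' argument parsing,
# not in this settings file):
#
#   def _clamp(value, default=3, maximum=5):
#       return min(default if value is None else value, maximum)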
# *****************************************************************************
# Elasticsearch Query Builder
# *****************************************************************************
ALLOW_RANDOM_QUERY = True
ALLOW_NESTED_AGGS = True
USERQUERY_DIR = os.path.join(os.path.dirname(__file__), 'userquery')
# *****************************************************************************
# Endpoints Specifics
# *****************************************************************************
STATUS_CHECK = {
'id': '1017',
'index': 'bts_test',
'doc_type': '_all'
}
| 1.539063 | 2 |
InvenTree/InvenTree/management/commands/rebuild_thumbnails.py | rocheparadox/InvenTree | 656 | 2616 | """
Custom management command to rebuild thumbnail images
- May be required after importing a new dataset, for example
"""
import os
import logging
from PIL import UnidentifiedImageError
from django.core.management.base import BaseCommand
from django.conf import settings
from django.db.utils import OperationalError, ProgrammingError
from company.models import Company
from part.models import Part
logger = logging.getLogger("inventree-thumbnails")
class Command(BaseCommand):
"""
Rebuild all thumbnail images
"""
def rebuild_thumbnail(self, model):
"""
Rebuild the thumbnail specified by the "image" field of the provided model
"""
if not model.image:
return
img = model.image
url = img.thumbnail.name
loc = os.path.join(settings.MEDIA_ROOT, url)
if not os.path.exists(loc):
logger.info(f"Generating thumbnail image for '{img}'")
try:
model.image.render_variations(replace=False)
except FileNotFoundError:
logger.error(f"ERROR: Image file '{img}' is missing")
except UnidentifiedImageError:
logger.error(f"ERROR: Image file '{img}' is not a valid image")
def handle(self, *args, **kwargs):
logger.setLevel(logging.INFO)
logger.info("Rebuilding Part thumbnails")
for part in Part.objects.exclude(image=None):
try:
self.rebuild_thumbnail(part)
except (OperationalError, ProgrammingError):
logger.error("ERROR: Database read error.")
break
logger.info("Rebuilding Company thumbnails")
for company in Company.objects.exclude(image=None):
try:
self.rebuild_thumbnail(company)
except (OperationalError, ProgrammingError):
logger.error("ERROR: abase read error.")
break
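# Invocation sketch: this command is typically run through Django's management
# interface; the command name follows this file's name:
#
#   python manage.py rebuild_thumbnails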
| 2.421875 | 2 |
cogs/carbon.py | Baracchino-Della-Scuola/Bot | 6 | 2617 |
import discord
from discord.ext import commands
import urllib.parse
from .constants import themes, controls, languages, fonts, escales
import os
from pathlib import Path
from typing import Any
# from pyppeteer import launch
from io import BytesIO
import requests
def encode_url(text: str) -> str:
first_encoding = urllib.parse.quote(text, safe="*()")
    return urllib.parse.quote(first_encoding, safe="*")  # carbon.now.sh encodes text twice
def hex_to_rgb(hex: str) -> tuple:
"""
Args:
hex (str):
"""
return tuple(int(hex.lstrip("#")[i : i + 2], 16) for i in (0, 2, 4))
def parse_bg(background) -> str:
if background == "":
return "rgba(171, 184, 195, 1)"
elif background[0] == "#" or "(" not in background:
return f"rgba{hex_to_rgb(background) + (1,)}"
return background
def int_to_px(number) -> str:
return f"{number}px"
def int_to_percent(number) -> str:
return f"{number}%"
def trim_url(text: str) -> str:
if len(text) < 2000:
return text
if "%25" not in text:
return text[:2000]
    if text[2000:2003] == "%25":  # fixed slice: an escape starting exactly at the cut survives truncation
return text[:2000]
last_percent = text[:2000].rindex("%25")
return text[:last_percent]
_carbon_url = "https://carbonnowsh.herokuapp.com/"
def code_to_url(code: str) -> str:
return f"{_carbon_url}?&code={trim_url(encode_url(code))}"
class Carbon(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def carbonate(self, ctx, *, code):
carbon_url = code_to_url(code)
r = requests.get(carbon_url)
b = BytesIO(r.content)
await ctx.send(file=discord.File(fp=b, filename="code.png"))
async def setup(bot):
await bot.add_cog(Carbon(bot))
| 2.671875 | 3 |
examples/show_artist.py | jimcortez/spotipy_twisted | 0 | 2618 |
# shows artist info for a URN or URL
import spotipy_twisted
import sys
import pprint
if len(sys.argv) > 1:
urn = sys.argv[1]
else:
urn = 'spotify:artist:3jOstUTkEu2JkjvRdBA5Gu'
sp = spotipy_twisted.Spotify()
artist = sp.artist(urn)
pprint.pprint(artist)
| 2.53125 | 3 |
examples/mcp3xxx_mcp3002_single_ended_simpletest.py | sommersoft/Adafruit_CircuitPython_MCP3xxx | 0 | 2619 |
import busio
import digitalio
import board
import adafruit_mcp3xxx.mcp3002 as MCP
from adafruit_mcp3xxx.analog_in import AnalogIn
# create the spi bus
spi = busio.SPI(clock=board.SCK, MISO=board.MISO, MOSI=board.MOSI)
# create the cs (chip select)
cs = digitalio.DigitalInOut(board.D5)
# create the mcp object
mcp = MCP.MCP3002(spi, cs)
# create an analog input channel on pin 0
chan = AnalogIn(mcp, MCP.P0)
print("Raw ADC Value: ", chan.value)
print("ADC Voltage: " + str(chan.voltage) + "V")
| 3.125 | 3 |
glue/core/data_factories/tables.py | rosteen/glue | 550 | 2620 | from glue.core.data_factories.helpers import has_extension
from glue.config import data_factory
__all__ = ['tabular_data']
@data_factory(label="ASCII Table",
identifier=has_extension('csv txt tsv tbl dat '
'csv.gz txt.gz tbl.bz '
'dat.gz'),
priority=1)
def tabular_data(path, **kwargs):
from glue.core.data_factories.astropy_table import astropy_tabular_data
from glue.core.data_factories.pandas import pandas_read_table
for fac in [astropy_tabular_data, pandas_read_table]:
try:
return fac(path, **kwargs)
except Exception:
pass
else:
raise IOError("Could not parse file: %s" % path)
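# Usage sketch (the file name is an assumption):
#
#   d = tabular_data('catalog.csv')   # tries the astropy reader first,
#                                     # then falls back to pandas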
| 2.640625 | 3 |
code_doc/views/author_views.py | coordt/code_doc | 0 | 2621 |
from django.shortcuts import render
from django.http import Http404
from django.views.generic.edit import UpdateView
from django.views.generic import ListView, View
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.utils.decorators import method_decorator
import logging
from ..models.projects import Project
from ..models.authors import Author
from ..forms import AuthorForm
from .permission_helpers import PermissionOnObjectViewMixin
# logger for this file
logger = logging.getLogger(__name__)
class AuthorListView(ListView):
"""A generic view of the authors in a list"""
paginate_by = 10
template_name = "code_doc/authors/author_list.html"
context_object_name = "authors"
model = Author
def detail_author(request, author_id):
try:
author = Author.objects.get(pk=author_id)
except Author.DoesNotExist:
raise Http404
project_list = Project.objects.filter(authors=author)
coauthor_list = (
Author.objects.filter(project__in=project_list).distinct().exclude(pk=author_id)
)
return render(
request,
"code_doc/authors/author_details.html",
{
"project_list": project_list,
"author": author,
"user": request.user,
"coauthor_list": coauthor_list,
},
)
class AuthorUpdateView(PermissionOnObjectViewMixin, UpdateView):
"""View for editing information about an Author
.. note:: in order to be able to edit an Author, the user should have the
'code_doc.author_edit' permission on the Author object.
"""
form_class = AuthorForm
model = Author
permissions_on_object = ("code_doc.author_edit",)
permissions_object_getter = "get_author_from_request"
template_name = "code_doc/authors/author_edit.html"
pk_url_kwarg = "author_id"
def get_author_from_request(self, request, *args, **kwargs):
# TODO check if needed
try:
return Author.objects.get(pk=kwargs["author_id"])
except Author.DoesNotExist:
logger.warning(
"[AuthorUpdateView] non existent Author with id %s", kwargs["author_id"]
)
return None
class MaintainerProfileView(View):
"""Manages the views associated to the maintainers"""
@method_decorator(login_required)
def get(self, request, maintainer_id):
try:
maintainer = User.objects.get(pk=maintainer_id)
        except User.DoesNotExist:  # was Project.DoesNotExist, which would never match here
raise Http404
projects = Project.objects.filter(administrators=maintainer)
return render(
request,
"code_doc/maintainer_details.html",
{"projects": projects, "maintainer": maintainer},
)
@method_decorator(login_required)
def post(self, request):
pass
| 2.359375 | 2 |
d00dfeed/analyses/print_sloc_per_soc.py | rehosting/rehosting_sok | 4 | 2622 | # External deps
import os, sys, json
from pathlib import Path
from typing import Dict, List
# Internal deps
os.chdir(sys.path[0])
sys.path.append("..")
import df_common as dfc
import analyses_common as ac
# Generated files directory
GEN_FILE_DIR = str(Path(__file__).resolve().parent.parent) + os.sep + "generated_files" # TODO: ugly parent.parent pathing
if os.path.exists(GEN_FILE_DIR):
sys.path.append(GEN_FILE_DIR)
if os.path.exists(os.path.join(GEN_FILE_DIR, "sloc_cnt.py")):
from sloc_cnt import DRIVER_NAME_TO_SLOC
else:
print("Error: no SLOC file! Run \'df_analyze.py\' with \'--linux-src-dir\'")
sys.exit(1)
if __name__ == "__main__":
json_files = ac.argparse_and_get_files("Graph SLOC/SoC data")
soc_sloc_by_arch: Dict[str, List[int]] = {}
print("Gathering SLOC average by arch...")
from graph_dd_sloc_by_arch import get_sloc_avg_and_list_by_arch
cmp_by_arch = ac.build_dict_two_lvl_cnt(json_files, dfc.JSON_ARC, dfc.JSON_CMP_STR)
avg_sloc_by_arch, sloc_list_by_arch = get_sloc_avg_and_list_by_arch(cmp_by_arch, verbose = False)
# Collection
print("Iterating DTBs/SoCs...")
for dtb_json in json_files:
with open(dtb_json) as json_file:
data = json.load(json_file)
soc_sloc = 0
arch = data[dfc.JSON_ARC]
cmp_strs = data[dfc.JSON_CMP_STR]
# Total SLOC for this SoC
for cmp_str in cmp_strs:
driver_sloc = dfc.cmp_str_to_sloc(cmp_str)
if not driver_sloc: # Closed-source driver
driver_sloc = avg_sloc_by_arch[arch]
soc_sloc += driver_sloc
#print("{}: {}".format(cmp_str, driver_sloc))
            if arch not in soc_sloc_by_arch:
                soc_sloc_by_arch[arch] = []
            # append unconditionally so the first SoC seen per arch is not dropped
            soc_sloc_by_arch[arch].append(soc_sloc)
print("{} ({}): {}".format(dtb_json.split(os.sep)[-1], arch, soc_sloc))
# Final stats
ac.print_mean_median_std_dev_for_dict_of_lists(soc_sloc_by_arch,
"\nSloc Per Soc, format: [arch : (mean, median, std_dev)]\n")
| 2.296875 | 2 |
mingpt/lr_decay.py | asigalov61/minGPT | 18 | 2623 | import math
import pytorch_lightning as pl
class LearningRateDecayCallback(pl.Callback):
    def __init__(self, learning_rate, warmup_tokens=375e6, final_tokens=260e9,
                 lr_decay=True):
        # NOTE: both token-count defaults were redacted in the source; 375e6 and
        # 260e9 are assumptions following minGPT's commonly used settings
super().__init__()
self.learning_rate = learning_rate
self.tokens = 0
self.final_tokens = final_tokens
self.lr_decay = lr_decay
self.warmup_tokens = warmup_tokens
def on_train_batch_end(self, trainer, pl_module, batch, batch_idx, dataloader_idx):
optimizer = trainer.optimizers[0]
_, y = batch
if self.lr_decay:
self.tokens += (y >= 0).sum() # number of tokens processed this step (i.e. label is not -100)
if self.tokens < self.warmup_tokens:
# linear warmup
lr_mult = float(self.tokens) / float(max(1, self.warmup_tokens))
else:
# cosine learning rate decay
progress = float(self.tokens - self.warmup_tokens) / float(
max(1, self.final_tokens - self.warmup_tokens))
lr_mult = max(0.1, 0.5 * (1.0 + math.cos(math.pi * progress)))
lr = self.learning_rate * lr_mult
for param_group in optimizer.param_groups:
                param_group['lr'] = lr
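# Usage sketch; the hyper-parameter values are assumptions, not repo defaults:
#
#   import pytorch_lightning as pl
#   lr_cb = LearningRateDecayCallback(learning_rate=6e-4,
#                                     warmup_tokens=375e6, final_tokens=260e9)
#   trainer = pl.Trainer(max_epochs=1, callbacks=[lr_cb])
| 2.421875 | 2 |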
apprise/config/ConfigBase.py | calvinbui/apprise | 0 | 2624 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2020 <NAME> <<EMAIL>>
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import re
import six
import yaml
import time
from .. import plugins
from ..AppriseAsset import AppriseAsset
from ..URLBase import URLBase
from ..common import ConfigFormat
from ..common import CONFIG_FORMATS
from ..common import ContentIncludeMode
from ..utils import GET_SCHEMA_RE
from ..utils import parse_list
from ..utils import parse_bool
from ..utils import parse_urls
from . import SCHEMA_MAP
# Test whether token is valid or not
VALID_TOKEN = re.compile(
r'(?P<token>[a-z0-9][a-z0-9_]+)', re.I)
class ConfigBase(URLBase):
"""
This is the base class for all supported configuration sources
"""
# The Default Encoding to use if not otherwise detected
encoding = 'utf-8'
# The default expected configuration format unless otherwise
# detected by the sub-modules
default_config_format = ConfigFormat.TEXT
# This is only set if the user overrides the config format on the URL
# this should always initialize itself as None
config_format = None
    # Don't read more than this amount of data into memory, as there is no
    # reason we should be reading in more. This is more of a safeguard than
    # anything else. 128KB (131072B)
max_buffer_size = 131072
# By default all configuration is not includable using the 'include'
# line found in configuration files.
allow_cross_includes = ContentIncludeMode.NEVER
# the config path manages the handling of relative include
config_path = os.getcwd()
def __init__(self, cache=True, recursion=0, insecure_includes=False,
**kwargs):
"""
Initialize some general logging and common server arguments that will
keep things consistent when working with the configurations that
inherit this class.
        By default we cache our responses so that subsequent calls do not
        cause the content to be retrieved again. For local file references
        this makes no difference at all. But for remote content, more than
        one call can be made to retrieve the (same) data. Disabling the cache
        can therefore be somewhat inefficient; only disable caching if you
        understand the consequences.
You can alternatively set the cache value to an int identifying the
        number of seconds the previously retrieved content can exist for before it
should be considered expired.
recursion defines how deep we recursively handle entries that use the
`include` keyword. This keyword requires us to fetch more configuration
from another source and add it to our existing compilation. If the
file we remotely retrieve also has an `include` reference, we will only
advance through it if recursion is set to 2 deep. If set to zero
        it is off. There is no limit to how high you set this value, but it
        is recommended to keep it low if you do intend to use it.
        insecure_includes is disabled by default. When set to True, all
        Apprise Config files marked to be in STRICT mode are treated as being
        in ALWAYS mode.
        Take a file:// based configuration for example: only a file:// based
        configuration can include another file:// based one, because it is set
        to STRICT mode. If an http:// based configuration file attempted to
        include a file:// one, it would fail. However, this include would be
        possible if insecure_includes is set to True.
        There are cases where a self-hosting Apprise developer may wish to load
        configuration from memory (in a string format) that contains 'include'
        entries (even file:// based ones). In these circumstances, if you want
        these 'include' entries to be honored, this value must be set to True.
"""
super(ConfigBase, self).__init__(**kwargs)
        # Tracks the time the content was last retrieved on. This plays a role
        # in cases where we are not caching our response and are required to
        # re-retrieve our settings.
self._cached_time = None
# Tracks previously loaded content for speed
self._cached_servers = None
# Initialize our recursion value
self.recursion = recursion
# Initialize our insecure_includes flag
self.insecure_includes = insecure_includes
if 'encoding' in kwargs:
# Store the encoding
self.encoding = kwargs.get('encoding')
if 'format' in kwargs \
and isinstance(kwargs['format'], six.string_types):
# Store the enforced config format
self.config_format = kwargs.get('format').lower()
if self.config_format not in CONFIG_FORMATS:
# Simple error checking
err = 'An invalid config format ({}) was specified.'.format(
self.config_format)
self.logger.warning(err)
raise TypeError(err)
# Set our cache flag; it can be True or a (positive) integer
try:
self.cache = cache if isinstance(cache, bool) else int(cache)
if self.cache < 0:
err = 'A negative cache value ({}) was specified.'.format(
cache)
self.logger.warning(err)
raise TypeError(err)
except (ValueError, TypeError):
err = 'An invalid cache value ({}) was specified.'.format(cache)
self.logger.warning(err)
raise TypeError(err)
return
def servers(self, asset=None, **kwargs):
"""
        Reads the loaded configuration and returns all of the services
        that could be parsed and loaded.
"""
if not self.expired():
# We already have cached results to return; use them
return self._cached_servers
# Our cached response object
self._cached_servers = list()
# read() causes the child class to do whatever it takes for the
# config plugin to load the data source and return unparsed content
# None is returned if there was an error or simply no data
content = self.read(**kwargs)
if not isinstance(content, six.string_types):
# Set the time our content was cached at
self._cached_time = time.time()
# Nothing more to do; return our empty cache list
return self._cached_servers
        # Our configuration format uses a default if one wasn't detected
        # or enforced.
config_format = \
self.default_config_format \
if self.config_format is None else self.config_format
# Dynamically load our parse_ function based on our config format
fn = getattr(ConfigBase, 'config_parse_{}'.format(config_format))
# Initialize our asset object
asset = asset if isinstance(asset, AppriseAsset) else self.asset
# Execute our config parse function which always returns a tuple
# of our servers and our configuration
servers, configs = fn(content=content, asset=asset)
self._cached_servers.extend(servers)
# Configuration files were detected; recursively populate them
# If we have been configured to do so
for url in configs:
if self.recursion > 0:
# Attempt to acquire the schema at the very least to allow
# our configuration based urls.
schema = GET_SCHEMA_RE.match(url)
if schema is None:
# Plan B is to assume we're dealing with a file
schema = 'file'
if not os.path.isabs(url):
# We're dealing with a relative path; prepend
# our current config path
url = os.path.join(self.config_path, url)
url = '{}://{}'.format(schema, URLBase.quote(url))
else:
# Ensure our schema is always in lower case
schema = schema.group('schema').lower()
# Some basic validation
if schema not in SCHEMA_MAP:
ConfigBase.logger.warning(
'Unsupported include schema {}.'.format(schema))
continue
# Parse our url details of the server object as dictionary
# containing all of the information parsed from our URL
results = SCHEMA_MAP[schema].parse_url(url)
if not results:
# Failed to parse the server URL
self.logger.warning(
'Unparseable include URL {}'.format(url))
continue
# Handle cross inclusion based on allow_cross_includes rules
if (SCHEMA_MAP[schema].allow_cross_includes ==
ContentIncludeMode.STRICT
and schema not in self.schemas()
and not self.insecure_includes) or \
SCHEMA_MAP[schema].allow_cross_includes == \
ContentIncludeMode.NEVER:
# Prevent the loading if insecure base protocols
ConfigBase.logger.warning(
'Including {}:// based configuration is prohibited. '
'Ignoring URL {}'.format(schema, url))
continue
# Prepare our Asset Object
results['asset'] = asset
# No cache is required because we're just lumping this in
# and associating it with the cache value we've already
# declared (prior to our recursion)
results['cache'] = False
# Recursion can never be parsed from the URL; we decrement
# it one level
results['recursion'] = self.recursion - 1
# Insecure Includes flag can never be parsed from the URL
results['insecure_includes'] = self.insecure_includes
try:
# Attempt to create an instance of our plugin using the
# parsed URL information
cfg_plugin = SCHEMA_MAP[results['schema']](**results)
except Exception as e:
# the arguments are invalid or can not be used.
self.logger.warning(
'Could not load include URL: {}'.format(url))
self.logger.debug('Loading Exception: {}'.format(str(e)))
continue
# if we reach here, we can now add this servers found
# in this configuration file to our list
self._cached_servers.extend(
cfg_plugin.servers(asset=asset))
# We no longer need our configuration object
del cfg_plugin
else:
self.logger.debug(
'Recursion limit reached; ignoring Include URL: %s' % url)
if self._cached_servers:
self.logger.info('Loaded {} entries from {}'.format(
len(self._cached_servers), self.url()))
else:
self.logger.warning(
'Failed to load Apprise configuration from {}'.format(
self.url()))
# Set the time our content was cached at
self._cached_time = time.time()
return self._cached_servers
def read(self):
"""
        This method should be implemented by the child classes
"""
return None
def expired(self):
"""
Simply returns True if the configuration should be considered
as expired or False if content should be retrieved.
"""
if isinstance(self._cached_servers, list) and self.cache:
# We have enough reason to look further into our cached content
# and verify it has not expired.
if self.cache is True:
# we have not expired, return False
return False
# Verify our cache time to determine whether we will get our
# content again.
age_in_sec = time.time() - self._cached_time
if age_in_sec <= self.cache:
# We have not expired; return False
return False
# If we reach here our configuration should be considered
# missing and/or expired.
return True
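    # Cache semantics sketch, driven entirely through the URL options handled in
    # parse_url() below (the URL and numbers are assumptions):
    #
    #   http://example.com/apprise.cfg?cache=300   -> re-fetched after 300 seconds
    #   http://example.com/apprise.cfg?cache=no    -> parsed as False, never cached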
@staticmethod
def parse_url(url, verify_host=True):
"""Parses the URL and returns it broken apart into a dictionary.
This is very specific and customized for Apprise.
Args:
url (str): The URL you want to fully parse.
verify_host (:obj:`bool`, optional): a flag kept with the parsed
URL which some child classes will later use to verify SSL
keys (if SSL transactions take place). Unless under very
                specific circumstances, it is strongly recommended that
you leave this default value set to True.
Returns:
A dictionary is returned containing the URL fully parsed if
successful, otherwise None is returned.
"""
results = URLBase.parse_url(url, verify_host=verify_host)
if not results:
# We're done; we failed to parse our url
return results
# Allow overriding the default config format
if 'format' in results['qsd']:
results['format'] = results['qsd'].get('format')
if results['format'] not in CONFIG_FORMATS:
URLBase.logger.warning(
'Unsupported format specified {}'.format(
results['format']))
del results['format']
# Defines the encoding of the payload
if 'encoding' in results['qsd']:
results['encoding'] = results['qsd'].get('encoding')
# Our cache value
if 'cache' in results['qsd']:
# First try to get it's integer value
try:
results['cache'] = int(results['qsd']['cache'])
except (ValueError, TypeError):
# No problem, it just isn't an integer; now treat it as a bool
# instead:
results['cache'] = parse_bool(results['qsd']['cache'])
return results
@staticmethod
def detect_config_format(content, **kwargs):
"""
Takes the specified content and attempts to detect the format type
The function returns the actual format type if detected, otherwise
it returns None
"""
# Detect Format Logic:
        # - A pound/hashtag (#) is always a comment character so we skip over
# lines matched here.
# - Detection begins on the first non-comment and non blank line
# matched.
# - If we find a string followed by a colon, we know we're dealing
# with a YAML file.
# - If we find a string that starts with a URL, or our tag
# definitions (accepting commas) followed by an equal sign we know
# we're dealing with a TEXT format.
# Define what a valid line should look like
valid_line_re = re.compile(
r'^\s*(?P<line>([;#]+(?P<comment>.*))|'
r'(?P<text>((?P<tag>[ \t,a-z0-9_-]+)=)?[a-z0-9]+://.*)|'
r'((?P<yaml>[a-z0-9]+):.*))?$', re.I)
try:
# split our content up to read line by line
content = re.split(r'\r*\n', content)
except TypeError:
# content was not expected string type
ConfigBase.logger.error(
'Invalid Apprise configuration specified.')
return None
# By default set our return value to None since we don't know
# what the format is yet
config_format = None
        # iterate over each line of the file to attempt to detect it;
        # stop the moment the type has been determined
for line, entry in enumerate(content, start=1):
result = valid_line_re.match(entry)
if not result:
# Invalid syntax
ConfigBase.logger.error(
'Undetectable Apprise configuration found '
'based on line {}.'.format(line))
# Take an early exit
return None
# Attempt to detect configuration
if result.group('yaml'):
config_format = ConfigFormat.YAML
ConfigBase.logger.debug(
'Detected YAML configuration '
'based on line {}.'.format(line))
break
elif result.group('text'):
config_format = ConfigFormat.TEXT
ConfigBase.logger.debug(
'Detected TEXT configuration '
'based on line {}.'.format(line))
break
# If we reach here, we have a comment entry
# Adjust default format to TEXT
config_format = ConfigFormat.TEXT
return config_format
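    # Detector behaviour sketch (the sample inputs are assumptions):
    #
    #   ConfigBase.detect_config_format('json://localhost')             -> TEXT
    #   ConfigBase.detect_config_format('urls:\n  - json://localhost')  -> YAML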
@staticmethod
def config_parse(content, asset=None, config_format=None, **kwargs):
"""
Takes the specified config content and loads it based on the specified
config_format. If a format isn't specified, then it is auto detected.
"""
if config_format is None:
# Detect the format
config_format = ConfigBase.detect_config_format(content)
if not config_format:
# We couldn't detect configuration
ConfigBase.logger.error('Could not detect configuration')
return (list(), list())
if config_format not in CONFIG_FORMATS:
# Invalid configuration type specified
ConfigBase.logger.error(
'An invalid configuration format ({}) was specified'.format(
config_format))
return (list(), list())
# Dynamically load our parse_ function based on our config format
fn = getattr(ConfigBase, 'config_parse_{}'.format(config_format))
# Execute our config parse function which always returns a list
return fn(content=content, asset=asset)
@staticmethod
def config_parse_text(content, asset=None):
"""
Parse the specified content as though it were a simple text file only
containing a list of URLs.
Return a tuple that looks like (servers, configs) where:
- servers contains a list of loaded notification plugins
- configs contains a list of additional configuration files
referenced.
You may also optionally associate an asset with the notification.
The file syntax is:
#
# pound/hashtag allow for line comments
#
        # One or more tags can be identified using commas (,) to separate
# them.
<Tag(s)>=<URL>
# Or you can use this format (no tags associated)
<URL>
# you can also use the keyword 'include' and identify a
# configuration location (like this file) which will be included
# as additional configuration entries when loaded.
include <ConfigURL>
"""
# A list of loaded Notification Services
servers = list()
# A list of additional configuration files referenced using
# the include keyword
configs = list()
# Define what a valid line should look like
valid_line_re = re.compile(
r'^\s*(?P<line>([;#]+(?P<comment>.*))|'
r'(\s*(?P<tags>[^=]+)=|=)?\s*'
r'(?P<url>[a-z0-9]{2,9}://.*)|'
r'include\s+(?P<config>.+))?\s*$', re.I)
try:
# split our content up to read line by line
content = re.split(r'\r*\n', content)
except TypeError:
# content was not expected string type
ConfigBase.logger.error(
'Invalid Apprise TEXT based configuration specified.')
return (list(), list())
for line, entry in enumerate(content, start=1):
result = valid_line_re.match(entry)
if not result:
# Invalid syntax
ConfigBase.logger.error(
'Invalid Apprise TEXT configuration format found '
'{} on line {}.'.format(entry, line))
                # Assume this is a file we shouldn't be parsing. Its owner
                # can read the error printed to the screen and take action
                # accordingly.
return (list(), list())
url, config = result.group('url'), result.group('config')
if not (url or config):
# Comment/empty line; do nothing
continue
if config:
ConfigBase.logger.debug('Include URL: {}'.format(config))
# Store our include line
configs.append(config.strip())
continue
# Acquire our url tokens
results = plugins.url_to_dict(url)
if results is None:
# Failed to parse the server URL
ConfigBase.logger.warning(
'Unparseable URL {} on line {}.'.format(url, line))
continue
# Build a list of tags to associate with the newly added
# notifications if any were set
results['tag'] = set(parse_list(result.group('tags')))
# Prepare our Asset Object
results['asset'] = \
asset if isinstance(asset, AppriseAsset) else AppriseAsset()
try:
# Attempt to create an instance of our plugin using the
# parsed URL information
plugin = plugins.SCHEMA_MAP[results['schema']](**results)
# Create log entry of loaded URL
ConfigBase.logger.debug('Loaded URL: {}'.format(plugin.url()))
except Exception as e:
# the arguments are invalid or can not be used.
ConfigBase.logger.warning(
'Could not load URL {} on line {}.'.format(
url, line))
ConfigBase.logger.debug('Loading Exception: %s' % str(e))
continue
# if we reach here, we successfully loaded our data
servers.append(plugin)
# Return what was loaded
return (servers, configs)
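    # Example of the accepted TEXT syntax (all URLs are placeholders):
    #
    #   # notify the devops team
    #   devops=mailto://user:password@example.com
    #   json://localhost
    #   include https://myserver/apprise.cfg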
@staticmethod
def config_parse_yaml(content, asset=None):
"""
Parse the specified content as though it were a yaml file
specifically formatted for Apprise.
Return a tuple that looks like (servers, configs) where:
- servers contains a list of loaded notification plugins
- configs contains a list of additional configuration files
referenced.
You may optionally associate an asset with the notification.
"""
# A list of loaded Notification Services
servers = list()
# A list of additional configuration files referenced using
# the include keyword
configs = list()
try:
# Load our data (safely)
result = yaml.load(content, Loader=yaml.SafeLoader)
except (AttributeError,
yaml.parser.ParserError,
yaml.error.MarkedYAMLError) as e:
# Invalid content
ConfigBase.logger.error(
'Invalid Apprise YAML data specified.')
ConfigBase.logger.debug(
'YAML Exception:{}{}'.format(os.linesep, e))
return (list(), list())
if not isinstance(result, dict):
# Invalid content
ConfigBase.logger.error(
'Invalid Apprise YAML based configuration specified.')
return (list(), list())
# YAML Version
version = result.get('version', 1)
if version != 1:
# Invalid syntax
ConfigBase.logger.error(
'Invalid Apprise YAML version specified {}.'.format(version))
return (list(), list())
#
# global asset object
#
asset = asset if isinstance(asset, AppriseAsset) else AppriseAsset()
tokens = result.get('asset', None)
if tokens and isinstance(tokens, dict):
for k, v in tokens.items():
if k.startswith('_') or k.endswith('_'):
# Entries are considered reserved if they start or end
# with an underscore
ConfigBase.logger.warning(
'Ignored asset key "{}".'.format(k))
continue
if not (hasattr(asset, k) and
isinstance(getattr(asset, k),
(bool, six.string_types))):
# We can't set a function or non-string set value
ConfigBase.logger.warning(
'Invalid asset key "{}".'.format(k))
continue
if v is None:
# Convert to an empty string
v = ''
if (isinstance(v, (bool, six.string_types))
and isinstance(getattr(asset, k), bool)):
# If the object in the Asset is a boolean, then
# we want to convert the specified string to
# match that.
setattr(asset, k, parse_bool(v))
elif isinstance(v, six.string_types):
# Set our asset object with the new value
setattr(asset, k, v.strip())
else:
# we must set strings with a string
ConfigBase.logger.warning(
'Invalid asset value to "{}".'.format(k))
continue
#
# global tag root directive
#
global_tags = set()
tags = result.get('tag', None)
if tags and isinstance(tags, (list, tuple, six.string_types)):
# Store any preset tags
global_tags = set(parse_list(tags))
#
# include root directive
#
includes = result.get('include', None)
if isinstance(includes, six.string_types):
# Support a single inline string or multiple ones separated by a
# comma and/or space
includes = parse_urls(includes)
elif not isinstance(includes, (list, tuple)):
# Not a problem; we simply have no includes
includes = list()
# Iterate over each config URL
for no, url in enumerate(includes):
if isinstance(url, six.string_types):
# Support a single inline string or multiple ones separated by
# a comma and/or space
configs.extend(parse_urls(url))
elif isinstance(url, dict):
# Store the url and ignore arguments associated
configs.extend(u for u in url.keys())
#
# urls root directive
#
urls = result.get('urls', None)
if not isinstance(urls, (list, tuple)):
# Not a problem; we simply have no urls
urls = list()
# Iterate over each URL
for no, url in enumerate(urls):
# Our results object is what we use to instantiate our object if
# we can. Reset it to None on each iteration
results = list()
if isinstance(url, six.string_types):
# We're just a simple URL string...
schema = GET_SCHEMA_RE.match(url)
if schema is None:
                    # Log invalid entries so that the maintainer of the
                    # config file at least has something to take action with.
ConfigBase.logger.warning(
'Invalid URL {}, entry #{}'.format(url, no + 1))
continue
# We found a valid schema worthy of tracking; store it's
# details:
_results = plugins.url_to_dict(url)
if _results is None:
ConfigBase.logger.warning(
'Unparseable URL {}, entry #{}'.format(
url, no + 1))
continue
# add our results to our global set
results.append(_results)
elif isinstance(url, dict):
# We are a url string with additional unescaped options. In
# this case we want to iterate over all of our options so we
# can at least tell the end user what entries were ignored
# due to errors
if six.PY2:
it = url.iteritems()
else: # six.PY3
it = iter(url.items())
# Track the URL to-load
_url = None
# Track last acquired schema
schema = None
for key, tokens in it:
# Test our schema
_schema = GET_SCHEMA_RE.match(key)
if _schema is None:
                        # Log invalid entries so that the maintainer of the
                        # config file at least has something to take action
                        # with.
ConfigBase.logger.warning(
'Ignored entry {} found under urls, entry #{}'
.format(key, no + 1))
continue
# Store our schema
schema = _schema.group('schema').lower()
# Store our URL and Schema Regex
_url = key
if _url is None:
# the loop above failed to match anything
ConfigBase.logger.warning(
'Unsupported URL, entry #{}'.format(no + 1))
continue
_results = plugins.url_to_dict(_url)
if _results is None:
# Setup dictionary
_results = {
# Minimum requirements
'schema': schema,
}
if isinstance(tokens, (list, tuple, set)):
# populate and/or override any results populated by
# parse_url()
for entries in tokens:
# Copy ourselves a template of our parsed URL as a base
# to work with
r = _results.copy()
# We are a url string with additional unescaped options
if isinstance(entries, dict):
if six.PY2:
_url, tokens = next(url.iteritems())
else: # six.PY3
_url, tokens = next(iter(url.items()))
# Tags you just can't over-ride
if 'schema' in entries:
del entries['schema']
# support our special tokens (if they're present)
if schema in plugins.SCHEMA_MAP:
entries = ConfigBase.__extract_special_tokens(
schema, entries)
# Extend our dictionary with our new entries
r.update(entries)
# add our results to our global set
results.append(r)
elif isinstance(tokens, dict):
# support our special tokens (if they're present)
if schema in plugins.SCHEMA_MAP:
tokens = ConfigBase.__extract_special_tokens(
schema, tokens)
# Copy ourselves a template of our parsed URL as a base to
# work with
r = _results.copy()
# add our result set
r.update(tokens)
# add our results to our global set
results.append(r)
else:
# add our results to our global set
results.append(_results)
else:
# Unsupported
ConfigBase.logger.warning(
'Unsupported Apprise YAML entry #{}'.format(no + 1))
continue
# Track our entries
entry = 0
while len(results):
# Increment our entry count
entry += 1
# Grab our first item
_results = results.pop(0)
# tag is a special keyword that is managed by Apprise object.
# The below ensures our tags are set correctly
if 'tag' in _results:
# Tidy our list up
_results['tag'] = \
set(parse_list(_results['tag'])) | global_tags
else:
# Just use the global settings
_results['tag'] = global_tags
for key in list(_results.keys()):
# Strip out any tokens we know that we can't accept and
# warn the user
match = VALID_TOKEN.match(key)
if not match:
ConfigBase.logger.warning(
'Ignoring invalid token ({}) found in YAML '
'configuration entry #{}, item #{}'
.format(key, no + 1, entry))
del _results[key]
ConfigBase.logger.trace(
'URL #{}: {} unpacked as:{}{}'
.format(no + 1, url, os.linesep, os.linesep.join(
['{}="{}"'.format(k, a)
for k, a in _results.items()])))
# Prepare our Asset Object
_results['asset'] = asset
try:
# Attempt to create an instance of our plugin using the
# parsed URL information
plugin = plugins.SCHEMA_MAP[_results['schema']](**_results)
# Create log entry of loaded URL
ConfigBase.logger.debug(
'Loaded URL: {}'.format(plugin.url()))
except Exception as e:
# the arguments are invalid or can not be used.
ConfigBase.logger.warning(
'Could not load Apprise YAML configuration '
'entry #{}, item #{}'
.format(no + 1, entry))
ConfigBase.logger.debug('Loading Exception: %s' % str(e))
continue
# if we reach here, we successfully loaded our data
servers.append(plugin)
return (servers, configs)
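    # Example of the accepted YAML syntax (all values are placeholders):
    #
    #   version: 1
    #   tag: admin
    #   include:
    #     - http://localhost/apprise.cfg
    #   urls:
    #     - json://localhost:
    #         tag: devops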
def pop(self, index=-1):
"""
Removes an indexed Notification Service from the stack and returns it.
By default, the last element of the list is removed.
"""
if not isinstance(self._cached_servers, list):
# Generate ourselves a list of content we can pull from
self.servers()
# Pop the element off of the stack
return self._cached_servers.pop(index)
@staticmethod
def __extract_special_tokens(schema, tokens):
"""
This function takes a list of tokens and updates them to no longer
include any special tokens such as +,-, and :
- schema must be a valid schema of a supported plugin type
- tokens must be a dictionary containing the yaml entries parsed.
        The idea here is that we can post-process a set of tokens provided in
        a YAML file where the user provided some of the special keywords.
        We effectively look up what these keywords map to and substitute in
        the appropriate values they're expected to carry.
"""
# Create a copy of our dictionary
tokens = tokens.copy()
for kw, meta in plugins.SCHEMA_MAP[schema]\
.template_kwargs.items():
# Determine our prefix:
prefix = meta.get('prefix', '+')
# Detect any matches
matches = \
{k[1:]: str(v) for k, v in tokens.items()
if k.startswith(prefix)}
if not matches:
# we're done with this entry
continue
if not isinstance(tokens.get(kw, None), dict):
# Invalid; correct it
tokens[kw] = dict()
# strip out processed tokens
tokens = {k: v for k, v in tokens.items()
if not k.startswith(prefix)}
# Update our entries
tokens[kw].update(matches)
# Return our tokens
return tokens
def __getitem__(self, index):
"""
Returns the indexed server entry associated with the loaded
notification servers
"""
if not isinstance(self._cached_servers, list):
# Generate ourselves a list of content we can pull from
self.servers()
return self._cached_servers[index]
def __iter__(self):
"""
Returns an iterator to our server list
"""
if not isinstance(self._cached_servers, list):
# Generate ourselves a list of content we can pull from
self.servers()
return iter(self._cached_servers)
def __len__(self):
"""
Returns the total number of servers loaded
"""
if not isinstance(self._cached_servers, list):
# Generate ourselves a list of content we can pull from
self.servers()
return len(self._cached_servers)
def __bool__(self):
"""
Allows the Apprise object to be wrapped in an Python 3.x based 'if
statement'. True is returned if our content was downloaded correctly.
"""
if not isinstance(self._cached_servers, list):
# Generate ourselves a list of content we can pull from
self.servers()
return True if self._cached_servers else False
def __nonzero__(self):
"""
Allows the Apprise object to be wrapped in an Python 2.x based 'if
statement'. True is returned if our content was downloaded correctly.
"""
if not isinstance(self._cached_servers, list):
# Generate ourselves a list of content we can pull from
self.servers()
return True if self._cached_servers else False
| 1.375 | 1 |
ffmpeg_util.py | manuel-fischer/ScrollRec | 0 | 2625 | import sys
import subprocess
from subprocess import Popen, PIPE
AV_LOG_QUIET = "quiet"
AV_LOG_PANIC = "panic"
AV_LOG_FATAL = "fatal"
AV_LOG_ERROR = "error"
AV_LOG_WARNING = "warning"
AV_LOG_INFO = "info"
AV_LOG_VERBOSE = "verbose"
AV_LOG_DEBUG = "debug"
ffmpeg_loglevel = AV_LOG_ERROR
IS_WIN32 = 'win32' in str(sys.platform).lower()
SUBPROCESS_ARGS = {}
if IS_WIN32:
startupinfo = subprocess.STARTUPINFO()
    startupinfo.dwFlags = subprocess.STARTF_USESHOWWINDOW
    # CREATE_NEW_CONSOLE belongs in Popen's creationflags, not STARTUPINFO.dwFlags,
    # so only the show-window flag is set here
startupinfo.wShowWindow = subprocess.SW_HIDE
SUBPROCESS_ARGS['startupinfo'] = startupinfo
def popen_ffmpeg(inner_args):
cmd = [
'ffmpeg',
*inner_args,
# logging
'-loglevel', ffmpeg_loglevel,
'-hide_banner',
]
process = Popen(cmd, stdout=PIPE, stderr=PIPE, **SUBPROCESS_ARGS)
stdout, stderr = process.communicate()
print(stderr.decode(), end='', file=sys.stderr)
    return stdout, stderr
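# Usage sketch (the file names and filter are assumptions):
#
#   out, err = popen_ffmpeg(['-i', 'in.mp4', '-vf', 'scale=1280:-2', '-y', 'out.mp4'])
| 2.515625 | 3 |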
setup.py | rizar/CLOSURE | 14 | 2626 | from setuptools import setup
setup(
name="nmn-iwp",
version="0.1",
keywords="",
packages=["vr", "vr.models"]
)
| 1 | 1 |
analysis_tools/PYTHON_RICARDO/output_ingress_egress/scripts/uniform_grid.py | lefevre-fraser/openmeta-mms | 0 | 2627 | """ Represent a triangulated surface using a 3D boolean grid"""
import logging
import numpy as np
from rpl.tools.ray_tracing.bsp_tree_poly import BSP_Element
from rpl.tools.geometry import geom_utils
import data_io
class BSP_Grid(object):
def __init__(self, node_array, tris, allocate_step=100000):
"""
Store the triangles with an enumeration so that even when they are subdivided their
identity is not lost.
"""
tri_nums = np.arange(len(tris), dtype=np.int32).reshape((len(tris), 1))
minus_ones = -np.ones((len(tris), 6), dtype=np.int32)
self.tris = np.hstack((tris, minus_ones, tri_nums))
self.allocate_step = allocate_step
self.node_array = node_array # Reference to the full list of nodes
self._resize()
self.next_free = len(node_array)
self.split_cache = np.zeros(len(self.tris), dtype=np.int32)
def _resize(self):
"""
Increase node array size by the allocate_step amount.
"""
self.array_size = len(self.node_array) + self.allocate_step
self.node_array = np.concatenate((self.node_array, np.zeros((self.allocate_step, 3))))
def add_node(self, node):
"""
Adds a new node to the end of the node array (expanding if required). Returns the index of
the newly added node.
"""
if self.next_free == self.array_size:
self._resize()
self.node_array[self.next_free] = node
self.next_free += 1
return self.next_free - 1
def prepare_add(self, num_add_nodes):
"""
Make sure that ``num_add_nodes`` can be added later without needing a resize.
Useful if adding nodes from within cython where resizing is tricky.
"""
if self.next_free + num_add_nodes >= self.array_size:
self._resize()
return self.next_free
def make_grid(veh_surfs, settings):
"""
Make coordinates of voxelated grid based on overall list of vehicle surfaces
"""
## Find overall bounding box
x_min, x_max = 1e30, -1e30
y_min, y_max = 1e30, -1e30
z_min, z_max = 1e30, -1e30
for key, veh_surf in veh_surfs.items():
x_min, x_max = min(x_min, np.min(veh_surf["x"])), max(x_max, np.max(veh_surf["x"]))
y_min, y_max = min(y_min, np.min(veh_surf["y"])), max(y_max, np.max(veh_surf["y"]))
z_min, z_max = min(z_min, np.min(veh_surf["z"])), max(z_max, np.max(veh_surf["z"]))
x_min, x_max = x_min - settings["voxel_size"], x_max + settings["voxel_size"]
y_min, y_max = y_min - settings["voxel_size"], y_max + settings["voxel_size"]
z_min, z_max = z_min - settings["voxel_size"], z_max + settings["voxel_size"]
###########################################
# Create the uniformly spaced grid points
x_grid = np.arange(x_min, x_max + settings["voxel_size"], settings["voxel_size"])
y_grid = np.arange(y_min, y_max + settings["voxel_size"], settings["voxel_size"])
z_grid = np.arange(z_min, z_max + settings["voxel_size"], settings["voxel_size"])
return x_grid, y_grid, z_grid
def convert_geom(veh_surf, tr_mat):
"""
Rotate nodes using provided transformation matrix; convert xyz node dict to nodes array
"""
veh_surf["nodes"] = np.vstack((veh_surf["x"], veh_surf["y"], veh_surf["z"])).T
veh_surf['nodes'] = np.dot(veh_surf['nodes'], tr_mat[:3, :3])
veh_surf["x"] = veh_surf['nodes'][:, 0]
veh_surf["y"] = veh_surf['nodes'][:, 1]
veh_surf["z"] = veh_surf['nodes'][:, 2]
return veh_surf
def find_occupied_voxels(surf, surf_mask, voxel_data):
"""
Voxels containing any triangle from ``surf`` are marked occupied by or-ing in ``surf_mask``.
If ``voxel_data["value"]`` is None, a fresh voxel array is created and stored there.
"""
nodes = surf["nodes"]
tris = surf["tris"]
x_pts, y_pts, z_pts = [voxel_data[k] for k in ("x_grid", "y_grid", "z_grid")]
vox_size = voxel_data["vox_size"]
## Find the local extents of this part
min_x, max_x = np.min(surf["x"]) - vox_size, np.max(surf["x"]) + vox_size
min_y, max_y = np.min(surf["y"]) - vox_size, np.max(surf["y"]) + vox_size
min_z, max_z = np.min(surf["z"]) - vox_size, np.max(surf["z"]) + vox_size
b_tree = BSP_Grid(nodes, tris)
# Create BSP tree elements; we're not building a full tree, but we reuse some of its functions
b_x_root = BSP_Element(b_tree.tris, b_tree)
size_i, size_j, size_k = len(x_pts), len(y_pts), len(z_pts)
## Create the occupied voxels if none were supplied
if voxel_data["value"] is None:
voxel_data["value"] = np.zeros((size_i - 1, size_j - 1, size_k - 1), dtype=np.uint32)
occupied_voxels = voxel_data["value"]
## The [1:] is because to make n voxels in a given direction we need n-1 splits
for i, x_pos in enumerate(x_pts[1:]):
if x_pos < min_x: continue
if x_pos > max_x: break
b_above_x, b_below_x = b_x_root.split_at(0, x_pos)
b_y_root = b_below_x
for j, y_pos in enumerate(y_pts[1:]):
if b_y_root is None:
break
if y_pos < min_y: continue
if y_pos > max_y: break
b_above_y, b_below_y = b_y_root.split_at(1, y_pos)
b_z_root = b_below_y
for k, z_pos in enumerate(z_pts[1:]):
if b_z_root is None:
break
if z_pos < min_z: continue
if z_pos > max_z: break
b_above_z, b_below_z = b_z_root.split_at(2, z_pos)
if not (b_below_z and (len(b_below_z.tris) == 0)):
## There is at least part of triangle here so mark as occupied
occupied_voxels[i, j, k] |= surf_mask
b_z_root = b_above_z
b_y_root = b_above_y
b_x_root = b_above_x
return voxel_data
#############
# Main code
def main(vehicle_comp_coords, tr_mat, voxel_masks, settings):
"""
Perform voxelization for all vehicle geometries in a list of parts. Combine on a uniform grid.
"""
for key, veh_surf in vehicle_comp_coords.items():
# Convert coordinates and find overall best bounding box
veh_surf = convert_geom(veh_surf, tr_mat)
x_grid, y_grid, z_grid = make_grid(vehicle_comp_coords, settings)
voxel_data = {"x_grid": x_grid,
"y_grid": y_grid,
"z_grid": z_grid,
"vox_size": settings["voxel_size"],
"csys_trans": tr_mat,
"value": None}
for key, veh_surf in vehicle_comp_coords.items():
# Build up the voxel_data
logging.debug("Sampling component: {}".format(key))
## Default mask is 1 for anything not in an identified set
surf_mask = 1
for mask, geo_set in voxel_masks.items():
if veh_surf['part_class'] in geo_set:
surf_mask |= mask
voxel_data = find_occupied_voxels(veh_surf, surf_mask, voxel_data)
return voxel_data
if __name__ == "__main__":
from rpl.tools.api import test_bench_api as tb_api
SETTINGS = tb_api.load_settings("settings.js")
DOORS = {'Hatch_Assembly_Rear_Ramp', 'Hatch_Assembly_Personnel_Door'}
HATCHES = {'Hatch_Assembly_Driver_Commander', 'Hatch_Assembly_Cargo'}
HULLS = {"Hull_Assembly_Parametric", 'Hull_Assembly_Example_With_Connector'}
MANIKINS = {"Manikin"}
# Special labels applied to specific types of voxels
VOXEL_LABELS = {2: HULLS,
4: DOORS,
8: HATCHES,
16: MANIKINS}
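# The voxel values form a bit field: a voxel holding both hull (2) and door
# (4) geometry stores 2 | 4 == 6, and (value & 4) tests for door occupancy.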
vehicle_surfs = tb_api.load_geometry(tb_api.get_all_geom_set() - MANIKINS, single_file=False)
# Modify node coords so object aligns with cartesian axes of occ voxel grid, +z=up
# Vector to rotate around is cross product of current z axis and sfc normal
veh_up = np.array([0., 1., 0.])
rot_around = np.cross(veh_up, np.array([0, 0, 1]))
rot_ang = -np.arccos(veh_up[2])
tr_mat = geom_utils.rotation_about_vector(rot_around, rot_ang)
# voxel_data = main(vehicle_surfs, tr_mat, VOXEL_LABELS, SETTINGS)
vox_veh_folder = r"voxelated_models/vehicles/{}/{}".format(SETTINGS["run_id"],
SETTINGS["voxel_size"])
vox_veh_file = "voxels_{}_vox{}_hacked".format(SETTINGS["run_id"],
SETTINGS["voxel_size"])
try:
voxel_data = data_io.load_array(vox_veh_folder, vox_veh_file, True)
except Exception:  # regenerate if the cached voxel file is missing or unreadable
voxel_data = main(vehicle_surfs, tr_mat, VOXEL_LABELS, SETTINGS)
from mayavi import mlab
xo, yo, zo = np.where(voxel_data["value"] == 1)
plot_vehicle = mlab.points3d(voxel_data["x_grid"][xo],
voxel_data["y_grid"][yo],
voxel_data["z_grid"][zo],
color=(0.9, 0.9, 0.9),
scale_mode="none", scale_factor=voxel_data["vox_size"],
mode='cube', opacity=1)
xo, yo, zo = np.where(voxel_data["value"] & 2)
plot_vehicle = mlab.points3d(voxel_data["x_grid"][xo],
voxel_data["y_grid"][yo],
voxel_data["z_grid"][zo],
color=(1, 1, 1),
scale_mode="none", scale_factor=voxel_data["vox_size"],
mode='cube', opacity=0.05)
xo, yo, zo = np.where(voxel_data["value"] & 4)
plot_vehicle = mlab.points3d(voxel_data["x_grid"][xo],
voxel_data["y_grid"][yo],
voxel_data["z_grid"][zo],
color=(1.0, 0.5, 0.5),
scale_mode="none", scale_factor=voxel_data["vox_size"],
mode='cube', opacity=1)
xo, yo, zo = np.where(voxel_data["value"] & 8)
plot_vehicle = mlab.points3d(voxel_data["x_grid"][xo],
voxel_data["y_grid"][yo],
voxel_data["z_grid"][zo],
color=(0.6, 0.6, 1.0),
scale_mode="none", scale_factor=voxel_data["vox_size"],
mode='cube', opacity=1)
# No manikins included, no need to plot them
# xo, yo, zo = np.where(voxel_data["value"] & 16)
# plot_vehicle = mlab.points3d(voxel_data["x_grid"][xo],
# voxel_data["y_grid"][yo],
# voxel_data["z_grid"][zo],
# color=(0.5, 1.0, 0.8),
# scale_mode="none", scale_factor=voxel_data["vox_size"],
# mode='cube', opacity=1.0)
mlab.show()
# Save the voxelated model of the vehicle (sans door and other excluded parts)
data_io.save_multi_array(vox_veh_folder, vox_veh_file, voxel_data) | 3.078125 | 3 |
silver_bullet/crypto.py | Hojung-Jeong/Silver-Bullet-Encryption-Tool | 0 | 2628 | '''
>List of functions
1. encrypt(user_input,passphrase) - Encrypt the given string with the given passphrase. Returns cipher text and locked pad.
2. decrypt(cipher_text,locked_pad,passphrase) - Decrypt the cipher text encrypted with SBET. It requires cipher text, locked pad, and passphrase.
'''
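# A minimal usage sketch (assumes this module is importable as
# silver_bullet.crypto and that the TRNG source is available):
#
# from silver_bullet.crypto import encrypt, decrypt
# cipher_text, locked_pad = encrypt('attack at dawn', 'passphrase')
# assert decrypt(cipher_text, locked_pad, 'passphrase') == 'attack at dawn'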
# CODE ========================================================================
import zlib
import random
from hashlib import sha1
from silver_bullet.TRNG import trlist
from silver_bullet.contain_value import contain
ascii_value=256
def ciphering(target_list,pad,decrypt=False):
result=[]
for counter in range(len(pad)):
if decrypt==False:
operated=contain(target_list[counter]+pad[counter],ascii_value)
else:
operated=contain(int(target_list[counter])-pad[counter],ascii_value)
result.append(operated)
return result
def locker(pad,passphrase):
cutter=round(len(passphrase)/2)
splited=[passphrase[:cutter],passphrase[cutter:]]
locker=[0 for counter in range(len(pad))]
for element in splited:
bloated_seed=sha1(element.encode()).hexdigest()
random.seed(bloated_seed)
locker=[contain(random.randrange(ascii_value)+value,ascii_value) for value in locker]
holder=[]
for counter in range(len(pad)):
operated=int(pad[counter])^locker[counter]
holder.append(operated)
return holder
def encrypt(user_input,passphrase):
compressed=zlib.compress(user_input.encode())
ui_listed=list(compressed)
pad=trlist(len(ui_listed),ascii_value)
ct=ciphering(ui_listed,pad)
lp=locker(pad,passphrase)
cipher_text=' '.join(map(str,ct))
locked_pad=' '.join(map(str,lp))
return cipher_text, locked_pad
def decrypt(cipher_text,locked_pad,passphrase):
ct=cipher_text.split(' ')
lp=locked_pad.split(' ')
pad=locker(lp,passphrase)
pt=ciphering(ct,pad,True)
byted=bytes(pt)
decompressed=zlib.decompress(byted).decode()
return decompressed | 3.953125 | 4 |
pyfire/errors.py | RavidLevi98/pyfire | 0 | 2629 | <reponame>RavidLevi98/pyfire
# -*- coding: utf-8 -*-
"""
pyfire.errors
~~~~~~~~~~~~~~~~~~~~~~
Holds the global used base errors
:copyright: 2011 by the pyfire Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import xml.etree.ElementTree as ET
class XMPPProtocolError(Exception):
"""Base class for all errors that can be
sent via XMPP Protocol to peer
"""
def __init__(self, error_element, error_namespace, error_name=None):
self.error_name = error_name
self.element = ET.Element(error_element)
self.element.set("xmlns", error_namespace)
# per default all errors are recoverable
self.unrecoverable = False
def __unicode__(self):
if self.error_name is not None:
self.element.append(ET.Element(self.error_name))
return unicode(ET.tostring(self.element))
| 1.710938 | 2 |
app/nextMoveLogic.py | thekitbag/starter-snake-python | 0 | 2630 | import random
class Status(object):
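"""Read-only helpers that extract facts from a Battlesnake 'gamedata'
payload (a dict with 'you' and 'board' keys, per the era's API)."""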
def getHeadPosition(gamedata):
me = gamedata['you']
my_position = me['body']
head = my_position[0]
return head
def getMyLength(gamedata):
me = gamedata['you']
my_position = me['body']
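# At game start the body segments are stacked on one square, so
# duplicated segments are collapsed into the snake's effective length.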
if my_position[0] == my_position[1] == my_position[2]:
return 1
elif my_position[1] == my_position[2]:
return 2
else: return len(my_position)
def getMyDirection(gamedata):
me = gamedata['you']
my_position = me['body']
if Status.getMyLength(gamedata) == 1:
return 'none'
elif my_position[0]['x'] > my_position[1]['x']:
return 'right'
elif my_position[0]['x'] < my_position[1]['x']:
return 'left'
elif my_position[0]['x'] == my_position[1]['x'] and my_position[0]['y'] < my_position[1]['y']:
return 'up'
else: return 'down'
def getHealth(gamedata):
pass
def getBoardSize(gamedata):
board_height = gamedata['board']['height']
board_width = gamedata['board']['width']
dimensions = {'height': board_height, 'width': board_width}
return dimensions
def getFoodPositions(gamedata):
pass
def getSnakesPositions(gamedata):
pass
class Assess(object):
def wallProximity(gamedata):
"""returns proximity to a wall
either parallel to, head-on or corner"""
head = Status.getHeadPosition(gamedata)
board_size = Status.getBoardSize(gamedata)
direction = Status.getMyDirection(gamedata)
height = board_size['height'] - 1
width = board_size['width'] - 1
# corners
if head['x'] == 0 and head['y'] == 0:
return {'type': 'corner', 'identifier': 'top left', 'direction': direction}
elif head['x'] == 0 and head['y'] == height:
return {'type': 'corner', 'identifier': 'bottom left', 'direction': direction}
elif head['x'] == width and head['y'] == 0:
return {'type': 'corner', 'identifier': 'top right', 'direction': direction}
elif head['x'] == width and head['y'] == height:
return {'type': 'corner', 'identifier': 'bottom right', 'direction': direction}
# head-ons
elif head['x'] == 0 and direction == 'left':
return {'type': 'head-on', 'identifier': 'left', 'direction': direction}
elif head['y'] == 0 and direction == 'up':
return {'type': 'head-on', 'identifier': 'top', 'direction': direction}
elif head['x'] == width and direction == 'right':
return {'type': 'head-on', 'identifier': 'right', 'direction': direction}
elif head['y'] == height and direction == 'down':
return {'type': 'head-on', 'identifier': 'bottom', 'direction': direction}
# parallels
elif head['x'] == 0 and direction == 'up' or head['x'] == 0 and direction == 'down':
return {'type': 'parallel', 'identifier': 'left', 'direction': direction}
elif head['y'] == 0 and direction == 'right' or head['y'] == 0 and direction =='left':
return {'type': 'parallel', 'identifier': 'top', 'direction': direction}
elif head['x'] == width and direction =='down' or head['x'] == width and direction == 'up':
return {'type': 'parallel', 'identifier': 'right', 'direction': direction}
elif head['y'] == height and direction == 'left' or head['y'] == height and direction == 'right':
return {'type': 'parallel', 'identifier': 'bottom', 'direction': direction}
else: return False
def ownBodyProximity(gamedata):
pass
def killPossible(gamedata):
pass
def smallerSnakeNearby(gamedata):
pass
def biggerSnakeNearby(gamedata):
pass
def foodNearby(gamedata):
pass
class Action(object):
def avoidDeath():
pass
def chaseFood():
pass
def fleeSnake():
pass
def chaseSnake():
pass
class Decision(object):
def chooseBestOption(gamedata):
options = ['up', 'down', 'right', 'left']
current_direction = Status.getMyDirection(gamedata)
# first go: no direction yet, so pick anything and return early,
# otherwise the wall checks below would overwrite the choice with 'none'
if current_direction == 'none':
return random.choice(options)
#remove opposite direction
if current_direction == 'up':
options.remove('down')
if current_direction == 'down':
options.remove('up')
if current_direction == 'right':
options.remove('left')
if current_direction == 'left':
options.remove('right')
#no danger keep going
if Assess.wallProximity(gamedata) == False:
choice = current_direction
#in a corner
elif Assess.wallProximity(gamedata)['type'] == 'corner':
options.remove(current_direction)
if Assess.wallProximity(gamedata)['identifier'][0] == 't' and Assess.wallProximity(gamedata)['identifier'][4] == 'l':
if 'up' in options:
choice = 'down'
else: choice = 'right'
elif Assess.wallProximity(gamedata)['identifier'][0] == 't' and Assess.wallProximity(gamedata)['identifier'][4] == 'r':
if 'up' in options:
choice = 'down'
else: choice = 'left'
elif Assess.wallProximity(gamedata)['identifier'][0] == 'b' and Assess.wallProximity(gamedata)['identifier'][7] == 'l':
if 'down' in options:
choice = 'up'
else: choice = 'right'
elif Assess.wallProximity(gamedata)['identifier'][0] == 'b' and Assess.wallProximity(gamedata)['identifier'][7] == 'r':
if 'down' in options:
choice = 'up'
else: choice = 'left'
#headon
elif Assess.wallProximity(gamedata)['type'] == 'head-on':
options.remove(current_direction)
choice = random.choice(options)
#parallel
elif Assess.wallProximity(gamedata)['type'] == 'parallel':
choice = current_direction
else: print("shit")
print(options)
return choice
| 3.1875 | 3 |
generator/util.py | gbtami/lichess-puzzler | 1 | 2631 | from dataclasses import dataclass
import math
import chess
import chess.engine
from model import EngineMove, NextMovePair
from chess import Color, Board
from chess.pgn import GameNode
from chess.engine import SimpleEngine, Score
nps = []
def material_count(board: Board, side: Color) -> int:
values = { chess.PAWN: 1, chess.KNIGHT: 3, chess.BISHOP: 3, chess.ROOK: 5, chess.QUEEN: 9 }
return sum(len(board.pieces(piece_type, side)) * value for piece_type, value in values.items())
def material_diff(board: Board, side: Color) -> int:
return material_count(board, side) - material_count(board, not side)
def is_up_in_material(board: Board, side: Color) -> bool:
return material_diff(board, side) > 0
def get_next_move_pair(engine: SimpleEngine, node: GameNode, winner: Color, limit: chess.engine.Limit) -> NextMovePair:
info = engine.analyse(node.board(), multipv = 2, limit = limit)
global nps
nps.append(info[0]["nps"])
nps = nps[-20:]
# print(info)
best = EngineMove(info[0]["pv"][0], info[0]["score"].pov(winner))
second = EngineMove(info[1]["pv"][0], info[1]["score"].pov(winner)) if len(info) > 1 else None
return NextMovePair(node, winner, best, second)
def avg_knps():
global nps
return round(sum(nps) / len(nps) / 1000) if nps else 0
def win_chances(score: Score) -> float:
"""
winning chances from -1 to 1 https://graphsketch.com/?eqn1_color=1&eqn1_eqn=100+*+%282+%2F+%281+%2B+exp%28-0.004+*+x%29%29+-+1%29&eqn2_color=2&eqn2_eqn=&eqn3_color=3&eqn3_eqn=&eqn4_color=4&eqn4_eqn=&eqn5_color=5&eqn5_eqn=&eqn6_color=6&eqn6_eqn=&x_min=-1000&x_max=1000&y_min=-100&y_max=100&x_tick=100&y_tick=10&x_label_freq=2&y_label_freq=2&do_grid=0&do_grid=1&bold_labeled_lines=0&bold_labeled_lines=1&line_width=4&image_w=850&image_h=525
"""
mate = score.mate()
if mate is not None:
return 1 if mate > 0 else -1
cp = score.score()
return 2 / (1 + math.exp(-0.004 * cp)) - 1 if cp is not None else 0
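# Worked example (sketch, assuming chess.engine.Cp): win_chances(Cp(100))
# == 2 / (1 + exp(-0.4)) - 1, which is about 0.197, i.e. roughly a 20% edge.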
CORRESP_TIME = 999999
def reject_by_time_control(line: str, has_master: bool, master_only: bool, bullet: bool, mates: bool) -> bool:
if not line.startswith("[TimeControl "):
return False
if master_only and not has_master:
return True
try:
seconds, increment = line[1:][:-2].split()[1].replace("\"", "").split("+")
total = int(seconds) + int(increment) * 40
if master_only or mates:
if bullet:
return total < 30 or total >= 160
else:
return total < 160 or total >= 480
else:
return total < (160 if has_master else 480)
except Exception:
return True
def exclude_rating(line: str, mates: bool) -> bool:
if not line.startswith("[WhiteElo ") and not line.startswith("[BlackElo "):
return False
try:
return int(line[11:15]) < (1501 if mates else 1600)
except Exception:
return True
| 2.8125 | 3 |
sleep.py | SkylerHoward/O | 0 | 2632 | import time, morning
from datetime import datetime
def main():
while True:
a = time.mktime(datetime.now().timetuple())
n = datetime.now()
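# round the minute down to its 5-minute bucket; trigger in the 06:15-06:19 window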
if n.hour == 6 and (n.minute-(n.minute%5)) == 15:
return morning.main()
time.sleep(300 - (time.mktime(datetime.now().timetuple())-a)) | 3.546875 | 4 |
tests/unit/commands/local/start_lambda/test_cli.py | ourobouros/aws-sam-cli | 2 | 2633 | <reponame>ourobouros/aws-sam-cli
from unittest import TestCase
from mock import patch, Mock
from samcli.commands.local.start_lambda.cli import do_cli as start_lambda_cli
from samcli.commands.local.cli_common.user_exceptions import UserException
from samcli.commands.validate.lib.exceptions import InvalidSamDocumentException
from samcli.commands.local.lib.exceptions import OverridesNotWellDefinedError
class TestCli(TestCase):
def setUp(self):
self.template = "template"
self.env_vars = "env-vars"
self.debug_port = 123
self.debug_args = "args"
self.debugger_path = "/test/path"
self.docker_volume_basedir = "basedir"
self.docker_network = "network"
self.log_file = "logfile"
self.skip_pull_image = True
self.profile = "profile"
self.region = "region"
self.parameter_overrides = {}
self.host = "host"
self.port = 123
@patch("samcli.commands.local.start_lambda.cli.InvokeContext")
@patch("samcli.commands.local.start_lambda.cli.LocalLambdaService")
def test_cli_must_setup_context_and_start_service(self, local_lambda_service_mock,
invoke_context_mock):
# Mock the __enter__ method to return an object inside a context manager
context_mock = Mock()
invoke_context_mock.return_value.__enter__.return_value = context_mock
service_mock = Mock()
local_lambda_service_mock.return_value = service_mock
self.call_cli()
invoke_context_mock.assert_called_with(template_file=self.template,
function_identifier=None,
env_vars_file=self.env_vars,
docker_volume_basedir=self.docker_volume_basedir,
docker_network=self.docker_network,
log_file=self.log_file,
skip_pull_image=self.skip_pull_image,
aws_profile=self.profile,
debug_port=self.debug_port,
debug_args=self.debug_args,
debugger_path=self.debugger_path,
aws_region=self.region,
parameter_overrides=self.parameter_overrides)
local_lambda_service_mock.assert_called_with(lambda_invoke_context=context_mock,
port=self.port,
host=self.host)
service_mock.start.assert_called_with()
@patch("samcli.commands.local.start_lambda.cli.InvokeContext")
def test_must_raise_user_exception_on_invalid_sam_template(self, invoke_context_mock):
invoke_context_mock.side_effect = InvalidSamDocumentException("bad template")
with self.assertRaises(UserException) as context:
self.call_cli()
msg = str(context.exception)
expected = "bad template"
self.assertEquals(msg, expected)
@patch("samcli.commands.local.start_lambda.cli.InvokeContext")
def test_must_raise_user_exception_on_invalid_env_vars(self, invoke_context_mock):
invoke_context_mock.side_effect = OverridesNotWellDefinedError("bad env vars")
with self.assertRaises(UserException) as context:
self.call_cli()
msg = str(context.exception)
expected = "bad env vars"
self.assertEquals(msg, expected)
def call_cli(self):
start_lambda_cli(ctx=None,
host=self.host,
port=self.port,
template=self.template,
env_vars=self.env_vars,
debug_port=self.debug_port,
debug_args=self.debug_args,
debugger_path=self.debugger_path,
docker_volume_basedir=self.docker_volume_basedir,
docker_network=self.docker_network,
log_file=self.log_file,
skip_pull_image=self.skip_pull_image,
profile=self.profile,
region=self.region,
parameter_overrides=self.parameter_overrides)
| 2.140625 | 2 |
restapi/services/Utils/test_getters.py | Varun487/CapstoneProject_TradingSystem | 3 | 2634 | <gh_stars>1-10
from django.test import TestCase
import pandas as pd
from .getters import Getter
from .converter import Converter
from strategies.models import Company
from strategies.models import IndicatorType
class GetDataTestCase(TestCase):
def setUp(self) -> None:
# Dummy company data
Company.objects.create(name='abc', ticker='ABC', description='desc')
Company.objects.create(name='abcd', ticker='ABCD', description='desc')
Company.objects.create(name='abce', ticker='ABCE', description='desc')
# Dummy indicator data
IndicatorType.objects.create(name='abc', description='desc')
IndicatorType.objects.create(name='abcd', description='desc')
IndicatorType.objects.create(name='abce', description='desc')
self.param_list_company = {"name": "abc", "ticker": 'ABC', "description": 'desc'}
self.param_list_indicator_type = {"name": "abc", "description": 'desc'}
def test_input_none(self):
"""No inputs are given"""
self.assertEquals(Getter().table_name, None)
self.assertEquals(Getter().param_list, None)
self.assertEquals(Getter().df_flag, False)
def test_input_all(self):
"""All inputs provided"""
self.assertEquals(Getter(table_name=Company, df_flag=True, param_list=self.param_list_company).table_name,
Company)
self.assertEquals(Getter(table_name=Company, df_flag=True, param_list=self.param_list_company).param_list,
self.param_list_company)
self.assertEquals(Getter(table_name=Company, df_flag=True, param_list=self.param_list_company).df_flag, True)
def test_input_df_flag(self):
"""Only df_flag input is provided"""
self.assertEquals(Getter(df_flag=True).df_flag, True)
self.assertEquals(Getter(df_flag=False).df_flag, False)
def test_get_data_correct_obj_list(self):
"""Whether it returns correct obj list when input is correct"""
# Returns correct object list for company
self.assertEquals(Getter(table_name=Company, df_flag=False, param_list=self.param_list_company).get_data(),
list(Company.objects.filter(**self.param_list_company)))
self.assertEquals(Getter(table_name=Company, param_list={"description": 'desc'}).get_data(),
list(Company.objects.filter(**{"description": 'desc'})))
self.assertEquals(Getter(table_name=Company, param_list={"name": "abcd"}).get_data(),
list(Company.objects.filter(**{"name": "abcd"})))
# Returns correct object list for Indicator
self.assertEquals(
Getter(table_name=IndicatorType, df_flag=False, param_list=self.param_list_indicator_type).get_data(),
list(IndicatorType.objects.filter(**self.param_list_indicator_type)))
self.assertEquals(Getter(table_name=IndicatorType, param_list={"description": 'desc'}).get_data(),
list(IndicatorType.objects.filter(**{"description": 'desc'})))
self.assertEquals(Getter(table_name=IndicatorType, param_list={"name": "abcd"}).get_data(),
list(IndicatorType.objects.filter(**{"name": "abcd"})))
def test_get_data_correct_df(self):
"""Whether it returns correct df when input is correct"""
# Returns correct df for company
self.assertEquals(Getter(table_name=Company, df_flag=True, param_list=self.param_list_company).get_data()
.equals(
Converter(obj_list=list(Company.objects.filter(**self.param_list_company))).to_df()
), True)
self.assertEquals(Getter(table_name=Company, df_flag=True, param_list={"description": 'desc'}).get_data()
.equals(
Converter(obj_list=list(Company.objects.filter(**{"description": 'desc'}))).to_df()
), True)
self.assertEquals(Getter(table_name=Company, df_flag=True, param_list={"name": "abcd"}).get_data()
.equals(
Converter(obj_list=list(Company.objects.filter(**{"name": "abcd"}))).to_df()
), True)
# Returns correct df for indicator type
self.assertEquals(
Getter(table_name=IndicatorType, df_flag=True, param_list=self.param_list_indicator_type).get_data()
.equals(
Converter(obj_list=list(IndicatorType.objects.filter(**self.param_list_indicator_type))).to_df()
), True)
self.assertEquals(Getter(table_name=IndicatorType, df_flag=True, param_list={"description": 'desc'}).get_data()
.equals(
Converter(obj_list=list(IndicatorType.objects.filter(**{"description": 'desc'}))).to_df()
), True)
self.assertEquals(Getter(table_name=IndicatorType, df_flag=True, param_list={"name": "abcd"}).get_data()
.equals(
Converter(obj_list=list(IndicatorType.objects.filter(**{"name": "abcd"}))).to_df()
), True)
def test_get_data_invalid_inputs(self):
self.assertRaises(TypeError,
Getter(table_name="IndicatorTyp", df_flag=True, param_list={"name": "abcd"}).get_data)
self.assertRaises(TypeError, Getter(table_name=IndicatorType, param_list={"nam": "abcd"}).get_data)
self.assertRaises(TypeError, Getter(table_name=Company, param_list={"name": "abcd", "res": "abcd"}).get_data)
| 2.578125 | 3 |
pytorch_toolkit/ote/ote/modules/trainers/mmdetection.py | abhatikar/training_extensions | 2 | 2635 | <filename>pytorch_toolkit/ote/ote/modules/trainers/mmdetection.py
"""
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import logging
import subprocess
import tempfile
from ote import MMDETECTION_TOOLS
from .base import BaseTrainer
from ..registry import TRAINERS
@TRAINERS.register_module()
class MMDetectionTrainer(BaseTrainer):
def __init__(self):
super(MMDetectionTrainer, self).__init__()
def _get_tools_dir(self):
return MMDETECTION_TOOLS
def _add_extra_args(self, cfg, config_path, update_config):
if self.__is_clustering_needed(cfg):
update_config = self.__cluster(cfg, config_path, update_config)
return update_config
@staticmethod
def __is_clustering_needed(cfg):
if cfg.total_epochs > 0:
return False
if not hasattr(cfg.model, 'bbox_head') or not cfg.model.bbox_head.type == 'SSDHead':
return False
if not cfg.model.bbox_head.anchor_generator.type == 'SSDAnchorGeneratorClustered':
return False
return True
@staticmethod
def __cluster(cfg, config_path, update_config):
logging.info('Clustering started...')
widths = cfg.model.bbox_head.anchor_generator.widths
n_clust = 0
for w in widths:
n_clust += len(w) if isinstance(w, (list, tuple)) else 1
n_clust = ' --n_clust ' + str(n_clust)
group_as = ''
if isinstance(widths[0], (list, tuple)):
group_as = ' --group_as ' + ' '.join([str(len(w)) for w in widths])
config = ' --config ' + config_path
tmp_file = tempfile.NamedTemporaryFile(delete=False)
out = f' --out {tmp_file.name}'
if 'pipeline' in cfg.data.train:
img_shape = [t for t in cfg.data.train.pipeline if t['type'] == 'Resize'][0][
'img_scale']
else:
img_shape = [t for t in cfg.data.train.dataset.pipeline if t['type'] == 'Resize'][0][
'img_scale']
img_shape = f' --image_size_wh {img_shape[0]} {img_shape[1]}'
subprocess.run(f'python {MMDETECTION_TOOLS}/cluster_boxes.py'
f'{config}'
f'{n_clust}'
f'{group_as}'
f'{update_config}'
f'{img_shape}'
f'{out}'.split(' '), check=True)
with open(tmp_file.name) as src_file:
content = json.load(src_file)
widths, heights = content['widths'], content['heights']
if not update_config:
update_config = ' --update_config'
update_config += f' model.bbox_head.anchor_generator.widths={str(widths).replace(" ", "")}'
update_config += f' model.bbox_head.anchor_generator.heights={str(heights).replace(" ", "")}'
logging.info('... clustering completed.')
return update_config
| 1.664063 | 2 |
svn-go-stats/transform.py | BT-OpenSource/bt-betalab | 1 | 2636 | import sys
import json
import subprocess
import re
import statistics
def get_complexity():
# Load the cyclomatic complexity info
cyclostats = subprocess.check_output(['./gocyclo', 'repo']).decode("utf-8")
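# gocyclo emits one line per function:
# "<complexity> <package> <function> <file>:<row>:<column>"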
results = re.findall(r'([0-9]+)\s([^\s]+)\s([^\s]+)\s([^:]+):([0-9]+):([0-9]+)', cyclostats)
# Setup a dictionary in which to keep track of the complixities
# for each file
files = {}
# Build an array of complexities for each file
for result in results:
if result[3] in files:
files[result[3]].append(int(result[0]))
else:
files[result[3]] = [int(result[0])]
# Pick out the median value (picking the highest of the two
# middle entries if needed) for each file
for name, values in files.items():
files[name] = statistics.median_high(values)
return files
def get_duplicate_const_strings():
# Load the const string duplication info
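# goconst output lines look roughly like:
# path/file.go:12:34: 3 other occurrence(s) of "foo" found in: path/b.go:5:6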
conststats = subprocess.check_output(['./goconst', './repo/...']).decode("utf-8")
results = re.findall(r'([^:]+).+ other occurrence\(s\) of \"(.+)\" found in: ([^:]+).+\n?', conststats)
files = {}
# Build an array containing the number of potentially duplicated
# constants by file
for result in results:
if result[0] in files:
files[result[0]] = files[result[0]]+1
else:
files[result[0]] = 1
return files
# Main service body
if __name__ == "__main__":
complexity = get_complexity()
duplicate_const_strings = get_duplicate_const_strings()
files = set()
files.update(complexity.keys())
files.update(duplicate_const_strings.keys())
result = []
for f in files:
result.append({
'filename': f,
'cyclomaticComplexity': complexity[f] if f in complexity else 0,
'duplicateConstStrings': duplicate_const_strings[f] if f in duplicate_const_strings else 0
})
print(json.dumps(result)) | 2.890625 | 3 |
test/test_python_errors.py | yangyangxcf/parso | 0 | 2637 | """
Testing if parso finds syntax errors and indentation errors.
"""
import sys
import warnings
import pytest
import parso
from parso._compatibility import is_pypy
from .failing_examples import FAILING_EXAMPLES, indent, build_nested
if is_pypy:
# The errors in PyPy might be different. Just skip the module for now.
pytestmark = pytest.mark.skip()
def _get_error_list(code, version=None):
grammar = parso.load_grammar(version=version)
tree = grammar.parse(code)
return list(grammar.iter_errors(tree))
def assert_comparison(code, error_code, positions):
errors = [(error.start_pos, error.code) for error in _get_error_list(code)]
assert [(pos, error_code) for pos in positions] == errors
@pytest.mark.parametrize('code', FAILING_EXAMPLES)
def test_python_exception_matches(code):
wanted, line_nr = _get_actual_exception(code)
errors = _get_error_list(code)
actual = None
if errors:
error, = errors
actual = error.message
assert actual in wanted
# Somehow in Python3.3 the SyntaxError().lineno is sometimes None
assert line_nr is None or line_nr == error.start_pos[0]
def test_non_async_in_async():
"""
This example doesn't work with FAILING_EXAMPLES, because the line numbers
are not always the same / incorrect in Python 3.8.
"""
if sys.version_info[:2] < (3, 5):
pytest.skip()
# Raises multiple errors in previous versions.
code = 'async def foo():\n def nofoo():[x async for x in []]'
wanted, line_nr = _get_actual_exception(code)
errors = _get_error_list(code)
if errors:
error, = errors
actual = error.message
assert actual in wanted
if sys.version_info[:2] < (3, 8):
assert line_nr == error.start_pos[0]
else:
assert line_nr == 0 # For whatever reason this is zero in Python 3.8+
@pytest.mark.parametrize(
('code', 'positions'), [
('1 +', [(1, 3)]),
('1 +\n', [(1, 3)]),
('1 +\n2 +', [(1, 3), (2, 3)]),
('x + 2', []),
('[\n', [(2, 0)]),
('[\ndef x(): pass', [(2, 0)]),
('[\nif 1: pass', [(2, 0)]),
('1+?', [(1, 2)]),
('?', [(1, 0)]),
('??', [(1, 0)]),
('? ?', [(1, 0)]),
('?\n?', [(1, 0), (2, 0)]),
('? * ?', [(1, 0)]),
('1 + * * 2', [(1, 4)]),
('?\n1\n?', [(1, 0), (3, 0)]),
]
)
def test_syntax_errors(code, positions):
assert_comparison(code, 901, positions)
@pytest.mark.parametrize(
('code', 'positions'), [
(' 1', [(1, 0)]),
('def x():\n 1\n 2', [(3, 0)]),
('def x():\n 1\n 2', [(3, 0)]),
('def x():\n1', [(2, 0)]),
]
)
def test_indentation_errors(code, positions):
assert_comparison(code, 903, positions)
def _get_actual_exception(code):
with warnings.catch_warnings():
# We don't care about warnings where locals/globals misbehave here.
# It's as simple as either an error or not.
warnings.filterwarnings('ignore', category=SyntaxWarning)
try:
compile(code, '<unknown>', 'exec')
except (SyntaxError, IndentationError) as e:
wanted = e.__class__.__name__ + ': ' + e.msg
line_nr = e.lineno
except ValueError as e:
# The ValueError comes from byte literals in Python 2 like '\x'
# that are oddly enough not SyntaxErrors.
wanted = 'SyntaxError: (value error) ' + str(e)
line_nr = None
else:
assert False, "The piece of code should raise an exception."
# SyntaxError
# Python 2.6 has slightly different error messages here, so skip it.
if sys.version_info[:2] == (2, 6) and wanted == 'SyntaxError: unexpected EOF while parsing':
wanted = 'SyntaxError: invalid syntax'
if wanted == 'SyntaxError: non-keyword arg after keyword arg':
# The python 3.5+ way, a bit nicer.
wanted = 'SyntaxError: positional argument follows keyword argument'
elif wanted == 'SyntaxError: assignment to keyword':
return [wanted, "SyntaxError: can't assign to keyword",
'SyntaxError: cannot assign to __debug__'], line_nr
elif wanted == 'SyntaxError: assignment to None':
# Python 2.6 has a slightly different error.
wanted = 'SyntaxError: cannot assign to None'
elif wanted == 'SyntaxError: can not assign to __debug__':
# Python 2.6 has a slightly different error.
wanted = 'SyntaxError: cannot assign to __debug__'
elif wanted == 'SyntaxError: can use starred expression only as assignment target':
# Python 3.4/3.4 have a slightly different warning than 3.5/3.6 in
# certain places. But in others this error makes sense.
return [wanted, "SyntaxError: can't use starred expression here"], line_nr
elif wanted == 'SyntaxError: f-string: unterminated string':
wanted = 'SyntaxError: EOL while scanning string literal'
elif wanted == 'SyntaxError: f-string expression part cannot include a backslash':
return [
wanted,
"SyntaxError: EOL while scanning string literal",
"SyntaxError: unexpected character after line continuation character",
], line_nr
elif wanted == "SyntaxError: f-string: expecting '}'":
wanted = 'SyntaxError: EOL while scanning string literal'
elif wanted == 'SyntaxError: f-string: empty expression not allowed':
wanted = 'SyntaxError: invalid syntax'
elif wanted == "SyntaxError: f-string expression part cannot include '#'":
wanted = 'SyntaxError: invalid syntax'
elif wanted == "SyntaxError: f-string: single '}' is not allowed":
wanted = 'SyntaxError: invalid syntax'
return [wanted], line_nr
def test_default_except_error_postition():
# For this error the position seemed to be one line off, but that doesn't
# really matter.
code = 'try: pass\nexcept: pass\nexcept X: pass'
wanted, line_nr = _get_actual_exception(code)
error, = _get_error_list(code)
assert error.message in wanted
assert line_nr != error.start_pos[0]
# I think this is the better position.
assert error.start_pos[0] == 2
def test_statically_nested_blocks():
def build(code, depth):
if depth == 0:
return code
new_code = 'if 1:\n' + indent(code)
return build(new_code, depth - 1)
def get_error(depth, add_func=False):
code = build('foo', depth)
if add_func:
code = 'def bar():\n' + indent(code)
errors = _get_error_list(code)
if errors:
assert errors[0].message == 'SyntaxError: too many statically nested blocks'
return errors[0]
return None
assert get_error(19) is None
assert get_error(19, add_func=True) is None
assert get_error(20)
assert get_error(20, add_func=True)
def test_future_import_first():
def is_issue(code, *args):
code = code % args
return bool(_get_error_list(code))
i1 = 'from __future__ import division'
i2 = 'from __future__ import absolute_import'
assert not is_issue(i1)
assert not is_issue(i1 + ';' + i2)
assert not is_issue(i1 + '\n' + i2)
assert not is_issue('"";' + i1)
assert not is_issue('"";' + i1)
assert not is_issue('""\n' + i1)
assert not is_issue('""\n%s\n%s', i1, i2)
assert not is_issue('""\n%s;%s', i1, i2)
assert not is_issue('"";%s;%s ', i1, i2)
assert not is_issue('"";%s\n%s ', i1, i2)
assert is_issue('1;' + i1)
assert is_issue('1\n' + i1)
assert is_issue('"";1\n' + i1)
assert is_issue('""\n%s\nfrom x import a\n%s', i1, i2)
assert is_issue('%s\n""\n%s', i1, i2)
def test_named_argument_issues(works_not_in_py):
message = works_not_in_py.get_error_message('def foo(*, **dict): pass')
message = works_not_in_py.get_error_message('def foo(*): pass')
if works_not_in_py.version.startswith('2'):
assert message == 'SyntaxError: invalid syntax'
else:
assert message == 'SyntaxError: named arguments must follow bare *'
works_not_in_py.assert_no_error_in_passing('def foo(*, name): pass')
works_not_in_py.assert_no_error_in_passing('def foo(bar, *, name=1): pass')
works_not_in_py.assert_no_error_in_passing('def foo(bar, *, name=1, **dct): pass')
def test_escape_decode_literals(each_version):
"""
We are using internal functions to assure that unicode/bytes escaping is
without syntax errors. Here we make a bit of quality assurance that this
works through versions, because the internal function might change over
time.
"""
def get_msg(end, to=1):
base = "SyntaxError: (unicode error) 'unicodeescape' " \
"codec can't decode bytes in position 0-%s: " % to
return base + end
def get_msgs(escape):
return (get_msg('end of string in escape sequence'),
get_msg(r"truncated %s escape" % escape))
error, = _get_error_list(r'u"\x"', version=each_version)
assert error.message in get_msgs(r'\xXX')
error, = _get_error_list(r'u"\u"', version=each_version)
assert error.message in get_msgs(r'\uXXXX')
error, = _get_error_list(r'u"\U"', version=each_version)
assert error.message in get_msgs(r'\UXXXXXXXX')
error, = _get_error_list(r'u"\N{}"', version=each_version)
assert error.message == get_msg(r'malformed \N character escape', to=2)
error, = _get_error_list(r'u"\N{foo}"', version=each_version)
assert error.message == get_msg(r'unknown Unicode character name', to=6)
# Finally bytes.
error, = _get_error_list(r'b"\x"', version=each_version)
wanted = r'SyntaxError: (value error) invalid \x escape'
if sys.version_info >= (3, 0):
# The positioning information is only available in Python 3.
wanted += ' at position 0'
assert error.message == wanted
def test_too_many_levels_of_indentation():
assert not _get_error_list(build_nested('pass', 99))
assert _get_error_list(build_nested('pass', 100))
base = 'def x():\n if x:\n'
assert not _get_error_list(build_nested('pass', 49, base=base))
assert _get_error_list(build_nested('pass', 50, base=base))
@pytest.mark.parametrize(
'code', [
"f'{*args,}'",
r'f"\""',
r'f"\\\""',
r'fr"\""',
r'fr"\\\""',
r"print(f'Some {x:.2f} and some {y}')",
]
)
def test_valid_fstrings(code):
assert not _get_error_list(code, version='3.6')
@pytest.mark.parametrize(
('code', 'message'), [
("f'{1+}'", ('invalid syntax')),
(r'f"\"', ('invalid syntax')),
(r'fr"\"', ('invalid syntax')),
]
)
def test_invalid_fstrings(code, message):
"""
Some fstring errors are handled differently in 3.6 and other versions.
Therefore check specifically for these errors here.
"""
error, = _get_error_list(code, version='3.6')
assert message in error.message
@pytest.mark.parametrize(
'code', [
"from foo import (\nbar,\n rab,\n)",
"from foo import (bar, rab, )",
]
)
def test_trailing_comma(code):
errors = _get_error_list(code)
assert not errors
| 2.6875 | 3 |
shogitk/usikif.py | koji-hirono/pytk-shogi-replayer | 0 | 2638 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from shogitk.shogi import Coords, Move, BLACK, WHITE, DROP, PROMOTE
RANKNUM = {
'a': 1,
'b': 2,
'c': 3,
'd': 4,
'e': 5,
'f': 6,
'g': 7,
'h': 8,
'i': 9
}
def decoder(f):
color = [BLACK, WHITE]
step = 0
for line in f:
line = line.strip()
if line[0] == '[':
pass
elif line[0].isdigit():
src = Coords(int(line[0]), RANKNUM[line[1]])
dst = Coords(int(line[2]), RANKNUM[line[3]])
if line[-1] == '+':
modifier = PROMOTE
else:
modifier = None
yield Move(color[step & 1], dst, src, None, modifier=modifier)
step += 1
elif line[0].isupper():
dst = Coords(int(line[2]), RANKNUM[line[3]])
yield Move(color[step & 1], dst, None, line[0], modifier=DROP)
step += 1
| 2.59375 | 3 |
etherbank_cli/oracles.py | ideal-money/etherbank-cli | 1 | 2639 | <reponame>ideal-money/etherbank-cli
import click
from . import utils
@click.group()
def main():
"Simple CLI for oracles to work with Ether dollar"
pass
@main.command()
@click.option('--ether-price', type=float, help="The ether price in ether dollar")
@click.option('--collateral-ratio', type=float, help="The collateral ratio")
@click.option(
'--liquidation-duration',
type=int,
help="The liquidation duration in minutes")
@click.option(
'--private-key',
callback=utils.check_account,
help='The private key to sign the transaction')
def vote(ether_price, collateral_ratio, liquidation_duration, private_key):
"Vote on the variable for setting up Ether Bank"
assert [ether_price, collateral_ratio, liquidation_duration
].count(None) == 2, "You should set one variable per vote"
if ether_price:
var_code = 0
value = int(ether_price * 100)
elif collateral_ratio:
var_code = 1
value = int(collateral_ratio * 1000)
elif liquidation_duration:
var_code = 2
value = liquidation_duration * 60
func = utils.contracts['oracles'].functions.vote(var_code, value)
tx_hash = utils.send_transaction(func, 0, private_key)
return tx_hash
@main.command()
@click.option('--oracle', required=True, help="The oracle's address")
@click.option('--score', type=int, required=True, help="The oracle's score")
@click.option(
'--private-key',
callback=utils.check_account,
help='The private key to sign the transaction')
def set_score(oracle, score, private_key):
"Edit oracle's score"
oracle = utils.w3.toChecksumAddress(oracle)
func = utils.contracts['oracles'].functions.setScore(oracle, score)
tx_hash = utils.send_transaction(func, 0, private_key)
return tx_hash
@main.command()
@click.option(
'--private-key',
callback=utils.check_account,
help='The privat key to sign the transaction')
def finish_recruiting(private_key):
"Set recruiting as finished"
func = utils.contracts['oracles'].functions.finishRecruiting()
tx_hash = utils.send_transaction(func, 0, private_key)
return tx_hash
if __name__ == '__main__':
main()
| 2.484375 | 2 |
src/grader/machine.py | MrKaStep/csc230-grader | 0 | 2640 | import getpass
from plumbum import local
from plumbum.machines.paramiko_machine import ParamikoMachine
from plumbum.path.utils import copy
def _once(f):
res = None
def wrapped(*args, **kwargs):
nonlocal res
if res is None:
res = f(*args, **kwargs)
return res
return wrapped
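# Note: _once caches the first non-None result; a wrapped function that
# legitimately returns None would be re-executed on every call.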
@_once
def get_remote_machine_with_password(host, user):
password = getpass.getpass(prompt=f"Password for {user}@{host}: ", stream=None)
rem = ParamikoMachine(host, user=user, password=password)
return rem
@_once
def get_remote_machine(host, user, keyfile):
rem = ParamikoMachine(host, user=user, keyfile=keyfile)
return rem
def get_local_machine():
return local
def with_machine_rule(cls):
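"""Class decorator: patches __init__ to set up a local or remote (Paramiko)
machine from the rule config, and wraps apply() to run against a temporary
copy of the project on that machine."""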
old_init = cls.__init__
def new_init(self, config):
if "machine" not in config:
machine_type = "local"
else:
machine_type = config["machine"]["type"]
if machine_type == "local":
self.machine = get_local_machine()
self.files_to_copy = None
elif machine_type == "remote":
if "keyfile" in config["machine"]:
self.machine = get_remote_machine(config["machine"]["host"], config["machine"]["user"], config["machine"]["keyfile"])
else:
self.machine = get_remote_machine_with_password(config["machine"]["host"], config["machine"]["user"])
self.files_to_copy = config["machine"].get("files_to_copy")
else:
raise ValueError(f"Invalid machine type: {config['machine']['type']}")
self.machine_type = machine_type
old_init(self, config)
cls.__init__ = new_init
old_apply = cls.apply
def new_apply(self, project):
with self.machine.tempdir() as tempdir:
project_path = tempdir / "project"
project_path.mkdir()
existing_files = set([f.name for f in project.root.list()])
if self.files_to_copy:
for fname in self.files_to_copy:
if fname in existing_files:
copy(project.root / fname, project_path / fname)
else:
for f in project.files():
if f.name in existing_files:
copy(f.path, project_path / f.name)
with self.machine.cwd(project_path):
self.session = self.machine.session()
self.session.run(f"cd {project_path}")
return old_apply(self, project)
cls.apply = new_apply
return cls
| 2.390625 | 2 |
Mundo 1/Ex33.py | legna7/Python | 0 | 2641 | <reponame>legna7/Python<gh_stars>0
salario = float(input('enter your salary: '))
aumento = (salario + (salario * 15)/100 if salario <= 1250 else salario + (salario * 10)/100)
print(aumento) | 3.8125 | 4 |
tests/test_tree.py | andreax79/airflow-code-editor | 194 | 2642 | <gh_stars>100-1000
#!/usr/bin/env python
import os
import os.path
import airflow
import airflow.plugins_manager
from airflow import configuration
from flask import Flask
from unittest import TestCase, main
from airflow_code_editor.commons import PLUGIN_NAME
from airflow_code_editor.tree import (
get_tree,
)
assert airflow.plugins_manager
app = Flask(__name__)
class TestTree(TestCase):
def setUp(self):
self.root_dir = os.path.dirname(os.path.realpath(__file__))
configuration.conf.set(PLUGIN_NAME, 'git_init_repo', 'False')
configuration.conf.set(PLUGIN_NAME, 'root_directory', self.root_dir)
def test_tree(self):
with app.app_context():
t = get_tree()
self.assertTrue(len(t) > 0)
self.assertTrue('git' in (x['id'] for x in t))
def test_tags(self):
with app.app_context():
t = get_tree("tags")
self.assertIsNotNone(t)
def test_local_branches(self):
with app.app_context():
t = get_tree("local-branches")
self.assertIsNotNone(t)
def test_remote_branches(self):
with app.app_context():
t = get_tree("remote-branches")
self.assertIsNotNone(t)
def test_files(self):
with app.app_context():
t = get_tree("files")
self.assertTrue(
len([x.get('id') for x in t if x.get('id') == 'test_utils.py']) == 1
)
t = get_tree("files/folder")
self.assertTrue(len([x.get('id') for x in t if x.get('id') == '1']) == 1)
def test_git(self):
with app.app_context():
t = get_tree("git/HEAD")
self.assertTrue(t is not None)
class TestTreeGitDisabled(TestCase):
def setUp(self):
self.root_dir = os.path.dirname(os.path.realpath(__file__))
configuration.conf.set(PLUGIN_NAME, 'git_init_repo', 'False')
configuration.conf.set(PLUGIN_NAME, 'root_directory', self.root_dir)
configuration.conf.set(PLUGIN_NAME, 'git_enabled', 'False')
def test_tree(self):
with app.app_context():
t = get_tree()
self.assertTrue(len(t) > 0)
self.assertTrue('git' not in (x['id'] for x in t))
t = get_tree("tags")
self.assertEqual(t, [])
t = get_tree("local-branches")
self.assertEqual(t, [])
t = get_tree("remote-branches")
self.assertEqual(t, [])
t = get_tree("files")
self.assertTrue(
len([x.get('id') for x in t if x.get('id') == 'test_utils.py']) == 1
)
t = get_tree("files/folder")
self.assertTrue(len([x.get('id') for x in t if x.get('id') == '1']) == 1)
if __name__ == '__main__':
main()
| 2.25 | 2 |
examples/token_freshness.py | greenape/flask-jwt-extended | 2 | 2643 | from quart import Quart, jsonify, request
from quart_jwt_extended import (
JWTManager,
jwt_required,
create_access_token,
jwt_refresh_token_required,
create_refresh_token,
get_jwt_identity,
fresh_jwt_required,
)
app = Quart(__name__)
app.config["JWT_SECRET_KEY"] = "super-secret" # Change this!
jwt = JWTManager(app)
# Standard login endpoint. Will return a fresh access token and
# a refresh token
@app.route("/login", methods=["POST"])
async def login():
username = (await request.get_json()).get("username", None)
password = (await request.get_json()).get("password", None)
if username != "test" or password != "<PASSWORD>":
return {"msg": "Bad username or password"}, 401
# create_access_token supports an optional 'fresh' argument,
# which marks the token as fresh or non-fresh accordingly.
# As we just verified their username and password, we are
# going to mark the token as fresh here.
ret = {
"access_token": create_access_token(identity=username, fresh=True),
"refresh_token": create_refresh_token(identity=username),
}
return ret, 200
# Refresh token endpoint. This will generate a new access token from
# the refresh token, but will mark that access token as non-fresh,
# as we do not actually verify a password in this endpoint.
@app.route("/refresh", methods=["POST"])
@jwt_refresh_token_required
async def refresh():
current_user = get_jwt_identity()
new_token = create_access_token(identity=current_user, fresh=False)
ret = {"access_token": new_token}
return ret, 200
# Fresh login endpoint. This is designed to be used if we need to
# make a fresh token for a user (by verifying they have the
# correct username and password). Unlike the standard login endpoint,
# this will only return a new access token, so that we don't keep
# generating new refresh tokens, which entirely defeats their point.
@app.route("/fresh-login", methods=["POST"])
async def fresh_login():
username = (await request.get_json()).get("username", None)
password = (await request.get_json()).get("password", None)
if username != "test" or password != "<PASSWORD>":
return {"msg": "Bad username or password"}, 401
new_token = create_access_token(identity=username, fresh=True)
ret = {"access_token": new_token}
return ret, 200
# Any valid JWT can access this endpoint
@app.route("/protected", methods=["GET"])
@jwt_required
async def protected():
username = get_jwt_identity()
return dict(logged_in_as=username), 200
# Only fresh JWTs can access this endpoint
@app.route("/protected-fresh", methods=["GET"])
@fresh_jwt_required
async def protected_fresh():
username = get_jwt_identity()
return dict(fresh_logged_in_as=username), 200
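# To call the protected endpoints, send the access token in the request
# header (the quart-jwt-extended default): Authorization: Bearer <access_token>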
if __name__ == "__main__":
app.run()
| 2.734375 | 3 |
env/lib/python3.7/site-packages/tinvest/typedefs.py | umchemurziev/Practics | 1 | 2644 | from datetime import datetime
from typing import Any, Dict, Union
__all__ = ('AnyDict',)  # a bare string here would break star-imports
AnyDict = Dict[str, Any] # pragma: no mutate
datetime_or_str = Union[datetime, str] # pragma: no mutate
| 2.625 | 3 |
keras/linear/model/pipeline_train.py | PipelineAI/models | 44 | 2645 | <gh_stars>10-100
import os
os.environ['KERAS_BACKEND'] = 'theano'
os.environ['THEANO_FLAGS'] = 'floatX=float32,device=cpu'
import cloudpickle as pickle
import pipeline_invoke
import pandas as pd
import numpy as np
import keras
from keras.layers import Input, Dense
from keras.models import Model
from keras.models import save_model, load_model
from sklearn.preprocessing import StandardScaler, MinMaxScaler, Normalizer
if __name__ == '__main__':
df = pd.read_csv("../input/training/training.csv")
df["People per Television"] = pd.to_numeric(df["People per Television"],errors='coerce')
df = df.dropna()
x = df["People per Television"].values.reshape(-1,1).astype(np.float64)
y = df["People per Physician"].values.reshape(-1,1).astype(np.float64)
# scale both series to the [-1, 1] min-max range
sc = MinMaxScaler(feature_range=(-1,1))
x_ = sc.fit_transform(x)
y_ = sc.fit_transform(y)
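# Note: the same scaler instance is re-fit on y, so sc.inverse_transform
# afterwards only undoes the y scaling, not the x scaling.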
inputs = Input(shape=(1,))
preds = Dense(1,activation='linear')(inputs)
model = Model(inputs=inputs,outputs=preds)
sgd = keras.optimizers.SGD()
model.compile(optimizer=sgd ,loss='mse')
model.fit(x_,y_, batch_size=1, verbose=1, epochs=10, shuffle=False)
save_model(model, 'state/keras_theano_linear_model_state.h5')
# model_pkl_path = 'model.pkl'
# with open(model_pkl_path, 'wb') as fh:
# pickle.dump(pipeline_invoke, fh)
| 2.25 | 2 |
tests/effects/test_cheerlights.py | RatJuggler/led-shim-effects | 1 | 2646 | from unittest import TestCase
from unittest.mock import Mock, patch
import sys
sys.modules['smbus'] = Mock() # Mock the hardware layer to avoid errors.
from ledshimdemo.canvas import Canvas
from ledshimdemo.effects.cheerlights import CheerLightsEffect
class TestCheerLights(TestCase):
TEST_CANVAS_SIZE = 3 # type: int
def test_cheerlight_call(self):
canvas = Canvas(self.TEST_CANVAS_SIZE)
effect = CheerLightsEffect(canvas)
self.assertIsNone(effect.get_colour_from_channel("http://ejiferfneciudwedwojcmeiocnw.com"))
@patch('ledshimdemo.effects.cheerlights.CheerLightsEffect.get_colour_from_channel', return_value=None)
def test_effect_failed_cheerlights(self, patch_function):
canvas = Canvas(self.TEST_CANVAS_SIZE)
effect = CheerLightsEffect(canvas)
effect.compose()
patch_function.assert_called_once()
for i in range(canvas.get_size()):
self.assertEqual(canvas.get_pixel(i), canvas.BLANK_PIXEL)
def test_effect_working_cheerlights(self):
canvas = Canvas(self.TEST_CANVAS_SIZE)
effect = CheerLightsEffect(canvas)
# Must check before and after in case it changes during the test.
before = effect.get_colour_from_channel(effect.URL)
effect.compose()
after = effect.get_colour_from_channel(effect.URL)
self.assertRegex(repr(effect), "^CheerLights\\(Colour:({0}|{1})\\)$".format(before, after))
| 2.796875 | 3 |
figures/Figure_7/02_generate_images.py | Jhsmit/ColiCoords-Paper | 2 | 2647 | from colicoords.synthetic_data import add_readout_noise, draw_poisson
from colicoords import load
import numpy as np
import mahotas as mh
from tqdm import tqdm
import os
import tifffile
def chunk_list(l, sizes):
prev = 0
for s in sizes:
result = l[prev:prev+s]
prev += s
yield result
def generate_images(cell_list, num_images, cell_per_img, cell_per_img_std, shape):
nums = np.round(np.random.normal(cell_per_img, cell_per_img_std, num_images)).astype(int)
nums = nums[nums > 0]
assert sum(nums) < len(cell_list), 'Not enough cells'
chunked = [chunk for chunk in tqdm(chunk_list(cell_list, nums))]
dicts = [generate_image(cells, shape) for cells in tqdm(chunked)]
out_dict = {}
for i, d in enumerate(dicts):
for k, v in d.items():
if 'storm' in k:
v['frame'] = i + 1
if k in out_dict:
out_dict[k] = np.append(out_dict[k], v)
else:
out_dict[k] = v
else:
if k in out_dict:
out_dict[k][i] = v
else:
out_dict[k] = np.zeros((num_images, *shape))
out_dict[k][i] = v
return out_dict
def generate_image(cells, shape, max_dist=5):
thetas = 360 * np.random.rand(len(cells))
data_list = [cell.data.rotate(theta) for cell, theta in zip(cells, thetas)]
assert all([data.names == data_list[0].names for data in data_list]), 'All cells must have the same data elements'
out_dict = {name: np.zeros(shape) for name, dclass in zip(data_list[0].names, data_list[0].dclasses) if dclass != 'storm'}
for i, data in enumerate(data_list):
valid_position = False
while not valid_position:
pos_x = int(np.round(shape[1] * np.random.rand()))
pos_y = int(np.round(shape[0] * np.random.rand()))
min1 = pos_y - int(np.floor(data.shape[0]))
max1 = min1 + data.shape[0]
min2 = pos_x - int(np.floor(data.shape[1]))
max2 = min2 + data.shape[1]
# Crop the data for when the cell is on the border of the image
d_min1 = np.max([0 - min1, 0])
d_max1 = np.min([data.shape[0] + (shape[0] - pos_y), data.shape[0]])
d_min2 = np.max([0 - min2, 0])
d_max2 = np.min([data.shape[1] + (shape[1] - pos_x), data.shape[1]])
data_cropped = data[d_min1:d_max1, d_min2:d_max2]
# Limit image position to the edges of the image
min1 = np.max([min1, 0])
max1 = np.min([max1, shape[0]])
min2 = np.max([min2, 0])
max2 = np.min([max2, shape[1]])
temp_binary = np.zeros(shape)
temp_binary[min1:max1, min2:max2] = data_cropped.binary_img
out_binary = (out_dict['binary'] > 0).astype(int)
distance_map = mh.distance(1 - out_binary, metric='euclidean')
if np.any(distance_map[temp_binary.astype(bool)] < max_dist):
continue
valid_position = True
for name in data.names:
data_elem = data_cropped.data_dict[name]
if data_elem.dclass == 'storm':
data_elem['x'] += min2
data_elem['y'] += min1
xmax, ymax = shape[1], shape[0]
bools = (data_elem['x'] < 0) + (data_elem['x'] > xmax) + (data_elem['y'] < 0) + (data_elem['y'] > ymax)
data_out = data_elem[~bools].copy()
if name in out_dict:
out_dict[name] = np.append(out_dict[name], data_out)
else:
out_dict[name] = data_out
continue
elif data_elem.dclass == 'binary':
out_dict[name][min1:max1, min2:max2] += ((i+1)*data_elem)
else:
out_dict[name][min1:max1, min2:max2] += data_elem
return out_dict
def gen_image_from_storm(storm_table, shape, sigma=1.54, sigma_std=0.3):
xmax = shape[1]
ymax = shape[0]
step = 1
xi = np.arange(step / 2, xmax, step)
yi = np.arange(step / 2, ymax, step)
x_coords = np.repeat(xi, len(yi)).reshape(len(xi), len(yi)).T
y_coords = np.repeat(yi, len(xi)).reshape(len(yi), len(xi))
x, y = storm_table['x'], storm_table['y']
img = np.zeros_like(x_coords)
intensities = storm_table['intensity']
sigma = sigma * np.ones_like(x) if not sigma_std else np.random.normal(sigma, sigma_std, size=len(x))
for _sigma, _int, _x, _y in zip(sigma, intensities, x, y):
img += _int * np.exp(-(((_x - x_coords) / _sigma) ** 2 + ((_y - y_coords) / _sigma) ** 2) / 2)
return img
def gen_im(data_dir):
"""Generate microscopy images from a list of cell objects by placing them randomly oriented in the image."""
cell_list = load(os.path.join(data_dir, 'cell_obj', 'cells_final_selected.hdf5'))
out_dict = generate_images(cell_list, 1000, 10, 3, (512, 512))
if not os.path.exists(os.path.join(data_dir, 'images')):
os.mkdir(os.path.join(data_dir, 'images'))
np.save(os.path.join(data_dir, 'images', 'binary.npy'), out_dict['binary'])
np.save(os.path.join(data_dir, 'images', 'brightfield.npy'), out_dict['brightfield'])
np.save(os.path.join(data_dir, 'images', 'foci_inner.npy'), out_dict['foci_inner'])
np.save(os.path.join(data_dir, 'images', 'foci_outer.npy'), out_dict['foci_outer'])
np.save(os.path.join(data_dir, 'images', 'storm_inner.npy'), out_dict['storm_inner'])
np.save(os.path.join(data_dir, 'images', 'storm_outer.npy'), out_dict['storm_outer'])
tifffile.imsave(os.path.join(data_dir, 'images', 'binary.tif'), out_dict['binary'])
tifffile.imsave(os.path.join(data_dir, 'images', 'brightfield.tif'), out_dict['brightfield'])
tifffile.imsave(os.path.join(data_dir, 'images', 'foci_inner.tif'), out_dict['foci_inner'])
tifffile.imsave(os.path.join(data_dir, 'images', 'foci_outer.tif'), out_dict['foci_outer'])
np.savetxt(os.path.join(data_dir, 'images', 'storm_inner.txt'), out_dict['storm_inner'])
    np.savetxt(os.path.join(data_dir, 'images', 'storm_outer.txt'), out_dict['storm_outer'])
def noise_bf(data_dir):
"""add poissonian and readout noise to brightfield images"""
noise = 20
img_stack = np.load(os.path.join(data_dir, 'images', 'brightfield.npy'))
for photons in [10000, 1000, 500]:
ratio = 1.0453 # ratio between 'background' (no cells) and cell wall
img = (photons*(ratio-1))*img_stack + photons
img = draw_poisson(img)
img = add_readout_noise(img, noise)
tifffile.imsave(os.path.join(data_dir, 'images', 'bf_noise_{}_photons.tif'.format(photons)), img)
np.save(os.path.join(data_dir, 'images', 'bf_noise_{}_photons.npy'.format(photons)), img)
if __name__ == '__main__':
np.random.seed(42)
data_dir = r'.'
if not os.path.exists(os.path.join(data_dir, 'images')):
os.mkdir(os.path.join(data_dir, 'images'))
gen_im(data_dir)
noise_bf(data_dir)
| 2.328125 | 2 |
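The `chunk_list` generator at the top of the script above slices a sequence into consecutive chunks of the given sizes; a quick self-contained check of that behaviour:

def chunk_list(l, sizes):
    prev = 0
    for s in sizes:
        yield l[prev:prev + s]
        prev += s

# Consecutive, non-overlapping chunks; items beyond sum(sizes) are ignored.
print(list(chunk_list(list(range(10)), [3, 2, 4])))
# -> [[0, 1, 2], [3, 4], [5, 6, 7, 8]]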
epiphancloud/models/settings.py | epiphan-video/epiphancloud_api | 0 | 2648 | <filename>epiphancloud/models/settings.py
class DeviceSettings:
def __init__(self, settings):
self._id = settings["id"]
self._title = settings["title"]
self._type = settings["type"]["name"]
self._value = settings["value"]
@property
def id(self):
return self._id
@property
def value(self):
return self._value
| 2.296875 | 2 |
dictionary.py | SchmitzAndrew/OSS-101-example | 0 | 2649 | <filename>dictionary.py
word = input("Enter a word: ")
if word == "a":
print("one; any")
elif word == "apple":
print("familiar, round fleshy fruit")
elif word == "rhinoceros":
print("large thick-skinned animal with one or two horns on its nose")
else:
print("That word must not exist. This dictionary is very comprehensive.")
| 4.03125 | 4 |
solved_bronze/num11720.py | ilmntr/white_study | 0 | 2650 | <gh_stars>0
cnt = int(input())  # number of digits (unused; the second line's length suffices)
num = list(map(int, input()))
total = 0  # use a name that does not shadow the built-in sum()
for i in range(len(num)):
    total = total + num[i]
print(total) | 3.21875 | 3 |
setup.py | sdnhub/kube-navi | 0 | 2651 | <gh_stars>0
from distutils.core import setup
setup(
name = 'kube_navi',
packages = ['kube_navi'], # this must be the same as the name above
version = '0.1',
description = 'Kubernetes resource discovery toolkit',
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/sdnhub/kube-navi', # use the URL to the github repo
download_url = 'https://github.com/sdnhub/kube-navi/archive/0.1.tar.gz', # I'll explain this in a second
keywords = ['testing', 'logging', 'example'], # arbitrary keywords
classifiers = [],
)
| 1.515625 | 2 |
flink-ai-flow/ai_flow/metric/utils.py | MarvinMiao/flink-ai-extended | 1 | 2652 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import ast
from typing import Text, Optional, Union, List
from ai_flow.rest_endpoint.protobuf.metric_service_pb2 import MetricMetaResponse, ListMetricMetaResponse, \
MetricSummaryResponse, ListMetricSummaryResponse
from ai_flow.rest_endpoint.service import int64Value, stringValue
from ai_flow.common.properties import Properties
from ai_flow.meta.metric_meta import MetricMeta, MetricType, MetricSummary
from ai_flow.rest_endpoint.protobuf.message_pb2 import MetricMetaProto, MetricSummaryProto, MetricTypeProto, ReturnCode, \
SUCCESS, RESOURCE_DOES_NOT_EXIST
from ai_flow.store.db.db_model import SqlMetricMeta, SqlMetricSummary
from ai_flow.store.db.db_model import MongoMetricSummary, MongoMetricMeta
def table_to_metric_meta(metric_meta_result) -> MetricMeta:
properties = metric_meta_result.properties
if properties is not None:
properties = ast.literal_eval(properties)
return MetricMeta(uuid=metric_meta_result.uuid,
name=metric_meta_result.name,
dataset_id=metric_meta_result.dataset_id,
model_name=metric_meta_result.model_name,
model_version=metric_meta_result.model_version,
job_id=metric_meta_result.job_id,
start_time=metric_meta_result.start_time,
end_time=metric_meta_result.end_time,
metric_type=MetricType.value_of(metric_meta_result.metric_type),
uri=metric_meta_result.uri,
tags=metric_meta_result.tags,
metric_description=metric_meta_result.metric_description,
properties=properties)
def table_to_metric_summary(metric_summary_result) -> MetricSummary:
return MetricSummary(uuid=metric_summary_result.uuid,
metric_id=metric_summary_result.metric_id,
metric_key=metric_summary_result.metric_key,
metric_value=metric_summary_result.metric_value)
def metric_meta_to_table(name: Text,
dataset_id: int,
model_name: Optional[Text],
model_version: Optional[Text],
job_id: int,
start_time: int,
end_time: int,
metric_type: MetricType,
uri: Text,
tags: Text,
metric_description: Text,
properties: Properties,
store_type: Text = 'SqlAlchemyStore'):
if properties is not None:
properties = str(properties)
if store_type == 'MongoStore':
_class = MongoMetricMeta
else:
_class = SqlMetricMeta
return _class(name=name,
dataset_id=dataset_id,
model_name=model_name,
model_version=model_version,
job_id=job_id,
start_time=start_time,
end_time=end_time,
metric_type=metric_type.value,
uri=uri,
tags=tags,
metric_description=metric_description,
properties=properties)
def metric_summary_to_table(metric_id: int,
metric_key: Text,
metric_value: Text,
store_type: Text = 'SqlAlchemyStore'):
if store_type == 'MongoStore':
_class = MongoMetricSummary
else:
_class = SqlMetricSummary
return _class(metric_id=metric_id,
metric_key=metric_key,
metric_value=metric_value)
def metric_meta_to_proto(metric_meta: MetricMeta) -> MetricMetaProto:
if metric_meta.metric_type == MetricType.DATASET:
metric_type = MetricTypeProto.DATASET
else:
metric_type = MetricTypeProto.MODEL
return MetricMetaProto(uuid=metric_meta.uuid,
name=stringValue(metric_meta.name),
dataset_id=int64Value(metric_meta.dataset_id),
model_name=stringValue(metric_meta.model_name),
model_version=stringValue(metric_meta.model_version),
job_id=int64Value(metric_meta.job_id),
start_time=int64Value(metric_meta.start_time),
end_time=int64Value(metric_meta.end_time),
metric_type=metric_type,
uri=stringValue(metric_meta.uri),
tags=stringValue(metric_meta.tags),
metric_description=stringValue(metric_meta.metric_description),
properties=metric_meta.properties)
def metric_summary_to_proto(metric_summary: MetricSummary) -> MetricSummaryProto:
return MetricSummaryProto(uuid=metric_summary.uuid,
metric_id=int64Value(metric_summary.metric_id),
metric_key=stringValue(metric_summary.metric_key),
metric_value=stringValue(metric_summary.metric_value))
def proto_to_metric_meta(metric_meta_proto: MetricMetaProto) -> MetricMeta:
if MetricTypeProto.DATASET == metric_meta_proto.metric_type:
metric_type = MetricType.DATASET
else:
metric_type = MetricType.MODEL
return MetricMeta(uuid=metric_meta_proto.uuid,
name=metric_meta_proto.name.value,
dataset_id=metric_meta_proto.dataset_id.value,
model_name=metric_meta_proto.model_name.value,
model_version=metric_meta_proto.model_version.value,
job_id=metric_meta_proto.job_id.value,
start_time=metric_meta_proto.start_time.value,
end_time=metric_meta_proto.end_time.value,
metric_type=metric_type,
uri=metric_meta_proto.uri.value if metric_meta_proto.HasField('uri') else None,
tags=metric_meta_proto.tags.value if metric_meta_proto.HasField('tags') else None,
metric_description=metric_meta_proto.metric_description.value
if metric_meta_proto.HasField('metric_description') else None,
properties=metric_meta_proto.properties
)
def proto_to_metric_summary(metric_summary_proto: MetricSummaryProto) -> MetricSummary:
return MetricSummary(uuid=metric_summary_proto.uuid,
metric_id=metric_summary_proto.metric_id.value,
metric_key=metric_summary_proto.metric_key.value
if metric_summary_proto.HasField('metric_key') else None,
metric_value=metric_summary_proto.metric_value.value
if metric_summary_proto.HasField('metric_value') else None
)
def _warp_metric_meta_response(metric_meta: Optional[MetricMeta]) -> MetricMetaResponse:
if metric_meta is not None:
return MetricMetaResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(),
metric_meta=metric_meta_to_proto(metric_meta))
else:
return MetricMetaResponse(return_code=1,
return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(),
metric_meta=None)
def _warp_list_metric_meta_response(metric_meta: Union[None, MetricMeta, List[MetricMeta]]) -> ListMetricMetaResponse:
if metric_meta is not None:
if isinstance(metric_meta, MetricMeta):
return ListMetricMetaResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(),
metric_meta=[metric_meta_to_proto(metric_meta)])
else:
res = []
for meta in metric_meta:
res.append(metric_meta_to_proto(meta))
return ListMetricMetaResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(),
metric_meta=res)
else:
return ListMetricMetaResponse(return_code=1,
return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(),
metric_meta=None)
def _warp_metric_summary_response(metric_summary: Optional[MetricSummary]) -> MetricSummaryResponse:
if metric_summary is not None:
return MetricSummaryResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(),
metric_summary=metric_summary_to_proto(metric_summary))
else:
return MetricSummaryResponse(return_code=1,
return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(),
metric_summary=None)
def _warp_list_metric_summary_response(metric_summary: Optional[List[MetricSummary]]) -> ListMetricSummaryResponse:
if metric_summary is not None:
res = []
for summary in metric_summary:
res.append(metric_summary_to_proto(summary))
return ListMetricSummaryResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(),
metric_summary=res)
else:
return ListMetricSummaryResponse(return_code=1,
return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(),
metric_summary=None)
| 1.507813 | 2 |
src/moduels/gui/Tab_Help.py | HaujetZhao/Caps_Writer | 234 | 2653 | <reponame>HaujetZhao/Caps_Writer<filename>src/moduels/gui/Tab_Help.py
# -*- coding: UTF-8 -*-
from PySide2.QtWidgets import QWidget, QPushButton, QVBoxLayout
from PySide2.QtCore import Signal
from moduels.component.NormalValue import 常量
from moduels.component.SponsorDialog import SponsorDialog
import os, webbrowser
class Tab_Help(QWidget):
状态栏消息 = Signal(str, int)
def __init__(self):
super().__init__()
        self.initElement()  # initialize the widgets first
        self.initSlots()  # then connect the widgets to their signal slots
        self.initLayout()  # then lay out the widgets
        self.initValue()  # finally set the widgets' values
def initElement(self):
self.打开帮助按钮 = QPushButton(self.tr('打开帮助文档'))
self.ffmpegMannualNoteButton = QPushButton(self.tr('查看作者的 FFmpeg 笔记'))
self.openVideoHelpButtone = QPushButton(self.tr('查看视频教程'))
self.openGiteePage = QPushButton(self.tr(f'当前版本是 v{常量.软件版本},到 Gitee 检查新版本'))
self.openGithubPage = QPushButton(self.tr(f'当前版本是 v{常量.软件版本},到 Github 检查新版本'))
self.linkToDiscussPage = QPushButton(self.tr('加入 QQ 群'))
self.tipButton = QPushButton(self.tr('打赏作者'))
self.masterLayout = QVBoxLayout()
def initSlots(self):
self.打开帮助按钮.clicked.connect(self.openHelpDocument)
self.ffmpegMannualNoteButton.clicked.connect(lambda: webbrowser.open(self.tr(r'https://hacpai.com/article/1595480295489')))
self.openVideoHelpButtone.clicked.connect(lambda: webbrowser.open(self.tr(r'https://www.bilibili.com/video/BV12A411p73r/')))
self.openGiteePage.clicked.connect(lambda: webbrowser.open(self.tr(r'https://gitee.com/haujet/CapsWriter/releases')))
self.openGithubPage.clicked.connect(lambda: webbrowser.open(self.tr(r'https://github.com/HaujetZhao/CapsWriter/releases')))
self.linkToDiscussPage.clicked.connect(lambda: webbrowser.open(
self.tr(r'https://qm.qq.com/cgi-bin/qm/qr?k=DgiFh5cclAElnELH4mOxqWUBxReyEVpm&jump_from=webapi')))
self.tipButton.clicked.connect(lambda: SponsorDialog(self))
def initLayout(self):
self.setLayout(self.masterLayout)
# self.masterLayout.addWidget(self.打开帮助按钮)
# self.masterLayout.addWidget(self.ffmpegMannualNoteButton)
self.masterLayout.addWidget(self.openVideoHelpButtone)
self.masterLayout.addWidget(self.openGiteePage)
self.masterLayout.addWidget(self.openGithubPage)
self.masterLayout.addWidget(self.linkToDiscussPage)
self.masterLayout.addWidget(self.tipButton)
def initValue(self):
self.打开帮助按钮.setMaximumHeight(100)
self.ffmpegMannualNoteButton.setMaximumHeight(100)
self.openVideoHelpButtone.setMaximumHeight(100)
self.openGiteePage.setMaximumHeight(100)
self.openGithubPage.setMaximumHeight(100)
self.linkToDiscussPage.setMaximumHeight(100)
self.tipButton.setMaximumHeight(100)
def openHelpDocument(self):
try:
if 常量.系统平台 == 'Darwin':
import shlex
os.system("open " + shlex.quote(self.tr("./misc/Docs/README_zh.html")))
elif 常量.系统平台 == 'Windows':
os.startfile(os.path.realpath(self.tr('./misc/Docs/README_zh.html')))
except:
print('未能打开帮助文档')
| 2.15625 | 2 |
app/routes/register.py | AuFeld/COAG | 1 | 2654 | <gh_stars>1-10
from typing import Callable, Optional, Type, cast
from fastapi import APIRouter, HTTPException, Request, status
from app.models import users
from app.common.user import ErrorCode, run_handler
from app.users.user import (
CreateUserProtocol,
InvalidPasswordException,
UserAlreadyExists,
ValidatePasswordProtocol,
)
def get_register_router(
create_user: CreateUserProtocol,
user_model: Type[users.BaseUser],
user_create_model: Type[users.BaseUserCreate],
after_register: Optional[Callable[[users.UD, Request], None]] = None,
validate_password: Optional[ValidatePasswordProtocol] = None,
) -> APIRouter:
"""Generate a router with the register route."""
router = APIRouter()
@router.post(
"/register", response_model=user_model, status_code=status.HTTP_201_CREATED
)
async def register(request: Request, user: user_create_model): # type: ignore
        user = cast(users.BaseUserCreate, user)  # type: ignore  # Prevent mypy from complaining
if validate_password:
try:
await validate_password(user.password, user)
except InvalidPasswordException as e:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail={
"code": ErrorCode.REGISTER_INVALID_PASSWORD,
"reason": e.reason,
},
)
try:
created_user = await create_user(user, safe=True)
except UserAlreadyExists:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=ErrorCode.REGISTER_USER_ALREADY_EXISTS,
)
if after_register:
await run_handler(after_register, created_user, request)
return created_user
return router
| 2.609375 | 3 |
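A minimal sketch of mounting the factory above on an application; `create_user`, `UserModel`, and `UserCreateModel` are hypothetical stand-ins for a CreateUserProtocol implementation and the request/response schemas:

from fastapi import FastAPI

from app.routes.register import get_register_router  # the factory above

app = FastAPI()
app.include_router(
    get_register_router(
        create_user=create_user,            # hypothetical coroutine
        user_model=UserModel,               # hypothetical response schema
        user_create_model=UserCreateModel,  # hypothetical request schema
    ),
    prefix="/auth",
)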
utils/visual.py | xizaoqu/Panoptic-PolarNet | 90 | 2655 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import cv2
import numpy as np
def flow_to_img(flow, normalize=True):
"""Convert flow to viewable image, using color hue to encode flow vector orientation, and color saturation to
encode vector length. This is similar to the OpenCV tutorial on dense optical flow, except that they map vector
length to the value plane of the HSV color model, instead of the saturation plane, as we do here.
Args:
flow: optical flow
normalize: Normalize flow to 0..255
Returns:
img: viewable representation of the dense optical flow in RGB format
Ref:
https://github.com/philferriere/tfoptflow/blob/33e8a701e34c8ce061f17297d40619afbd459ade/tfoptflow/optflow.py
"""
hsv = np.zeros((flow.shape[0], flow.shape[1], 3), dtype=np.uint8)
flow_magnitude, flow_angle = cv2.cartToPolar(flow[..., 0].astype(np.float32), flow[..., 1].astype(np.float32))
# A couple times, we've gotten NaNs out of the above...
nans = np.isnan(flow_magnitude)
if np.any(nans):
nans = np.where(nans)
flow_magnitude[nans] = 0.
# Normalize
hsv[..., 0] = flow_angle * 180 / np.pi / 2
if normalize is True:
hsv[..., 1] = cv2.normalize(flow_magnitude, None, 0, 255, cv2.NORM_MINMAX)
else:
hsv[..., 1] = flow_magnitude
hsv[..., 2] = 255
img = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
return img | 3.875 | 4 |
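A small usage sketch for the converter above, driving it with a synthetic flow field (the import path follows this file's location, utils/visual.py):

import numpy as np

from utils.visual import flow_to_img

# Synthetic flow: every pixel moves 3 px right and 1 px up.
flow = np.zeros((64, 64, 2), dtype=np.float32)
flow[..., 0] = 3.0   # horizontal component
flow[..., 1] = -1.0  # vertical component

rgb = flow_to_img(flow)  # hue encodes direction, saturation encodes length
assert rgb.shape == (64, 64, 3) and rgb.dtype == np.uint8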
DistributedRL/Gateway/build/Code/sim/Parser/LAI/GreenIndex.py | zhkmxx9302013/SoftwarePilot | 4 | 2656 | <filename>DistributedRL/Gateway/build/Code/sim/Parser/LAI/GreenIndex.py<gh_stars>1-10
import argparse
from PIL import Image, ImageStat
import math
parser = argparse.ArgumentParser()
parser.add_argument('fname')
parser.add_argument('pref', default="", nargs="?")
args = parser.parse_args()
im = Image.open(args.fname)
RGB = im.convert('RGB')
imWidth, imHeight = im.size
ratg = 1.2    # green must exceed red * ratg to count as leaf
ratgb = 1.66  # green must exceed blue * ratgb
ming = 10     # minimum green level (unused below)
ratr = 2      # red must exceed blue * ratr
speed = 8     # sample every 8th pixel for speed
leafcount = 0
total = 0
for i in range(0, int(imWidth/speed)):
for j in range(0, int(imHeight/speed)):
R,G,B = RGB.getpixel((i*speed,j*speed))
if R*ratg < G and B*ratgb < G and B*ratr < R:
leafcount = leafcount + 1
total = total+1
print("LAI="+str(float(leafcount)/total))
| 2.4375 | 2 |
reports/heliosV1/python/heliosStorageStats/heliosStorageStats.py | ped998/scripts | 0 | 2657 | <gh_stars>0
#!/usr/bin/env python
"""cluster storage stats for python"""
# import pyhesity wrapper module
from pyhesity import *
from datetime import datetime
import codecs
# command line arguments
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--vip', type=str, default='helios.cohesity.com') # cluster to connect to
parser.add_argument('-u', '--username', type=str, required=True) # username
parser.add_argument('-d', '--domain', type=str, default='local') # (optional) domain - defaults to local
parser.add_argument('-pwd', '--password', type=str, default=None) # optional password
parser.add_argument('-n', '--unit', type=str, choices=['GiB', 'TiB', 'gib', 'tib'], default='TiB')
args = parser.parse_args()
vip = args.vip
username = args.username
domain = args.domain
password = args.password
unit = args.unit
if unit.lower() == 'tib':
multiplier = 1024 * 1024 * 1024 * 1024
unit = 'TiB'
else:
multiplier = 1024 * 1024 * 1024
unit = 'GiB'
def toUnits(value):
return round(float(value) / multiplier, 1)
# authenticate
apiauth(vip=vip, username=username, domain=domain, password=password, useApiKey=True, noretry=True)
# outfile
now = datetime.now()
# cluster = api('get', 'cluster')
dateString = now.strftime("%Y-%m-%d")
outfile = 'heliosStorageStats-%s.csv' % dateString
f = codecs.open(outfile, 'w')
# headings
f.write('Cluster,Capacity (%s),Consumed (%s),Free (%s),Used %%,Data In (%s),Data Written (%s),Storage Reduction,Data Reduction\n' % (unit, unit, unit, unit, unit))
stats = {}
def parseStats(clusterName, dataPoint, statName):
if clusterName not in stats.keys():
stats[clusterName] = {}
stats[clusterName][statName] = dataPoint['data']['int64Value']
endMsecs = dateToUsecs(now.strftime("%Y-%m-%d %H:%M:%S")) / 1000
startMsecs = (timeAgo(2, 'days')) / 1000
print('\nGathering cluster stats:\n')
for cluster in heliosClusters():
heliosCluster(cluster)
print(' %s' % cluster['name'])
capacityStats = api('get', 'statistics/timeSeriesStats?endTimeMsecs=%s&entityId=%s&metricName=kCapacityBytes&metricUnitType=0&range=day&rollupFunction=average&rollupIntervalSecs=86400&schemaName=kBridgeClusterStats&startTimeMsecs=%s' % (endMsecs, cluster['clusterId'], startMsecs))
consumedStats = api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=kBridgeClusterTierPhysicalStats&metricName=kMorphedUsageBytes&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s:Local&endTimeMsecs=%s' % (startMsecs, cluster['clusterId'], endMsecs))
dataInStats = api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=ApolloV2ClusterStats&metricName=BrickBytesLogical&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s (ID %s)&endTimeMsecs=%s' % (startMsecs, cluster['name'], cluster['clusterId'], endMsecs))
dataWrittenStats = api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=ApolloV2ClusterStats&metricName=ChunkBytesMorphed&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s (ID %s)&endTimeMsecs=%s' % (startMsecs, cluster['name'], cluster['clusterId'], endMsecs))
logicalSizeStats = api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=kBridgeClusterLogicalStats&metricName=kUnmorphedUsageBytes&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s&endTimeMsecs=%s' % (startMsecs, cluster['clusterId'], endMsecs))
parseStats(cluster['name'], capacityStats['dataPointVec'][0], 'capacity')
parseStats(cluster['name'], consumedStats['dataPointVec'][0], 'consumed')
parseStats(cluster['name'], dataInStats['dataPointVec'][0], 'dataIn')
parseStats(cluster['name'], dataWrittenStats['dataPointVec'][0], 'dataWritten')
parseStats(cluster['name'], logicalSizeStats['dataPointVec'][0], 'logicalSize')
for clusterName in sorted(stats.keys()):
capacity = stats[clusterName]['capacity']
consumed = stats[clusterName]['consumed']
dataIn = stats[clusterName]['dataIn']
dataWritten = stats[clusterName]['dataWritten']
logicalSize = stats[clusterName]['logicalSize']
free = capacity - consumed
pctUsed = round(100 * consumed / capacity, 0)
storageReduction = round(float(logicalSize) / consumed, 1)
dataReduction = round(float(dataIn) / dataWritten, 1)
f.write('"%s","%s","%s","%s","%s","%s","%s","%s","%s"\n' % (clusterName, toUnits(capacity), toUnits(consumed), toUnits(free), pctUsed, toUnits(dataIn), toUnits(dataWritten), storageReduction, dataReduction))
f.close()
print('\nOutput saved to %s\n' % outfile)
| 2.40625 | 2 |
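The reduction columns written by the script above are plain ratios; a worked example with made-up byte counts:

TiB = 1024 ** 4  # the same multiplier the script uses for 'TiB'

capacity, consumed = 100 * TiB, 40 * TiB
logical_size = 120 * TiB          # bytes as seen by clients
data_in, data_written = 30 * TiB, 12 * TiB

print(round(100 * consumed / capacity))          # 40  (% used)
print(round(float(logical_size) / consumed, 1))  # 3.0 (storage reduction)
print(round(float(data_in) / data_written, 1))   # 2.5 (data reduction)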
src/advanceoperate/malimgthread.py | zengrx/S.M.A.R.T | 10 | 2658 | <reponame>zengrx/S.M.A.R.T<filename>src/advanceoperate/malimgthread.py<gh_stars>1-10
#coding=utf-8
from PyQt4 import QtCore
import os, glob, numpy, sys
from PIL import Image
from sklearn.cross_validation import StratifiedKFold
from sklearn.metrics import confusion_matrix
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import BallTree
from sklearn import cross_validation
from sklearn.utils import shuffle
import sklearn
import leargist
import cPickle
import random
import sys
reload(sys)
sys.setdefaultencoding( "utf-8" )
class ValidationResult(QtCore.QThread):
finishSignal = QtCore.pyqtSignal(list)
def __init__(self, parent=None):
super(ValidationResult, self).__init__(parent)
def getClassifyLabel(self):
        X = numpy.load("./datafiles/img_features.npy")  # features
        y = numpy.load("./datafiles/img_labels.npy")  # labels
        n = cPickle.load(open("./datafiles/img.p","rb"))  # family names
        l = cPickle.load(open("./datafiles/imglabel.p", "rb"))  # [family index, index within family, filename, overall index]
return X, y, n ,l
    '''
    Prepare the data used to draw the confusion matrix.
    @X: feature matrix
    @y: labels
    @n: family names of all samples
    @l: number of samples in each family
    '''
def prepareData2Matrix(self, X, y, n, l):
n_samples, useless = X.shape
p = range(n_samples)
random.seed(random.random())
random.shuffle(p)
X, y = X[p], y[p] # 打乱数组
kfold = 10 # 10重
skf = StratifiedKFold(y,kfold)
skfind = [None] * len(skf)
cnt = 0
for train_index in skf:
skfind[cnt] = train_index
cnt += 1
list_fams = n
cache = []
no_imgs = []
for l_list in l:
if 0 == l_list[1]:
# print l[l_list[3] - 1]
# print l_list
cache.append(l[l_list[3] - 1][1] + 1)
no_imgs = cache[1:len(cache)]
no_imgs.append(cache[0])
        # print no_imgs  # number of files contained in each family
        conf_mat = numpy.zeros((len(no_imgs), len(no_imgs)))  # initialize the confusion matrix
n_neighbors = 5
# 10-fold Cross Validation
for i in range(kfold):
train_indices = skfind[i][0]
test_indices = skfind[i][1]
clf = []
clf = KNeighborsClassifier(n_neighbors, weights='distance')
X_train = X[train_indices]
y_train = y[train_indices]
X_test = X[test_indices]
y_test = y[test_indices]
# Training
import time
tic = time.time()
clf.fit(X_train,y_train)
toc = time.time()
print "training time= ", toc-tic # roughly 2.5 secs
# Testing
y_predict = []
tic = time.time()
y_predict = clf.predict(X_test) # output is labels and not indices
toc = time.time()
print "testing time = ", toc-tic # roughly 0.3 secs
# Compute confusion matrix
cm = []
cm = confusion_matrix(y_test,y_predict)
conf_mat = conf_mat + cm
return conf_mat, no_imgs, list_fams
def run(self):
print "start draw"
X, y, n, l = self.getClassifyLabel()
cm, nimg, listf = self.prepareData2Matrix(X, y, n, l)
msg = [cm, nimg, listf]
self.finishSignal.emit(msg)
class MalwareImageClass(QtCore.QThread):
malwarSignal = QtCore.pyqtSignal(int, list)
concluSignal = QtCore.pyqtSignal(int, list)
def __init__(self, filename, parent=None):
super(MalwareImageClass, self).__init__(parent)
self.filename = str(filename)#.encode('cp936')
self.feature = ''
    '''
    Load the training results:
    features, labels, file names and their corresponding indices
    '''
def getClassifyLabel(self):
        X = numpy.load("./datafiles/img_features.npy")  # features
        y = numpy.load("./datafiles/img_labels.npy")  # labels
        n = cPickle.load(open("./datafiles/img.p","rb"))  # family names
        l = cPickle.load(open("./datafiles/imglabel.p", "rb"))  # [family index, index within family, filename, overall index]
return X, y, n ,l
    '''
    Classify an image.
    train@training-set features
    label@training-set labels
    '''
def classifyImage(self, feature_X, label_y, number):
im = Image.open(self.filename)
        im1 = im.resize((64,64), Image.ANTIALIAS)  # resize to 64x64
        des = leargist.color_gist(im1)  # 960 values
        feature = des[0:320]  # grayscale GIST: only the first 320 values are needed
query_feature = feature.reshape(1, -1)
self.feature = query_feature
        # fetch the features and labels
X = feature_X
y = label_y
n = number
n_neighbors = 5; # better to have this at the start of the code
knn = KNeighborsClassifier(n_neighbors, weights='distance')
knn.fit(X, y)
num = int(knn.predict(query_feature))
classname = n[num]
proba = knn.predict_proba(query_feature)
msg = [num, classname, proba]
self.malwarSignal.emit(1, msg)
    '''
    Use a BallTree to find the most similar samples in the dataset.
    Returns the distance values and the sample index numbers.
    '''
def findMostSimilarImg(self, feature_X, serial):
X = feature_X
b = BallTree(X)
        # the 3 most similar samples
dist, ind = b.query(self.feature, k=3)
print dist, ind
ind = ind[0]
# print ind
l = serial
imgs = []
for rank in ind:
# print rank
for name in l:
if rank == name[3]:
# print name
imgs.append(name[2])
self.concluSignal.emit(2, imgs)
def run(self):
X, y, n ,l = self.getClassifyLabel()
self.classifyImage(X, y, n)
self.findMostSimilarImg(X, l)
| 2.140625 | 2 |
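The classifier in the thread above reduces to "GIST descriptor in, nearest-neighbour label out"; a condensed sketch of that pipeline, assuming `X` and `y` are the arrays loaded by `getClassifyLabel`:

from PIL import Image
import leargist
from sklearn.neighbors import KNeighborsClassifier

def gist_feature(path):
    im = Image.open(path).resize((64, 64), Image.ANTIALIAS)
    des = leargist.color_gist(im)  # 960-dimensional colour GIST
    return des[0:320]              # grayscale portion, as in the class above

knn = KNeighborsClassifier(5, weights='distance')
knn.fit(X, y)  # X, y: features and labels from getClassifyLabel()
label = knn.predict(gist_feature('sample.png').reshape(1, -1))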
tests/unittests/command_parse/test_stream.py | itamarhaber/iredis | 1,857 | 2659 | def test_xrange(judge_command):
judge_command(
"XRANGE somestream - +",
{"command": "XRANGE", "key": "somestream", "stream_id": ["-", "+"]},
)
judge_command(
"XRANGE somestream 1526985054069 1526985055069",
{
"command": "XRANGE",
"key": "somestream",
"stream_id": ["1526985054069", "1526985055069"],
},
)
judge_command(
"XRANGE somestream 1526985054069 1526985055069-10",
{
"command": "XRANGE",
"key": "somestream",
"stream_id": ["1526985054069", "1526985055069-10"],
},
)
judge_command(
"XRANGE somestream 1526985054069 1526985055069-10 count 10",
{
"command": "XRANGE",
"key": "somestream",
"stream_id": ["1526985054069", "1526985055069-10"],
"count_const": "count",
"count": "10",
},
)
def test_xgroup_create(judge_command):
judge_command(
"XGROUP CREATE mykey mygroup 123",
{
"command": "XGROUP",
"stream_create": "CREATE",
"key": "mykey",
"group": "mygroup",
"stream_id": "123",
},
)
judge_command(
"XGROUP CREATE mykey mygroup $",
{
"command": "XGROUP",
"stream_create": "CREATE",
"key": "mykey",
"group": "mygroup",
"stream_id": "$",
},
)
    # missing a parameter
judge_command("XGROUP CREATE mykey mygroup", None)
judge_command("XGROUP CREATE mykey", None)
def test_xgroup_setid(judge_command):
judge_command(
"XGROUP SETID mykey mygroup 123",
{
"command": "XGROUP",
"stream_setid": "SETID",
"key": "mykey",
"group": "mygroup",
"stream_id": "123",
},
)
judge_command(
"XGROUP SETID mykey mygroup $",
{
"command": "XGROUP",
"stream_setid": "SETID",
"key": "mykey",
"group": "mygroup",
"stream_id": "$",
},
)
# two subcommand together shouldn't match
judge_command("XGROUP CREATE mykey mygroup 123 SETID mykey mygroup $", None)
def test_xgroup_destroy(judge_command):
judge_command(
"XGROUP destroy mykey mygroup",
{
"command": "XGROUP",
"stream_destroy": "destroy",
"key": "mykey",
"group": "mygroup",
},
)
judge_command("XGROUP destroy mykey", None)
judge_command("XGROUP DESTROY mykey mygroup $", None)
def test_xgroup_delconsumer(judge_command):
judge_command(
"XGROUP delconsumer mykey mygroup myconsumer",
{
"command": "XGROUP",
"stream_delconsumer": "delconsumer",
"key": "mykey",
"group": "mygroup",
"consumer": "myconsumer",
},
)
judge_command(
"XGROUP delconsumer mykey mygroup $",
{
"command": "XGROUP",
"stream_delconsumer": "delconsumer",
"key": "mykey",
"group": "mygroup",
"consumer": "$",
},
)
judge_command("XGROUP delconsumer mykey mygroup", None)
def test_xgroup_stream(judge_command):
judge_command(
"XACK mystream group1 123123",
{
"command": "XACK",
"key": "mystream",
"group": "group1",
"stream_id": "123123",
},
)
judge_command(
"XACK mystream group1 123123 111",
{"command": "XACK", "key": "mystream", "group": "group1", "stream_id": "111"},
)
def test_xinfo(judge_command):
judge_command(
"XINFO consumers mystream mygroup",
{
"command": "XINFO",
"stream_consumers": "consumers",
"key": "mystream",
"group": "mygroup",
},
)
judge_command(
"XINFO GROUPS mystream",
{"command": "XINFO", "stream_groups": "GROUPS", "key": "mystream"},
)
judge_command(
"XINFO STREAM mystream",
{"command": "XINFO", "stream": "STREAM", "key": "mystream"},
)
judge_command("XINFO HELP", {"command": "XINFO", "help": "HELP"})
judge_command("XINFO consumers mystream mygroup GROUPS mystream", None)
judge_command("XINFO groups mystream mygroup", None)
def test_xinfo_with_full(judge_command):
judge_command(
"XINFO STREAM mystream FULL",
{
"command": "XINFO",
"stream": "STREAM",
"key": "mystream",
"full_const": "FULL",
},
)
judge_command(
"XINFO STREAM mystream FULL count 10",
{
"command": "XINFO",
"stream": "STREAM",
"key": "mystream",
"full_const": "FULL",
"count_const": "count",
"count": "10",
},
)
def test_xpending(judge_command):
judge_command(
"XPENDING mystream group55",
{"command": "XPENDING", "key": "mystream", "group": "group55"},
)
judge_command(
"XPENDING mystream group55 myconsumer",
{
"command": "XPENDING",
"key": "mystream",
"group": "group55",
"consumer": "myconsumer",
},
)
judge_command(
"XPENDING mystream group55 - + 10",
{
"command": "XPENDING",
"key": "mystream",
"group": "group55",
"stream_id": ["-", "+"],
"count": "10",
},
)
judge_command(
"XPENDING mystream group55 - + 10 myconsumer",
{
"command": "XPENDING",
"key": "mystream",
"group": "group55",
"stream_id": ["-", "+"],
"count": "10",
"consumer": "myconsumer",
},
)
judge_command("XPENDING mystream group55 - + ", None)
def test_xadd(judge_command):
judge_command(
"xadd mystream MAXLEN ~ 1000 * key value",
{
"command": "xadd",
"key": "mystream",
"maxlen": "MAXLEN",
"approximately": "~",
"count": "1000",
"sfield": "key",
"svalue": "value",
"stream_id": "*",
},
)
# test for MAXLEN option
judge_command(
"xadd mystream MAXLEN 1000 * key value",
{
"command": "xadd",
"key": "mystream",
"maxlen": "MAXLEN",
"count": "1000",
"sfield": "key",
"svalue": "value",
"stream_id": "*",
},
)
judge_command(
"xadd mystream * key value",
{
"command": "xadd",
"key": "mystream",
"sfield": "key",
"svalue": "value",
"stream_id": "*",
},
)
    # specify a stream id
judge_command(
"xadd mystream 123-123 key value",
{
"command": "xadd",
"key": "mystream",
"sfield": "key",
"svalue": "value",
"stream_id": "123-123",
},
)
judge_command(
"xadd mystream 123-123 key value foo bar hello world",
{
"command": "xadd",
"key": "mystream",
"sfield": "hello",
"svalue": "world",
"stream_id": "123-123",
},
)
def test_xtrim(judge_command):
judge_command(
" XTRIM mystream MAXLEN 2",
{"command": "XTRIM", "key": "mystream", "maxlen": "MAXLEN", "count": "2"},
)
judge_command(
" XTRIM mystream MAXLEN ~ 2",
{
"command": "XTRIM",
"key": "mystream",
"maxlen": "MAXLEN",
"count": "2",
"approximately": "~",
},
)
judge_command(" XTRIM mystream", None)
def test_xdel(judge_command):
judge_command(
"XDEL mystream 1581165000000 1549611229000 1581060831000",
{"command": "XDEL", "key": "mystream", "stream_id": "1581060831000"},
)
judge_command(
"XDEL mystream 1581165000000",
{"command": "XDEL", "key": "mystream", "stream_id": "1581165000000"},
)
def test_xclaim(judge_command):
judge_command(
"XCLAIM mystream mygroup Alice 3600000 1526569498055-0",
{
"command": "XCLAIM",
"key": "mystream",
"group": "mygroup",
"consumer": "Alice",
"millisecond": "3600000",
"stream_id": "1526569498055-0",
},
)
judge_command(
"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 123 456 789",
{
"command": "XCLAIM",
"key": "mystream",
"group": "mygroup",
"consumer": "Alice",
"millisecond": "3600000",
"stream_id": "789",
},
)
judge_command(
"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 IDEL 300",
{
"command": "XCLAIM",
"key": "mystream",
"group": "mygroup",
"consumer": "Alice",
"millisecond": ["3600000", "300"],
"stream_id": "1526569498055-0",
"idel": "IDEL",
},
)
judge_command(
"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 retrycount 7",
{
"command": "XCLAIM",
"key": "mystream",
"group": "mygroup",
"consumer": "Alice",
"millisecond": "3600000",
"stream_id": "1526569498055-0",
"retrycount": "retrycount",
"count": "7",
},
)
judge_command(
"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 TIME 123456789",
{
"command": "XCLAIM",
"key": "mystream",
"group": "mygroup",
"consumer": "Alice",
"millisecond": "3600000",
"stream_id": "1526569498055-0",
"time": "TIME",
"timestamp": "123456789",
},
)
judge_command(
"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 FORCE",
{
"command": "XCLAIM",
"key": "mystream",
"group": "mygroup",
"consumer": "Alice",
"millisecond": "3600000",
"stream_id": "1526569498055-0",
"force": "FORCE",
},
)
judge_command(
"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 JUSTID",
{
"command": "XCLAIM",
"key": "mystream",
"group": "mygroup",
"consumer": "Alice",
"millisecond": "3600000",
"stream_id": "1526569498055-0",
"justid": "JUSTID",
},
)
def test_xread(judge_command):
judge_command(
"XREAD COUNT 2 STREAMS mystream writers 0-0 0-0",
{
"command": "XREAD",
"count_const": "COUNT",
"count": "2",
"streams": "STREAMS",
# FIXME current grammar can't support multiple tokens
            # so the ids will be recognized as keys.
"keys": "mystream writers 0-0",
"stream_id": "0-0",
},
)
judge_command(
"XREAD COUNT 2 BLOCK 1000 STREAMS mystream writers 0-0 0-0",
{
"command": "XREAD",
"count_const": "COUNT",
"count": "2",
"streams": "STREAMS",
"keys": "mystream writers 0-0",
"block": "BLOCK",
"millisecond": "1000",
"stream_id": "0-0",
},
)
def test_xreadgroup(judge_command):
judge_command(
"XREADGROUP GROUP mygroup1 Bob COUNT 1 BLOCK 100 NOACK STREAMS key1 1 key2 2",
{
"command": "XREADGROUP",
"stream_group": "GROUP",
"group": "mygroup1",
"consumer": "Bob",
"count_const": "COUNT",
"count": "1",
"block": "BLOCK",
"millisecond": "100",
"noack": "NOACK",
"streams": "STREAMS",
"keys": "key1 1 key2",
"stream_id": "2",
},
)
judge_command(
"XREADGROUP GROUP mygroup1 Bob STREAMS key1 1 key2 2",
{
"command": "XREADGROUP",
"stream_group": "GROUP",
"group": "mygroup1",
"consumer": "Bob",
"streams": "STREAMS",
"keys": "key1 1 key2",
"stream_id": "2",
},
)
judge_command("XREADGROUP GROUP group consumer", None)
| 1.898438 | 2 |
tests/test_find_forks/test_find_forks.py | ivan2kh/find_forks | 41 | 2660 | <filename>tests/test_find_forks/test_find_forks.py
# coding: utf-8
"""test_find_fork."""
# pylint: disable=no-self-use
from __future__ import absolute_import, division, print_function, unicode_literals
from os import path
import unittest
from six import PY3
from find_forks.__init__ import CONFIG
from find_forks.find_forks import add_forks, determine_names, find_forks, main
from .__init__ import BASEPATH
if PY3:
from unittest.mock import patch, MagicMock, Mock # pylint: disable=no-name-in-module
else:
from mock import patch, MagicMock, Mock
class FindForksCommon(unittest.TestCase):
@staticmethod
def make_mock(json_response):
"""Used in test_interesting.py."""
response_mock = MagicMock()
response_mock.read = Mock(return_value=json_response)
if PY3:
response_mock.status = 200
response_mock.getheader = Mock(return_value='<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=2>; rel="next", '
'<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=3>; rel="last"')
else:
response_mock.code = 200
response_mock.info = Mock(return_value=(('link', '<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=2>; rel="next", '
'<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=3>; rel="last"'), ))
return response_mock
def make_test(self, response_mock):
"""Used in test_interesting.py."""
url = 'https://github.com/frost-nzcr4/find_forks'
with patch('find_forks.find_forks.urllib.request.urlopen', return_value=response_mock) as urlopen_mock:
with patch('find_forks.git_wrapper.subprocess.call', return_value=None):
self.assertEqual(add_forks(url), 'https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=2')
urlopen_mock.assert_called_once_with(url, timeout=6)
if PY3:
response_mock.status = 404
else:
response_mock.code = 404
self.assertIsNone(add_forks(url))
class FindForksTest(FindForksCommon):
def test_add_forks(self):
self.assertIsNone(add_forks('httttps://unavailable!url'))
with open(path.join(BASEPATH, 'fixture/response.json'), 'rb') as fixture:
json_response = fixture.read()
response_mock = self.make_mock(json_response)
self.make_test(response_mock)
def test_determine_names(self):
"""To run this test you'll need to prepare git first, run:
git remote add test-origin-1 https://github.com/frost-nzcr4/find_forks.git
git remote add test-origin-2 https://github.com/yagmort/symfony1.git
git remote add test-origin-3 [email protected]:tjerkw/Android-SlideExpandableListView.git
"""
user, repo = determine_names()
self.assertEqual(user, 'frost-nzcr4')
self.assertEqual(repo, 'find_forks')
user, repo = determine_names('test-origin-1')
self.assertEqual(user, 'frost-nzcr4')
self.assertEqual(repo, 'webmoney')
user, repo = determine_names('test-origin-2')
self.assertEqual(user, 'yagmort')
self.assertEqual(repo, 'symfony1')
user, repo = determine_names('test-origin-3')
self.assertEqual(user, 'tjerkw')
self.assertEqual(repo, 'Android-SlideExpandableListView')
with self.assertRaises(RuntimeError):
user, repo = determine_names('name-with-an-error')
def test_find_forks(self):
sent_args = {
'per_page': 99,
'start_page': 3
}
url = 'https://api.github.com/repos/frost-nzcr4/find_forks/forks?per_page=%s&page=%s' % (sent_args['per_page'], sent_args['start_page'])
with patch('find_forks.git_wrapper.subprocess.call', return_value=None) as call_mock:
with patch('find_forks.find_forks.add_forks', return_value=None) as add_forks_mock:
find_forks(**sent_args)
add_forks_mock.assert_called_once_with(url)
call_mock.assert_called_once()
def test_main(self):
with patch('find_forks.find_forks.find_forks', return_value=None) as find_forks_mock:
main()
sent_args = CONFIG.copy()
sent_args.update({'user': None, 'repo': None, 'no_fetch': False})
find_forks_mock.assert_called_once_with(**sent_args)
# Test __version__ exceptions.
find_forks_mock = MagicMock(side_effect=SystemError())
del find_forks_mock.__version__
modules = {
'find_forks.__init__': find_forks_mock
}
with patch.dict('sys.modules', modules):
self.assertRaises(ImportError, main)
| 2.3125 | 2 |
neutron/agent/l3/dvr_router.py | insequent/neutron | 0 | 2661 | # Copyright (c) 2015 Openstack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import binascii
import netaddr
from oslo_log import log as logging
from oslo_utils import excutils
from neutron.agent.l3 import dvr_fip_ns
from neutron.agent.l3 import dvr_snat_ns
from neutron.agent.l3 import router_info as router
from neutron.agent.linux import ip_lib
from neutron.common import constants as l3_constants
from neutron.common import utils as common_utils
from neutron.i18n import _LE
LOG = logging.getLogger(__name__)
# xor-folding mask used for IPv6 rule index
MASK_30 = 0x3fffffff
class DvrRouter(router.RouterInfo):
def __init__(self, agent, host, *args, **kwargs):
super(DvrRouter, self).__init__(*args, **kwargs)
self.agent = agent
self.host = host
self.floating_ips_dict = {}
self.snat_iptables_manager = None
# Linklocal subnet for router and floating IP namespace link
self.rtr_fip_subnet = None
self.dist_fip_count = None
self.snat_namespace = None
def get_floating_ips(self):
"""Filter Floating IPs to be hosted on this agent."""
floating_ips = super(DvrRouter, self).get_floating_ips()
return [i for i in floating_ips if i['host'] == self.host]
def get_snat_interfaces(self):
return self.router.get(l3_constants.SNAT_ROUTER_INTF_KEY, [])
def get_snat_int_device_name(self, port_id):
long_name = dvr_snat_ns.SNAT_INT_DEV_PREFIX + port_id
return long_name[:self.driver.DEV_NAME_LEN]
def _handle_fip_nat_rules(self, interface_name, action):
"""Configures NAT rules for Floating IPs for DVR.
Remove all the rules. This is safe because if
use_namespaces is set as False then the agent can
only configure one router, otherwise each router's
NAT rules will be in their own namespace.
"""
self.iptables_manager.ipv4['nat'].empty_chain('POSTROUTING')
self.iptables_manager.ipv4['nat'].empty_chain('snat')
# Add back the jump to float-snat
self.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat')
# And add them back if the action is add_rules
if action == 'add_rules' and interface_name:
rule = ('POSTROUTING', '! -i %(interface_name)s '
'! -o %(interface_name)s -m conntrack ! '
'--ctstate DNAT -j ACCEPT' %
{'interface_name': interface_name})
self.iptables_manager.ipv4['nat'].add_rule(*rule)
self.iptables_manager.apply()
def floating_ip_added_dist(self, fip, fip_cidr):
"""Add floating IP to FIP namespace."""
floating_ip = fip['floating_ip_address']
fixed_ip = fip['fixed_ip_address']
rule_pr = self.fip_ns.allocate_rule_priority()
self.floating_ips_dict[floating_ip] = rule_pr
fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id)
ip_rule = ip_lib.IPRule(namespace=self.ns_name)
ip_rule.rule.add(fixed_ip, dvr_fip_ns.FIP_RT_TBL, rule_pr)
        # Add routing rule in fip namespace
fip_ns_name = self.fip_ns.get_name()
rtr_2_fip, _ = self.rtr_fip_subnet.get_pair()
device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name)
device.route.add_route(fip_cidr, str(rtr_2_fip.ip))
interface_name = (
self.fip_ns.get_ext_device_name(
self.fip_ns.agent_gateway_port['id']))
ip_lib.send_garp_for_proxyarp(fip_ns_name,
interface_name,
floating_ip,
self.agent_conf.send_arp_for_ha)
# update internal structures
self.dist_fip_count = self.dist_fip_count + 1
def floating_ip_removed_dist(self, fip_cidr):
"""Remove floating IP from FIP namespace."""
floating_ip = fip_cidr.split('/')[0]
rtr_2_fip_name = self.fip_ns.get_rtr_ext_device_name(self.router_id)
fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id)
if self.rtr_fip_subnet is None:
self.rtr_fip_subnet = self.fip_ns.local_subnets.allocate(
self.router_id)
rtr_2_fip, fip_2_rtr = self.rtr_fip_subnet.get_pair()
fip_ns_name = self.fip_ns.get_name()
if floating_ip in self.floating_ips_dict:
rule_pr = self.floating_ips_dict[floating_ip]
ip_rule = ip_lib.IPRule(namespace=self.ns_name)
ip_rule.rule.delete(floating_ip, dvr_fip_ns.FIP_RT_TBL, rule_pr)
self.fip_ns.deallocate_rule_priority(rule_pr)
        # TODO(rajeev): Handle else case - exception/log?
device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name)
device.route.delete_route(fip_cidr, str(rtr_2_fip.ip))
# check if this is the last FIP for this router
self.dist_fip_count = self.dist_fip_count - 1
if self.dist_fip_count == 0:
            # remove default route entry
device = ip_lib.IPDevice(rtr_2_fip_name, namespace=self.ns_name)
ns_ip = ip_lib.IPWrapper(namespace=fip_ns_name)
device.route.delete_gateway(str(fip_2_rtr.ip),
table=dvr_fip_ns.FIP_RT_TBL)
self.fip_ns.local_subnets.release(self.router_id)
self.rtr_fip_subnet = None
ns_ip.del_veth(fip_2_rtr_name)
is_last = self.fip_ns.unsubscribe(self.router_id)
if is_last:
# TODO(Carl) I can't help but think that another router could
# come in and want to start using this namespace while this is
# destroying it. The two could end up conflicting on
# creating/destroying interfaces and such. I think I'd like a
# semaphore to sync creation/deletion of this namespace.
self.fip_ns.delete()
self.fip_ns = None
def add_floating_ip(self, fip, interface_name, device):
if not self._add_fip_addr_to_device(fip, device):
return l3_constants.FLOATINGIP_STATUS_ERROR
# Special Handling for DVR - update FIP namespace
ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address'])
self.floating_ip_added_dist(fip, ip_cidr)
return l3_constants.FLOATINGIP_STATUS_ACTIVE
def remove_floating_ip(self, device, ip_cidr):
super(DvrRouter, self).remove_floating_ip(device, ip_cidr)
self.floating_ip_removed_dist(ip_cidr)
def create_snat_namespace(self):
# TODO(mlavalle): in the near future, this method should contain the
# code in the L3 agent that creates a gateway for a dvr. The first step
# is to move the creation of the snat namespace here
self.snat_namespace = dvr_snat_ns.SnatNamespace(self.router['id'],
self.agent_conf,
self.driver,
self.use_ipv6)
self.snat_namespace.create()
return self.snat_namespace
def delete_snat_namespace(self):
# TODO(mlavalle): in the near future, this method should contain the
# code in the L3 agent that removes an external gateway for a dvr. The
# first step is to move the deletion of the snat namespace here
self.snat_namespace.delete()
self.snat_namespace = None
def _get_internal_port(self, subnet_id):
"""Return internal router port based on subnet_id."""
router_ports = self.router.get(l3_constants.INTERFACE_KEY, [])
for port in router_ports:
fips = port['fixed_ips']
for f in fips:
if f['subnet_id'] == subnet_id:
return port
def _update_arp_entry(self, ip, mac, subnet_id, operation):
"""Add or delete arp entry into router namespace for the subnet."""
port = self._get_internal_port(subnet_id)
# update arp entry only if the subnet is attached to the router
if not port:
return
try:
# TODO(mrsmith): optimize the calls below for bulk calls
interface_name = self.get_internal_device_name(port['id'])
device = ip_lib.IPDevice(interface_name, namespace=self.ns_name)
if operation == 'add':
device.neigh.add(ip, mac)
elif operation == 'delete':
device.neigh.delete(ip, mac)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("DVR: Failed updating arp entry"))
def _set_subnet_arp_info(self, port):
"""Set ARP info retrieved from Plugin for existing ports."""
if 'id' not in port['subnet']:
return
subnet_id = port['subnet']['id']
# TODO(Carl) Can we eliminate the need to make this RPC while
# processing a router.
subnet_ports = self.agent.get_ports_by_subnet(subnet_id)
for p in subnet_ports:
if p['device_owner'] not in l3_constants.ROUTER_INTERFACE_OWNERS:
for fixed_ip in p['fixed_ips']:
self._update_arp_entry(fixed_ip['ip_address'],
p['mac_address'],
subnet_id,
'add')
def _map_internal_interfaces(self, int_port, snat_ports):
"""Return the SNAT port for the given internal interface port."""
fixed_ip = int_port['fixed_ips'][0]
subnet_id = fixed_ip['subnet_id']
match_port = [p for p in snat_ports if
p['fixed_ips'][0]['subnet_id'] == subnet_id]
if match_port:
return match_port[0]
else:
LOG.error(_LE('DVR: no map match_port found!'))
@staticmethod
def _get_snat_idx(ip_cidr):
"""Generate index for DVR snat rules and route tables.
The index value has to be 32 bits or less but more than the system
generated entries i.e. 32768. For IPv4 use the numeric value of the
cidr. For IPv6 generate a crc32 bit hash and xor-fold to 30 bits.
Use the freed range to extend smaller values so that they become
greater than system generated entries.
"""
net = netaddr.IPNetwork(ip_cidr)
if net.version == 6:
# the crc32 & 0xffffffff is for Python 2.6 and 3.0 compatibility
snat_idx = binascii.crc32(ip_cidr) & 0xffffffff
# xor-fold the hash to reserve upper range to extend smaller values
snat_idx = (snat_idx >> 30) ^ (snat_idx & MASK_30)
if snat_idx < 32768:
snat_idx = snat_idx + MASK_30
else:
snat_idx = net.value
return snat_idx
def _snat_redirect_add(self, gateway, sn_port, sn_int):
"""Adds rules and routes for SNAT redirection."""
try:
ip_cidr = sn_port['ip_cidr']
snat_idx = self._get_snat_idx(ip_cidr)
ns_ipr = ip_lib.IPRule(namespace=self.ns_name)
ns_ipd = ip_lib.IPDevice(sn_int, namespace=self.ns_name)
ns_ipwrapr = ip_lib.IPWrapper(namespace=self.ns_name)
ns_ipd.route.add_gateway(gateway, table=snat_idx)
ns_ipr.rule.add(ip_cidr, snat_idx, snat_idx)
ns_ipwrapr.netns.execute(['sysctl', '-w', 'net.ipv4.conf.%s.'
'send_redirects=0' % sn_int])
except Exception:
LOG.exception(_LE('DVR: error adding redirection logic'))
def _snat_redirect_remove(self, gateway, sn_port, sn_int):
"""Removes rules and routes for SNAT redirection."""
try:
ip_cidr = sn_port['ip_cidr']
snat_idx = self._get_snat_idx(ip_cidr)
ns_ipr = ip_lib.IPRule(namespace=self.ns_name)
ns_ipd = ip_lib.IPDevice(sn_int, namespace=self.ns_name)
ns_ipd.route.delete_gateway(gateway, table=snat_idx)
ns_ipr.rule.delete(ip_cidr, snat_idx, snat_idx)
except Exception:
LOG.exception(_LE('DVR: removed snat failed'))
def get_gw_port_host(self):
host = self.router.get('gw_port_host')
if not host:
LOG.debug("gw_port_host missing from router: %s",
self.router['id'])
return host
def internal_network_added(self, port):
super(DvrRouter, self).internal_network_added(port)
ex_gw_port = self.get_ex_gw_port()
if not ex_gw_port:
return
snat_ports = self.get_snat_interfaces()
sn_port = self._map_internal_interfaces(port, snat_ports)
if not sn_port:
return
interface_name = self.get_internal_device_name(port['id'])
self._snat_redirect_add(sn_port['fixed_ips'][0]['ip_address'],
port,
interface_name)
# TODO(Carl) This is a sign that dvr needs two router classes.
is_this_snat_host = (self.agent_conf.agent_mode == 'dvr_snat' and
self.get_gw_port_host() == self.host)
if not is_this_snat_host:
return
ns_name = dvr_snat_ns.SnatNamespace.get_snat_ns_name(self.router['id'])
self._set_subnet_info(sn_port)
interface_name = self.get_snat_int_device_name(sn_port['id'])
self._internal_network_added(
ns_name,
sn_port['network_id'],
sn_port['id'],
sn_port['ip_cidr'],
sn_port['mac_address'],
interface_name,
dvr_snat_ns.SNAT_INT_DEV_PREFIX)
self._set_subnet_arp_info(port)
def _dvr_internal_network_removed(self, port):
if not self.ex_gw_port:
return
sn_port = self._map_internal_interfaces(port, self.snat_ports)
if not sn_port:
return
# DVR handling code for SNAT
interface_name = self.get_internal_device_name(port['id'])
self._snat_redirect_remove(sn_port['fixed_ips'][0]['ip_address'],
port,
interface_name)
is_this_snat_host = (self.agent_conf.agent_mode == 'dvr_snat' and
self.ex_gw_port['binding:host_id'] == self.host)
if not is_this_snat_host:
return
snat_interface = (
self.get_snat_int_device_name(sn_port['id']))
ns_name = self.snat_namespace.name
prefix = dvr_snat_ns.SNAT_INT_DEV_PREFIX
if ip_lib.device_exists(snat_interface, namespace=ns_name):
self.driver.unplug(snat_interface, namespace=ns_name,
prefix=prefix)
def internal_network_removed(self, port):
self._dvr_internal_network_removed(port)
super(DvrRouter, self).internal_network_removed(port)
def get_floating_agent_gw_interface(self, ext_net_id):
"""Filter Floating Agent GW port for the external network."""
fip_ports = self.router.get(l3_constants.FLOATINGIP_AGENT_INTF_KEY, [])
return next(
(p for p in fip_ports if p['network_id'] == ext_net_id), None)
| 1.734375 | 2 |
sider/warnings.py | PCManticore/sider | 19 | 2662 | <reponame>PCManticore/sider
""":mod:`sider.warnings` --- Warning categories
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module defines several custom warning category classes.
"""
class SiderWarning(Warning):
"""All warning classes used by Sider extend this base class."""
class PerformanceWarning(SiderWarning, RuntimeWarning):
"""The category for warnings about performance worries. Operations
that warn this category would work but be inefficient.
"""
class TransactionWarning(SiderWarning, RuntimeWarning):
"""The category for warnings about transactions."""
| 1.859375 | 2 |
kadal/query.py | Bucolo/Kadal | 1 | 2663 | MEDIA_SEARCH = """
query ($search: String, $type: MediaType, $exclude: MediaFormat, $isAdult: Boolean) {
Media(search: $search, type: $type, format_not: $exclude, isAdult: $isAdult) {
id
type
format
title {
english
romaji
native
}
synonyms
status
description
startDate {
year
month
day
}
endDate {
year
month
day
}
episodes
chapters
volumes
coverImage {
large
color
}
bannerImage
genres
averageScore
siteUrl
isAdult
nextAiringEpisode {
timeUntilAiring
episode
}
}
}
"""
MEDIA_BY_ID = """
query ($id: Int, $type: MediaType) {
Media(id: $id, type: $type) {
id
type
format
title {
english
romaji
native
}
synonyms
status
description
startDate {
year
month
day
}
endDate {
year
month
day
}
episodes
chapters
coverImage {
large
color
}
bannerImage
genres
averageScore
siteUrl
isAdult
nextAiringEpisode {
timeUntilAiring
episode
}
}
}
"""
MEDIA_PAGED = """
query (
$id: Int,
$page: Int,
$perPage: Int,
$search: String,
$type: MediaType,
$sort: [MediaSort] = [SEARCH_MATCH],
$exclude: MediaFormat,
$isAdult: Boolean
) {
Page(page: $page, perPage: $perPage) {
media(id: $id, search: $search, type: $type, sort: $sort, format_not: $exclude, isAdult: $isAdult) {
id
type
format
title {
english
romaji
native
}
synonyms
status
description
startDate {
year
month
day
}
endDate {
year
month
day
}
episodes
chapters
volumes
coverImage {
large
color
}
bannerImage
genres
averageScore
siteUrl
isAdult
popularity
}
}
}
"""
USER_SEARCH = """
query ($search: String) {
User(search: $search) {
id
name
html_about: about(asHtml: true)
about
avatar {
large
}
bannerImage
siteUrl
stats {
watchedTime
chaptersRead
}
}
}
"""
USER_BY_ID = """
query ($id: Int) {
User(id: $id) {
id
name
html_about: about(asHtml: true)
about
avatar {
large
}
bannerImage
siteUrl
stats {
watchedTime
chaptersRead
}
}
}
"""
| 1.273438 | 1 |
sandbox/test/testChainop.py | turkeydonkey/nzmath3 | 1 | 2664 |
import unittest
import operator
import sandbox.chainop as chainop
class BasicChainTest (unittest.TestCase):
def testBasicChain(self):
double = lambda x: x * 2
self.assertEqual(62, chainop.basic_chain((operator.add, double), 2, 31))
square = lambda x: x ** 2
self.assertEqual(2**31, chainop.basic_chain((operator.mul, square), 2, 31))
class MultiChainTest (unittest.TestCase):
def testMultiChain(self):
double = lambda x: x * 2
self.assertEqual([62, 93], chainop.multi_chains((operator.add, double), (2, 3), 31))
square = lambda x: x ** 2
self.assertEqual([2**31, 3**31], chainop.multi_chains((operator.mul, square), [2, 3], 31))
def suite(suffix="Test"):
suite = unittest.TestSuite()
all_names = globals()
for name in all_names:
if name.endswith(suffix):
suite.addTest(unittest.makeSuite(all_names[name], "test"))
return suite
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite())
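# Editor's sketch: a reference implementation consistent with the expected
# values in the tests above, assuming sandbox.chainop uses the standard
# left-to-right binary method (square-and-multiply / double-and-add).
def reference_chain(ops, x, index):
    """Compute the chain for index >= 1; ops = (combine, double)."""
    combine, double = ops
    result = x
    for bit in bin(index)[3:]:  # skip '0b' and the leading 1-bit
        result = double(result)
        if bit == '1':
            result = combine(result, x)
    return result

# reference_chain((operator.add, lambda v: v * 2), 2, 31) == 62 == 2 * 31
# reference_chain((operator.mul, lambda v: v ** 2), 2, 31) == 2 ** 31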
| 3.015625 | 3 |
labs_final/lab5/experiments/run_trpo_pendulum.py | mrmotallebi/berkeley-deeprl-bootcamp | 3 | 2665 | #!/usr/bin/env python
import chainer
from algs import trpo
from env_makers import EnvMaker
from models import GaussianMLPPolicy, MLPBaseline
from utils import SnapshotSaver
import numpy as np
import os
import logger
log_dir = "data/local/trpo-pendulum"
np.random.seed(42)
# Clean up existing logs
os.system("rm -rf {}".format(log_dir))
with logger.session(log_dir):
env_maker = EnvMaker('Pendulum-v0')
env = env_maker.make()
policy = GaussianMLPPolicy(
observation_space=env.observation_space,
action_space=env.action_space,
env_spec=env.spec,
hidden_sizes=(64, 64),
hidden_nonlinearity=chainer.functions.tanh,
)
baseline = MLPBaseline(
observation_space=env.observation_space,
action_space=env.action_space,
env_spec=env.spec,
hidden_sizes=(64, 64),
hidden_nonlinearity=chainer.functions.tanh,
)
trpo(
env=env,
env_maker=env_maker,
n_envs=16,
policy=policy,
baseline=baseline,
batch_size=10000,
n_iters=100,
snapshot_saver=SnapshotSaver(log_dir),
)
| 1.96875 | 2 |
jtyoui/regular/regexengine.py | yy1244/Jtyoui | 1 | 2666 | #!/usr/bin/python3.7
# -*- coding: utf-8 -*-
# @Time : 2019/12/2 10:17
# @Author: <EMAIL>
"""
Regex parsing engine
"""
try:
import xml.etree.cElementTree as et
except ModuleNotFoundError:
import xml.etree.ElementTree as et
import re
class RegexEngine:
def __init__(self, xml, str_):
"""加载正则表。正则表为xml
:param xml: 正则表的位置
:param str_: 要匹配的字符串
"""
self._string = str_
self._root = et.parse(xml).getroot()
self.re = ''
self.data = []
def select(self, tag):
"""根据xml的tag来实现不同的正则提取
:param tag: xml的tag标签
:return: 正则提取的数据
"""
root = self._root.find(tag)
attrib = root.attrib
if attrib.get('part', 'False').lower() == 'true':
self._part_tag(root)
return list(filter(lambda x: x[1], self.data))
else:
sf = self._no_part(root)
self.re = ''.join(self.data) + sf
return re.findall(self.re, self._string)
def _no_part(self, tags):
"""tag标签不分开抽取"""
for tag in tags:
if tag:
if tag.attrib.get('must', 'true').lower() == 'true':
self.data.append(self.re)
self.re = ''
self.re = '(?:' + self._no_part(tag) + ')'
else:
self.re = self._no_part(tag)
else:
attrib = tag.attrib
text = tag.text.strip()
if attrib.get('must', 'true').lower() == 'true':
self.re = '(?:' + text + ')'
else:
self.re += '(?:' + text + ')?'
return self.re
def _part_tag(self, tags):
"""tag标签分开提取"""
for tag in tags:
if tag:
self._part_tag(tag)
else:
self.data.append((tag.tag, re.findall(tag.text.strip(), self._string)))
@property
def string(self):
return self._string
@string.setter
def string(self, str_):
self._string = str_
self.re, self.data = '', []
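if __name__ == '__main__':
    # Editor's sketch: a minimal regex table and query. The <phone>/<mobile>
    # rule and the sample string are invented for illustration.
    import tempfile

    table = ('<root><phone part="true">'
             '<mobile>1[3-9]\\d{9}</mobile>'
             '</phone></root>')
    with tempfile.NamedTemporaryFile('w', suffix='.xml', delete=False,
                                     encoding='utf-8') as f:
        f.write(table)
    engine = RegexEngine(f.name, 'call me at 13912345678')
    print(engine.select('phone'))  # [('mobile', ['13912345678'])]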
| 2.78125 | 3 |
proglearn/transformers.py | rflperry/ProgLearn | 0 | 2667 |
"""
Main Author: <NAME>
Corresponding Email: <EMAIL>
"""
import keras
import numpy as np
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils.validation import check_array, check_is_fitted, check_X_y
from .base import BaseTransformer
class NeuralClassificationTransformer(BaseTransformer):
"""
A class used to transform data from a category to a specialized representation.
Parameters
----------
network : object
A neural network used in the classification transformer.
euclidean_layer_idx : int
An integer to represent the final layer of the transformer.
optimizer : str or keras.optimizers instance
An optimizer used when compiling the neural network.
loss : str, default="categorical_crossentropy"
A loss function used when compiling the neural network.
pretrained : bool, default=False
A boolean used to identify if the network is pretrained.
compile_kwargs : dict, default={"metrics": ["acc"]}
A dictionary containing metrics for judging network performance.
fit_kwargs : dict, default={
"epochs": 100,
"callbacks": [keras.callbacks.EarlyStopping(patience=5, monitor="val_acc")],
"verbose": False,
"validation_split": 0.33,
},
A dictionary to hold epochs, callbacks, verbose, and validation split for the network.
Attributes
----------
encoder_ : object
A Keras model with inputs and outputs based on the network attribute.
Output layers are determined by the euclidean_layer_idx parameter.
"""
def __init__(
self,
network,
euclidean_layer_idx,
optimizer,
loss="categorical_crossentropy",
pretrained=False,
compile_kwargs={"metrics": ["acc"]},
fit_kwargs={
"epochs": 100,
"callbacks": [keras.callbacks.EarlyStopping(patience=5, monitor="val_acc")],
"verbose": False,
"validation_split": 0.33,
},
):
self.network = keras.models.clone_model(network)
self.encoder_ = keras.models.Model(
inputs=self.network.inputs,
outputs=self.network.layers[euclidean_layer_idx].output,
)
self.pretrained = pretrained
self.optimizer = optimizer
self.loss = loss
self.compile_kwargs = compile_kwargs
self.fit_kwargs = fit_kwargs
def fit(self, X, y):
"""
Fits the transformer to data X with labels y.
Parameters
----------
X : ndarray
Input data matrix.
y : ndarray
Output (i.e. response data matrix).
Returns
-------
self : NeuralClassificationTransformer
The object itself.
"""
check_X_y(X, y)
_, y = np.unique(y, return_inverse=True)
# more typechecking
self.network.compile(
loss=self.loss, optimizer=self.optimizer, **self.compile_kwargs
)
self.network.fit(X, keras.utils.to_categorical(y), **self.fit_kwargs)
return self
def transform(self, X):
"""
Performs inference using the transformer.
Parameters
----------
X : ndarray
Input data matrix.
Returns
-------
X_transformed : ndarray
The transformed input.
Raises
------
NotFittedError
When the model is not fitted.
"""
check_is_fitted(self)
check_array(X)
return self.encoder_.predict(X)
class TreeClassificationTransformer(BaseTransformer):
"""
A class used to transform data from a category to a specialized representation.
Parameters
----------
kwargs : dict, default={}
A dictionary to contain parameters of the tree.
Attributes
----------
transformer : sklearn.tree.DecisionTreeClassifier
an internal sklearn DecisionTreeClassifier
"""
def __init__(self, kwargs={}):
self.kwargs = kwargs
def fit(self, X, y):
"""
Fits the transformer to data X with labels y.
Parameters
----------
X : ndarray
Input data matrix.
y : ndarray
Output (i.e. response data matrix).
Returns
-------
self : TreeClassificationTransformer
The object itself.
"""
X, y = check_X_y(X, y)
self.transformer_ = DecisionTreeClassifier(**self.kwargs).fit(X, y)
return self
def transform(self, X):
"""
Performs inference using the transformer.
Parameters
----------
X : ndarray
Input data matrix.
Returns
-------
X_transformed : ndarray
The transformed input.
Raises
------
NotFittedError
When the model is not fitted.
"""
check_is_fitted(self)
X = check_array(X)
return self.transformer_.apply(X)
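# Editor's sketch: the tree transformer maps each sample to the index of the
# decision-tree leaf it falls into; downstream voters consume those indices.
# The iris data and max_depth value are illustrative only.
if __name__ == "__main__":
    from sklearn.datasets import load_iris

    X, y = load_iris(return_X_y=True)
    transformer = TreeClassificationTransformer(kwargs={"max_depth": 3})
    leaf_ids = transformer.fit(X, y).transform(X)
    print(leaf_ids[:5])  # leaf reached by each of the first five samples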
| 2.765625 | 3 |
morphelia/external/saphire.py | marx-alex/Morphelia | 0 | 2668 | import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.collections as mcoll
from matplotlib.ticker import MaxNLocator
plt.style.use('seaborn-darkgrid')
class BaseTraj:
def __init__(self, model, X):
self.model = model
assert len(X.shape) == 2, f"X should be 2-d, instead got shape {X.shape}"
self.X = X
self.means = self.model.means_.copy()
self.states = self.model.predict(X)
self.n_states = len(np.unique(self.states))
self.trans = self.model.transmat_.copy()
def rho_dt_bins(self, rho, theta, dt, bins=12):
"""
Bin rho values and dwell time on polar coordinates.
:param rho:
:param theta:
:param dt:
:param bins:
:return:
"""
bins = np.linspace(-np.pi, np.pi, bins+1)
bin_means = (bins[:-1] + bins[1:]) / 2
bin_ix = np.digitize(theta, bins)
bin_rd = [rho[(bin_ix == i) & (rho > 0)].mean()
if len(rho[(bin_ix == i) & (rho > 0)]) > 0 else
0 for i in range(1, len(bins))]
bin_dt = [dt[(bin_ix == i) & (dt > 0)].sum()
if len(dt[(bin_ix == i) & (dt > 0)]) > 0 else
0 for i in range(1, len(bins))]
return bin_means, bin_rd, bin_dt
def transition_vectors(self):
"""
Transition vectors between states on polar coordinates.
:return:
"""
mu_x, mu_y = self.means[:, 0], self.means[:, 1]
mu_x_dist = mu_x - mu_x[:, np.newaxis]
mu_y_dist = mu_y - mu_y[:, np.newaxis]
dist_vect = np.column_stack((mu_x_dist.flatten(), mu_y_dist.flatten()))
trans_rho, trans_theta = self.cart2pol(dist_vect)
trans_rho = (trans_rho.reshape((self.n_states, self.n_states)) * self.design_transition()).flatten()
return trans_rho, trans_theta
def design_transition(self, thresh=0.1):
design_trans = self.trans
diag_ix = np.diag_indices(len(design_trans))
design_trans[diag_ix] = 0
design_trans[design_trans < thresh] = 0
design_trans[design_trans >= thresh] = 1
return design_trans
def norm_trans_time(self):
"""
Normalized transition time.
:return:
"""
unique, counts = np.unique(self.states, return_counts=True)
sort_ix = unique.argsort()
counts = counts[sort_ix]
# normalize by transition probability
dt = (counts * self.design_transition()).flatten()
return dt / dt.sum()
def norm_state_time(self):
"""
Normalized state time.
:return:
"""
unique, counts = np.unique(self.states, return_counts=True)
sort_ix = unique.argsort()
counts = counts[sort_ix]
return counts / counts.sum()
@staticmethod
def cart2pol(arr):
"""
        Cartesian space to polar space.
Args:
arr (numpy.array): Array of shape [n_state x dims]
"""
x, y = arr[:, 0], arr[:, 1]
rho = np.sqrt(x ** 2 + y ** 2)
theta = np.arctan2(y, x)
return rho, theta
class PhenoSign(BaseTraj):
"""Phenotypic Signature class."""
def __init__(self, model, X):
super(PhenoSign, self).__init__(model, X)
self.bin_means, self.signature = self.get_signature()
def get_signature(self):
"""
Calculate phenotypic signature for a given model.
:return: bin_means, array of shape [4 x n_bins] with
1. state radial distances
2. state dwell times
3. transition distances
        4. transition dwell times
"""
# states
mu_rho, mu_theta = self.cart2pol(self.means)
state_dt = self.norm_state_time()
bin_means_1, state_rd_bins, state_dt_bins = self.rho_dt_bins(mu_rho, mu_theta, state_dt)
# transitions
trans_rho, trans_theta = self.transition_vectors()
trans_dt = self.norm_trans_time()
bin_means_2, trans_rd_bins, trans_dt_bins = self.rho_dt_bins(trans_rho, trans_theta, trans_dt)
assert (bin_means_1 == bin_means_2).all(), "state and transition vectors are binned differently and can" \
"not be concatenated."
return bin_means_1, np.vstack((state_rd_bins, state_dt_bins, trans_rd_bins, trans_dt_bins))
class Saphire(PhenoSign):
"""Implementation of the SAPHIRE algorithm for plotting Hidden Markov Models.
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>. Time series modeling of live-cell shape dynamics for
image-based phenotypic profiling. Integr Biol (Camb). 2016;8(1):73-90.
"""
def __init__(self, model, X):
super(Saphire, self).__init__(model, X)
def plot_traj(self, projection='cartesian', ymax=None):
"""
Plot cell trajectory.
Args:
projection (str): cartesian or polar.
ymax (int)
"""
avail_proj = ['cartesian', 'polar']
projection = projection.lower()
assert projection in avail_proj, f"projection unknown: {projection}"
if projection == 'cartesian':
projection = None
cmap = plt.get_cmap('binary')
cmap = truncate_colormap(cmap, minval=0.2)
if projection == 'polar':
y, x = self.cart2pol(self.X)
y_mu, x_mu = self.cart2pol(self.means)
else:
x, y = self.X[:, 0], self.X[:, 1]
x_mu, y_mu = self.means[:, 0], self.means[:, 1]
fig, ax = plt.subplots(figsize=(5, 5), subplot_kw={'projection': projection})
ax.scatter(x, y,
c=self.states, cmap='Set1', zorder=2)
traj = ax.scatter(x_mu, y_mu,
c=np.unique(self.states), cmap='Set1',
s=200, zorder=2, edgecolor='black', alpha=0.6)
legend = ax.legend(*traj.legend_elements(),
loc="upper right", bbox_to_anchor=(1.2, 0.94),
title="States")
ax.add_artist(legend)
if ymax is not None:
ax.set_ylim(0, ymax)
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
colorline(x, y, cmap=cmap, zorder=1)
norm = mpl.colors.Normalize(vmin=0, vmax=48)
cax = fig.add_axes([0.94, 0.15, 0.05, 0.3])
fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax,
orientation='vertical', label='Time')
plt.show()
return fig, ax
def plot_states(self, ymax=None):
"""
Plot cell states.
"""
bin_rd, bin_dt = self.signature[0, :], self.signature[1, :]
fig, ax = plt.subplots(figsize=(5, 5), subplot_kw={'projection': 'polar'})
cmap = plt.get_cmap("Oranges")
N = 12
width = (2 * np.pi) / N
ax.bar(self.bin_means, bin_rd, width=width, color=cmap(bin_dt))
if ymax is not None:
ax.set_ylim(0, ymax)
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
norm = mpl.colors.Normalize(vmin=0, vmax=1)
cax = fig.add_axes([0.94, 0.15, 0.05, 0.3])
fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax,
orientation='vertical', label='Increasing state dwell time',
ticks=[0, 0.5, 1])
return fig, ax
def plot_transition(self, ymax=None):
"""
Plot transition between cell states.
"""
bin_rd, bin_dt = self.signature[2, :], self.signature[3, :]
fig, ax = plt.subplots(figsize=(5, 5), subplot_kw={'projection': 'polar'})
cmap = plt.get_cmap("Blues")
N = 12
width = (2 * np.pi) / N
ax.bar(self.bin_means, bin_rd, width=width, color=cmap(bin_dt))
if ymax is not None:
ax.set_ylim(0, ymax)
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
norm = mpl.colors.Normalize(vmin=0, vmax=1)
cax = fig.add_axes([0.94, 0.15, 0.05, 0.3])
fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax,
orientation='vertical', label='Increasing transition dwell time',
ticks=[0, 0.5, 1])
return fig, ax
def colorline(x, y, z=None, cmap=plt.get_cmap('copper'), norm=plt.Normalize(0.0, 1.0),
linewidth=3, alpha=1.0, zorder=1):
"""
Plot a colored line with coordinates x and y
Optionally specify colors in the array z
Optionally specify a colormap, a norm function and a line width
"""
# Default colors equally spaced on [0,1]:
if z is None:
z = np.linspace(0.0, 1.0, len(x))
# Special case if a single number:
if not hasattr(z, "__iter__"): # to check for numerical input -- this is a hack
z = np.array([z])
z = np.asarray(z)
segments = make_segments(x, y)
lc = mcoll.LineCollection(segments, array=z, cmap=cmap, norm=norm,
linewidth=linewidth, alpha=alpha, zorder=zorder)
ax = plt.gca()
ax.add_collection(lc)
return lc
def make_segments(x, y):
"""
Create list of line segments from x and y coordinates, in the correct format
for LineCollection: an array of the form numlines x (points per line) x 2 (x
and y) array
"""
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
return segments
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
'''
https://stackoverflow.com/a/18926541
'''
if isinstance(cmap, str):
cmap = plt.get_cmap(cmap)
new_cmap = mpl.colors.LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),
cmap(np.linspace(minval, maxval, n)))
return new_cmap
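# Editor's sketch: typical use with an HMM fitted by hmmlearn (an assumption
# here; any model exposing `means_`, `transmat_` and `predict` should work).
# The toy random-walk trajectory is illustrative only.
if __name__ == "__main__":
    from hmmlearn import hmm

    rng = np.random.default_rng(0)
    X = np.cumsum(rng.normal(size=(200, 2)), axis=0)  # toy 2-d trajectory
    model = hmm.GaussianHMM(n_components=3, covariance_type="diag").fit(X)
    saphire = Saphire(model, X)
    saphire.plot_traj(projection="polar")
    saphire.plot_states()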
| 2.515625 | 3 |
account/views.py | Stfuncode/food-beverage-investigator | 0 | 2669 | from django.shortcuts import render, redirect
from django.views import View
from django.views.generic import (
ListView,
)
from account.models import *
from account.forms import *
from data.models import *
from django.contrib.auth import login as auth_login
from django.contrib.auth.models import auth
from django.contrib import messages
from django.contrib.auth.mixins import PermissionRequiredMixin, LoginRequiredMixin
# Create your views here.
def login(request):
if request.method == "POST":
form = loginForm(data=request.POST)
if form.is_valid():
user = form.get_user()
auth_login(request, user)
print("succesful login")
remember_me = form.cleaned_data["remember_me"]
if remember_me:
request.session.set_expiry(1209600)
return redirect("home")
else:
            messages.warning(request, 'There was an issue with your login process')
return redirect("login")
else:
form = loginForm()
create_form = createUserForm()
context = {
"form": form,
"create_form": create_form
}
return render(request, "login.html", context)
def logout(request):
auth.logout(request)
return redirect("login")
def register(request):
if request.method == "POST":
create_form = createUserForm(data=request.POST)
if create_form.is_valid():
user = create_form.save(commit=False)
user.save()
messages.success(request, "User created successfully!")
return redirect("login")
else:
messages.error(request, "User creation failed")
else:
create_form = createUserForm()
return render(request, "login.html", {"create_form": create_form})
def homepage(request):
user = Account.objects.filter(is_superuser=False).count()
rest = Restaurant.objects.all().count()
rating = RestaurantReview.objects.exclude(rating__isnull=True).count()
review = RestaurantReview.objects.exclude(review__isnull=True).count()
context = {
"user_count" : user,
"rest_count" : rest,
"rating_count" : rating,
"review_count" : review,
}
return render(request, "home.html", context)
class ViewUserView(LoginRequiredMixin, PermissionRequiredMixin, View):  # mixins must precede View so their dispatch checks run
permission_required = 'accounts.view_account'
raise_exception = True
def post(self, request, pk, *args, **kwargs):
user = Account.objects.get(account_id=pk)
form = viewUserForm(request.POST, instance=user)
return redirect("userlist")
def get(self, request, pk, *args, **kwargs):
user = Account.objects.get(account_id=pk)
form = viewUserForm(instance=user)
context = {
"form": form,
"pk": pk
}
return render(request, "profile.html", context)
class EditUserView(LoginRequiredMixin, PermissionRequiredMixin, View):  # mixins before View
permission_required = 'accounts.change_account'
raise_exception = True
def post(self, request, pk, *args, **kwargs):
user = Account.objects.get(account_id=pk)
form = editUserForm(request.POST, instance=user)
if form.is_valid():
user = form.save()
role = request.POST.get("role")
user.save()
messages.success(request, "Successfully updated profile!")
return redirect(f'/viewUser/{user.account_id}')
else:
form = editUserForm(instance=user)
extra_context = {
"form": form,
}
print('something wrong')
messages.error(request, "Invalid input! Please input a valid information.")
return render(request, "editUser.html", extra_context)
def get(self, request, pk, *args, **kwargs):
user = Account.objects.get(account_id=pk)
form = editUserForm(instance=user)
extra_context = {
"form": form,
}
return render(request, "editUser.html", extra_context)
class UserListView(LoginRequiredMixin, PermissionRequiredMixin, ListView):
permission_required = 'accounts.view_account'
template_name = "userList.html"
queryset = Account.objects.all()
class UpdateProfilePicView(LoginRequiredMixin, PermissionRequiredMixin, View):  # mixins before View
permission_required = 'accounts.change_account'
raise_exception = True
def post(self, request, pk, *args, **kwargs):
user = Account.objects.get(account_id=pk)
user.profile_pic = request.FILES.get('profile-pic')
user.save()
return redirect('viewUser', pk)
def deleteUser(request, event_id):
event = Account.objects.get(pk=event_id)
event.delete()
return redirect('userlist') | 2.21875 | 2 |
fpds/client.py | mgradowski/aiproject | 0 | 2670 |
import cv2
import aiohttp
import asyncio
import concurrent.futures
import argparse
import numpy as np
async def camera_source(ws: aiohttp.ClientWebSocketResponse, threadpool: concurrent.futures.ThreadPoolExecutor, src_id: int=0):
loop = asyncio.get_running_loop()
try:
src = await loop.run_in_executor(threadpool, lambda: cv2.VideoCapture(src_id))
while True:
_, im = await loop.run_in_executor(threadpool, src.read)
im = cv2.resize(im, (640, 384))
enc_param = [int(cv2.IMWRITE_JPEG_QUALITY), 40]
_, im = await loop.run_in_executor(threadpool, lambda: cv2.imencode('.jpg', im, enc_param))
await ws.send_bytes(im.tobytes())
except asyncio.CancelledError:
pass
finally:
src.release()
async def preview_window(queue: asyncio.Queue, threadpool: concurrent.futures.ThreadPoolExecutor):
loop = asyncio.get_running_loop()
try:
while True:
im = await queue.get()
im = np.frombuffer(im, dtype=np.uint8)
im = await loop.run_in_executor(threadpool, lambda: cv2.imdecode(im, cv2.IMREAD_ANYCOLOR))
cv2.imshow('fpds_remote_preview', im)
cv2.waitKey(1)
except asyncio.CancelledError:
pass
finally:
cv2.destroyAllWindows()
async def run_client(
ws: aiohttp.ClientWebSocketResponse,
threadpool: concurrent.futures.ThreadPoolExecutor
) -> None:
# --
dst_queue = asyncio.Queue(maxsize=1)
src_task = asyncio.create_task(camera_source(ws, threadpool))
dst_task = asyncio.create_task(preview_window(dst_queue, threadpool))
try:
while True:
im = await ws.receive_bytes()
await dst_queue.put(im)
except asyncio.CancelledError:
await ws.send_str('close')
src_task.cancel()
dst_task.cancel()
await asyncio.wait([src_task, dst_task])
async def amain(url: str):
with concurrent.futures.ThreadPoolExecutor(max_workers=4) as threadpool:
async with aiohttp.ClientSession() as session, session.ws_connect(url) as ws:
await run_client(ws, threadpool)
def main():
parser = argparse.ArgumentParser('fpds.client')
parser.add_argument('url', type=str, help='WebSocket endpoint of fpds.server e.g. http://localhost:8181/fpds')
args = parser.parse_args()
loop = asyncio.get_event_loop()
task = loop.create_task(amain(args.url))
try:
loop.run_until_complete(task)
except KeyboardInterrupt:
task.cancel()
loop.run_until_complete(asyncio.wait_for(task, timeout=None))
finally:
loop.close()
if __name__ == '__main__':
main()
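# Editor's note: a typical invocation, assuming an fpds server is already
# listening at the endpoint (the URL below is the argparse help example):
#
#     python fpds/client.py http://localhost:8181/fpds
#
# The client streams JPEG-encoded camera frames over the WebSocket and shows
# whatever frames the server sends back in an OpenCV preview window.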
| 2.484375 | 2 |
Giveme5W1H/extractor/tools/key_value_cache.py | bkrrr/Giveme5W | 410 | 2671 | import logging
import os
import pickle
import sys
import threading
import time
from typing import List
from Giveme5W1H.extractor.root import path
from Giveme5W1H.extractor.tools.util import bytes_2_human_readable
class KeyValueCache(object):
def __init__(self, cache_path):
"""
:param cache_path: path to cache, must be relative to the root.py file
"""
self.log = logging.getLogger('GiveMe5W')
        # resolve the path relative to the root.py file
self._cache_path = path(cache_path)
        # add a meaningful extension
self._cache_path = self._cache_path + '.prickle'
self._cache = {}
if cache_path and os.path.isfile(self._cache_path) and os.path.getsize(self._cache_path) > 0:
            # reload the cache object from disk, if any
with open(self._cache_path, 'rb') as ff:
self._cache = pickle.load(ff)
self.log.debug('KeyValueCache: ' + self._cache_path + ' restored')
self.log_stats()
else:
self._cache = {}
self._lock = threading.Lock()
def log_stats(self):
        # size does not include child objects
self.log.info(self._cache_path + ' entries: ' + str(len(self._cache)) + ' size: ' + bytes_2_human_readable(
sys.getsizeof(self._cache)))
def persist(self):
with open(self._cache_path, 'wb') as f:
pickle.dump(self._cache, f, pickle.HIGHEST_PROTOCOL)
def cache(self, key: str, value: object):
"""
        None values are considered invalid results (ToughRequest produces None on exceptions).
        Set -1 if you want to store "no distance".
:param key:
:param value:
:return:
"""
self._lock.acquire()
if value is not None:
            self._cache[key] = self._pack(value)
self.log.debug(self._cache_path + ' CACHED: ' + str(key) + ': ' + str(value))
self.persist()
self._lock.release()
def get(self, key):
"""
Read cache entries
:param key:
:return:
"""
self._lock.acquire()
result = None
value = self._cache.get(key)
if value is not None:
self.log.debug(self._cache_path + ' LOADED: ' + str(key) + ': ' + str(value))
result = self._unpack(value)
self._lock.release()
return result
def get_complex(self, list_of_keys: List[str]):
"""
Read complex cache entries
"""
return self.get(self._get_id(list_of_keys))
def cache_complex(self, list_of_keys: List[str], value):
"""
        Helper to cache values under multiple string keys.
        The keys are sorted before concatenation, so the resulting id is order-independent.
"""
self.cache(self._get_id(list_of_keys), value)
def _get_id(self, list_of_keys: List[str]):
"""
sorts list_of_keys, concatenates with # for readability
:param list_of_keys:
:return:
"""
        # sorted() returns a new list; use its result so the key order is actually normalised
        return "#".join(sorted(list_of_keys))
def _pack(self, value):
"""
cache tracks the age of an entry, may be helpful in the future
:param value:
:return:
"""
return [value, str(time.time())]
def _unpack(self, value):
"""
removes the timestamp around the cached value, if any
:param value:
:return:
"""
# there are some old entries without timestamp
if isinstance(value, str) or isinstance(value, int):
return value
return value[0]
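# Editor's sketch: use as a persisted memo table. The cache path and keys are
# invented for illustration; paths are resolved relative to root.py.
if __name__ == '__main__':
    cache = KeyValueCache('examples/demo_cache')
    if cache.get_complex(['berlin', 'paris']) is None:
        cache.cache_complex(['berlin', 'paris'], 878)  # e.g. distance in km
    print(cache.get_complex(['paris', 'berlin']))      # same entry: 878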
| 2.328125 | 2 |
nsst_translate_corpus.py | AlexanderJenke/nsst | 0 | 2672 | from argparse import ArgumentParser
from tqdm import tqdm
import NSST
from nsst_translate import best_transition_sequence
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument("--nsst_file", default="output/nsst_tss20_th4_nSt100_Q0.pkl", help="nsst file")
parser.add_argument("--src_lang", default="output/europarl-v7.de-en.de.clean")
parser.add_argument("--tgt_lang", default="output/europarl-v7.de-en.en.clean")
parser.add_argument("--enforce_n_reg", default=True)
parser.add_argument("--output", default=f"output/nsst_stat_nreg_100Q0.csv")
args = parser.parse_args()
args.enforce_n_final_reg = False
# load NSST
nsst = NSST.NSST()
nsst.load(args.nsst_file)
args.nsst = nsst
# open files
src_file = open(args.src_lang, 'r')
tgt_file = open(args.tgt_lang, 'r')
output_file = open(args.output, 'w')
# iterate over sentences, first 4096 -> test sentences
for src, tgt, _ in tqdm(list(zip(src_file, tgt_file, range(4096))), desc="Processing sentences"):
# remove line breaks
src = src[:-1]
tgt = tgt[:-1]
# try to translate
try:
# prepare tokenisations
token_src = [nsst.tokenization_src[word] if word in nsst.tokenization_src else 0
for word in src.split(" ") if len(word)]
token_tgt = [nsst.tokenization_tgt[word] if word in nsst.tokenization_tgt else 0
for word in tgt.split(" ") if len(word)]
# run nsst
args.input = src
args.token_src = token_src
result = best_transition_sequence(args)
# get best result
pred = sorted((k for k in result
if ('Qf' in args.nsst_file or not args.enforce_n_final_reg or len(k[1]) == 1)
and ('Q0' in args.nsst_file or k[0] == -1)
),
key=lambda x: x[2],
reverse=True)[0]
n_res = len(result)
q, reg, prob = pred
# write to csv
if not len(reg): # catch empty registers
continue
token_pred = [w for w in reg[0].split(' ') if len(w)]
pred_str = ""
for t in token_pred:
pred_str += f"{nsst.tokenization_tgt_lut[int(t)]} "
token_src_str = ""
for t in token_src:
token_src_str += f"{t} "
token_tgt_str = ""
for t in token_tgt:
token_tgt_str += f"{t} "
token_pred_str = ""
for t in token_pred:
token_pred_str += f"{t} "
print(f"{src};{token_src_str[:-1]};"
f"{tgt};{token_tgt_str[:-1]};"
f"{pred_str};{token_pred_str[:-1]};"
f"{prob};{len(reg)};{n_res}",
file=output_file)
output_file.flush()
except RuntimeError:
pass
# close files
src_file.close()
tgt_file.close()
output_file.close()
| 2.546875 | 3 |
10 Days of Statistics/Day 1/Standard Deviation.py | dhyanpatel110/HACKERRANK | 0 | 2673 | # Import library
import math
# Define functions
def mean(data):
return sum(data) / len(data)
def stddev(data, size):
sum = 0
for i in range(size):
sum = sum + (data[i] - mean(data)) ** 2
return math.sqrt(sum / size)
# Set data
size = int(input())
numbers = list(map(int, input().split()))
# Get standard deviation
print(round(stddev(numbers, size), 1))
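# Worked example: for input "5" and "10 40 30 50 20" the mean is 30, the
# squared deviations sum to 400 + 100 + 0 + 400 + 100 = 1000, the variance is
# 1000 / 5 = 200, and sqrt(200) ~= 14.142, so the program prints 14.1.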
| 3.796875 | 4 |
Homework/Hw4/Solution/problem5a.py | jmsevillam/Herramientas-Computacionales-UniAndes | 0 | 2674 |
def decode(word1,word2,code):
if len(word1)==1:
code+=word1+word2
return code
else:
code+=word1[0]+word2[0]
return decode(word1[1:],word2[1:],code)
Alice='Ti rga eoe esg o h ore"ermetsCmuainls'
Bob='hspormdcdsamsaefrte<NAME>ae"'
print(decode(Alice,Bob,''))
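# The recursion interleaves the two strings one character at a time, e.g.
# decode('abc', 'xyz', '') == 'axbycz'; that is how Alice's and Bob's halves
# above reassemble into the full message.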
| 3.359375 | 3 |
pychron/lasers/power/composite_calibration_manager.py | ASUPychron/pychron | 31 | 2675 |
# ===============================================================================
# Copyright 2012 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from traits.api import HasTraits, Instance, DelegatesTo, Button, List, Any, Float
from traitsui.api import View, Item, VGroup, HGroup, Group, spring, TabularEditor
# ============= standard library imports ========================
import pickle
import os
from numpy import polyval
# ============= local library imports ==========================
from pychron.managers.manager import Manager
from pychron.database.selectors.power_calibration_selector import (
PowerCalibrationSelector,
)
from pychron.database.adapters.power_calibration_adapter import PowerCalibrationAdapter
from pychron.paths import paths
from pychron.graph.graph import Graph
from pychron.hardware.meter_calibration import MeterCalibration
"""
use a dbselector to select data
"""
class BoundsSelector(HasTraits):
graph = Instance(Graph)
def traits_view(self):
v = View(
Item("graph", show_label=False, style="custom"),
buttons=["OK", "Cancel"],
kind="livemodal",
)
return v
class CompositeCalibrationManager(Manager):
db = Instance(PowerCalibrationAdapter)
selector = Instance(PowerCalibrationSelector)
append = Button
replace = Button
load_graph = Button
save = Button
selected_calibrations = List
selected = Any
results = DelegatesTo("selector")
graph = Instance(Graph)
dclicked = Any
parent_name = "FusionsDiode"
power = Float
input = Float
def _power_changed(self):
pc = self._load_calibration()
if pc is not None:
self.input, _ = pc.get_input(self.power)
def _load_calibration(self):
try:
p = self._get_calibration_path()
with open(p, "rb") as f:
pc = pickle.load(f)
except:
return
return pc
def _dclicked_changed(self):
s = self.selected
if s is not None:
s.bounds = None
s.load_graph()
s.graph.add_range_selector()
bc = BoundsSelector(graph=s.graph)
info = bc.edit_traits()
if info.result:
bounds = s.graph.plots[0].default_index.metadata["selections"]
s.bounds = bounds
s.calibration_bounds = (
polyval(s.coefficients, bounds[0]),
polyval(s.coefficients, bounds[1]),
)
def _append_fired(self):
s = self.selector.selected
if s is not None:
for si in s:
trs = list(si.traits().keys()).remove("graph")
self.selected_calibrations.append(si.clone_traits(traits=trs))
def _replace_fired(self):
s = self.selector.selected
trs = list(s.traits().keys()).remove("graph")
self.selected_calibrations = s.clone_traits(traits=trs)
def _save_fired(self):
self._dump_calibration()
def _dump_calibration(self):
pc = MeterCalibration()
coeffs = []
bounds = []
for s in self.selected_calibrations:
coeffs.append(s.coefficients)
bounds.append(s.calibration_bounds)
pc.coefficients = coeffs
pc.bounds = bounds
p = self._get_calibration_path()
self.info("saving calibration to {}".format(p))
with open(p, "wb") as f:
pickle.dump(pc, f)
def _get_calibration_path(self):
p = os.path.join(
paths.hidden_dir, "{}_power_calibration".format(self.parent_name)
)
return p
def _load_graph_fired(self):
g = self.graph
g.clear()
# g.new_plot(zoom=True, pan=True,
# padding=[40, 10, 10, 40]
# )
has_bounds = False
for i, s in enumerate(self.selected_calibrations):
if s.bounds:
has_bounds = True
elif has_bounds:
g.clear()
self._plot_factory(g)
self.warning_dialog("{} does not have its bounds set".format(s.rid))
break
s.load_graph(graph=g, new_plot=i == 0)
g.redraw()
def traits_view(self):
selector_grp = Group(Item("selector", style="custom", show_label=False))
transfer_grp = VGroup(
spring,
VGroup(Item("append", show_label=False), Item("replace", show_label=False)),
spring,
)
editor = TabularEditor(
adapter=self.selector.tabular_adapter(),
editable=False,
dclicked="object.dclicked",
selected="object.selected",
)
selected_grp = Item("selected_calibrations", editor=editor, show_label=False)
data_tab = Group(
HGroup(selector_grp, transfer_grp, selected_grp),
show_border=True,
label="Data",
)
process_tab = Group(
HGroup(
Item("power"),
Item("input", format_str=" %0.3f ", style="readonly"),
spring,
Item("save", show_label=False),
Item("load_graph", show_label=False),
),
Item("graph", style="custom", show_label=False),
show_border=True,
label="Process",
)
v = View(
VGroup(data_tab, process_tab),
resizable=True,
title="Composite {} Power Calibration".format(self.parent_name),
)
return v
def _graph_default(self):
g = Graph(
container_dict={
# 'fill_padding':True,
# 'bgcolor':'red',
"padding": 5
}
)
self._plot_factory(g)
return g
def _plot_factory(self, graph):
graph.new_plot(
zoom=True,
pan=True,
padding=[50, 10, 10, 40],
xtitle="Setpoint (%)",
ytitle="Measured Power (W)",
)
def _db_default(self):
if self.parent_name == "FusionsDiode":
name = paths.diodelaser_db
else:
name = paths.co2laser_db
db = PowerCalibrationAdapter(name=name, kind="sqlite")
db.connect()
return db
def _selector_default(self):
return self.db._selector_factory()
if __name__ == "__main__":
ccm = CompositeCalibrationManager()
ccm.configure_traits()
# ============= EOF =============================================
| 1.445313 | 1 |
ttt_package/libs/best_move.py | Ipgnosis/tic_tac_toe | 0 | 2676 |
# refactored from make_play to simplify
# by Russell on 3/5/21
#from ttt_package.libs.move_utils import get_open_cells
from ttt_package.libs.compare import get_transposed_games, reorient_games
from ttt_package.libs.calc_game_bound import calc_game_bound
from ttt_package.libs.maxi_min import maximin
# find the best move for this agent, based on prior games in the game_history
def best_move(this_board, agent, ttt_base, probs_calc):
candidate_games = []
lower_bound = 0
upper_bound = 0
# note that len gives the number of the move about to be made
num_moves = len(this_board)
bounds_list = []
#print("best_move - this_board:", this_board)
# TRANSPOSE the current game state into 8 different games and store in a list
# the returned value is a list of dictionaries that contain the transposed game
# and the source function, to allow the game to be transposed back
tg_list = get_transposed_games(this_board)
#print("best_move: tg_list =", tg_list)
# for each of the 8 transposed versions of the current game in question
# build a list of lower and upper bound tuples for the tg_list using calc_game_bound
for tgame in tg_list:
lower_bound = calc_game_bound(tgame["transpose"], agent, 'L')
upper_bound = calc_game_bound(tgame["transpose"], agent, 'U')
bounds_tuple = (lower_bound, upper_bound)
bounds_list.append(bounds_tuple)
#print("best_move: bounds_tuple =", bounds_tuple)
# fetch the list of candidate games from the game history
# we need to look at losing and drawing games so that we can thoroughly explore the action space
# we must avoid overlooking a good move made early that resulted in a draw/loss because of a
# later bad move - these will be resolved later via backpropagation
candidate_games = ttt_base.get_games_list(bounds_list)
#print("best_move: candidate_games =", candidate_games)
# if there is at least one game that matches the current game state
if candidate_games != False:
# this is the list of games that match the transposed game list
# de-transpose the candidate games to get the right cell for the next move
        # get a list of the matching de-transposed versions of the current game
reoriented_candidates = reorient_games(tg_list, candidate_games)
#print("best_move: number of reoriented_candidates games = ", len(reoriented_candidates))
#print("best_move: number of candidate games = ", len(candidate_games))
#print('best_move: reoriented_candidates =', reoriented_candidates)
#print('best_move: candidate_games =', candidate_games)
maximin_list = []
# iterate though the game candidates
for this_game in range(len(reoriented_candidates)):
these_probs = []
# get the probability element for the next move of this game candidate
these_probs = reoriented_candidates[this_game]["probs"][num_moves].copy(
)
# tack on the cell # of the move
these_probs.append(
reoriented_candidates[this_game]["game"][num_moves])
# append the game submission data to the list to be submitted to maximin
maximin_list.append(these_probs)
#print("maximin_list:", maximin_list)
        # send the list of probabilities of the de-transposed recorded games for the next move
recommended_move = maximin(maximin_list)
#print("best_move: move = ", recommended_move)
return recommended_move
else: # there are no matching games in the game history
#print("best_move: random choice...")
# return random_move(this_board)
# estimate the optimal next move
optimal_move = probs_calc.calc_next_move(this_board)
#print("This board =", this_board)
#print("Calculating optimal move =", optimal_move)
return optimal_move
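# Editor's sketch: the 8 symmetries of the 3x3 board that a helper like
# get_transposed_games can enumerate (identity, three rotations and four
# reflections). Cells are numbered 0-8 row-major; the real implementation
# lives in ttt_package.libs.compare, so this is illustrative only.
def _rotate90(cell):
    row, col = divmod(cell, 3)
    return col * 3 + (2 - row)  # 90-degree clockwise rotation

def _mirror(cell):
    row, col = divmod(cell, 3)
    return row * 3 + (2 - col)  # reflection about the vertical axis

def eight_symmetries(game):
    variants = []
    for mirrored in (False, True):
        moves = [_mirror(c) for c in game] if mirrored else list(game)
        for _ in range(4):
            variants.append(moves)
            moves = [_rotate90(c) for c in moves]
    return variants  # 8 transposed move lists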
| 2.71875 | 3 |
yard/skills/66-python/cookbook/yvhai/demo/mt/raw_thread.py | paser4se/bbxyard | 1 | 2677 | #!/usr/bin/env python3
# Python threading test
import _thread
import time
from yvhai.demo.base import YHDemo
def print_time(thread_name, interval, times):
for cnt in range(times):
time.sleep(interval)
print(" -- %s: %s" % (thread_name, time.ctime(time.time())))
class RawThreadDemo(YHDemo):
def __init__(self):
super(RawThreadDemo, self).__init__('_thread')
@staticmethod
def main():
try:
_thread.start_new_thread(print_time, ("Thread-01", 1, 10))
_thread.start_new_thread(print_time, ("Thread-02", 2, 6))
except:
print("Error: 无法启动线程")
        # the main thread waits forever
while 1:
pass
@staticmethod
def demo(args=[]):
RawThreadDemo.main()
if __name__ == '__main__':
RawThreadDemo.demo()
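# Editor's note: `_thread` is the low-level interface; the same demo with the
# higher-level `threading` module would look like this (a sketch, not part of
# the original):
#
#     import threading
#     t1 = threading.Thread(target=print_time, args=("Thread-01", 1, 10))
#     t2 = threading.Thread(target=print_time, args=("Thread-02", 2, 6))
#     t1.start(); t2.start()
#     t1.join(); t2.join()  # no busy-wait loop needed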
| 3.5625 | 4 |
rasa/utils/tensorflow/constants.py | praneethgb/rasa | 8 | 2678 | # constants for configuration parameters of our tensorflow models
LABEL = "label"
IDS = "ids"
# LABEL_PAD_ID is used to pad multi-label training examples.
# It should be < 0 to avoid index out of bounds errors by tf.one_hot.
LABEL_PAD_ID = -1
HIDDEN_LAYERS_SIZES = "hidden_layers_sizes"
SHARE_HIDDEN_LAYERS = "share_hidden_layers"
TRANSFORMER_SIZE = "transformer_size"
NUM_TRANSFORMER_LAYERS = "number_of_transformer_layers"
NUM_HEADS = "number_of_attention_heads"
UNIDIRECTIONAL_ENCODER = "unidirectional_encoder"
KEY_RELATIVE_ATTENTION = "use_key_relative_attention"
VALUE_RELATIVE_ATTENTION = "use_value_relative_attention"
MAX_RELATIVE_POSITION = "max_relative_position"
BATCH_SIZES = "batch_size"
BATCH_STRATEGY = "batch_strategy"
EPOCHS = "epochs"
RANDOM_SEED = "random_seed"
LEARNING_RATE = "learning_rate"
DENSE_DIMENSION = "dense_dimension"
CONCAT_DIMENSION = "concat_dimension"
EMBEDDING_DIMENSION = "embedding_dimension"
ENCODING_DIMENSION = "encoding_dimension"
SIMILARITY_TYPE = "similarity_type"
LOSS_TYPE = "loss_type"
NUM_NEG = "number_of_negative_examples"
MAX_POS_SIM = "maximum_positive_similarity"
MAX_NEG_SIM = "maximum_negative_similarity"
USE_MAX_NEG_SIM = "use_maximum_negative_similarity"
SCALE_LOSS = "scale_loss"
REGULARIZATION_CONSTANT = "regularization_constant"
NEGATIVE_MARGIN_SCALE = "negative_margin_scale"
DROP_RATE = "drop_rate"
DROP_RATE_ATTENTION = "drop_rate_attention"
DROP_RATE_DIALOGUE = "drop_rate_dialogue"
DROP_RATE_LABEL = "drop_rate_label"
CONSTRAIN_SIMILARITIES = "constrain_similarities"
WEIGHT_SPARSITY = "weight_sparsity" # Deprecated and superseeded by CONNECTION_DENSITY
CONNECTION_DENSITY = "connection_density"
EVAL_NUM_EPOCHS = "evaluate_every_number_of_epochs"
EVAL_NUM_EXAMPLES = "evaluate_on_number_of_examples"
INTENT_CLASSIFICATION = "intent_classification"
ENTITY_RECOGNITION = "entity_recognition"
MASKED_LM = "use_masked_language_model"
SPARSE_INPUT_DROPOUT = "use_sparse_input_dropout"
DENSE_INPUT_DROPOUT = "use_dense_input_dropout"
RANKING_LENGTH = "ranking_length"
MODEL_CONFIDENCE = "model_confidence"
BILOU_FLAG = "BILOU_flag"
RETRIEVAL_INTENT = "retrieval_intent"
USE_TEXT_AS_LABEL = "use_text_as_label"
SOFTMAX = "softmax"
MARGIN = "margin"
AUTO = "auto"
INNER = "inner"
LINEAR_NORM = "linear_norm"
COSINE = "cosine"
CROSS_ENTROPY = "cross_entropy"
BALANCED = "balanced"
SEQUENCE = "sequence"
SEQUENCE_LENGTH = f"{SEQUENCE}_lengths"
SENTENCE = "sentence"
POOLING = "pooling"
MAX_POOLING = "max"
MEAN_POOLING = "mean"
TENSORBOARD_LOG_DIR = "tensorboard_log_directory"
TENSORBOARD_LOG_LEVEL = "tensorboard_log_level"
SEQUENCE_FEATURES = "sequence_features"
SENTENCE_FEATURES = "sentence_features"
FEATURIZERS = "featurizers"
CHECKPOINT_MODEL = "checkpoint_model"
MASK = "mask"
IGNORE_INTENTS_LIST = "ignore_intents_list"
TOLERANCE = "tolerance"
POSITIVE_SCORES_KEY = "positive_scores"
NEGATIVE_SCORES_KEY = "negative_scores"
RANKING_KEY = "label_ranking"
QUERY_INTENT_KEY = "query_intent"
SCORE_KEY = "score"
THRESHOLD_KEY = "threshold"
SEVERITY_KEY = "severity"
NAME = "name"
EPOCH_OVERRIDE = "epoch_override"
| 1.882813 | 2 |
client/canyons-of-mars/maze.py | GamesCreatorsClub/GCC-Rover | 3 | 2679 |
#
# Copyright 2016-2019 Games Creators Club
#
# MIT License
#
import math
import pyroslib
import pyroslib.logging
import time
from pyroslib.logging import log, LOG_LEVEL_ALWAYS, LOG_LEVEL_INFO, LOG_LEVEL_DEBUG
from rover import WheelOdos, WHEEL_NAMES
from rover import normaiseAngle, angleDiference
from challenge_utils import Action, PID
SQRT2 = math.sqrt(2)
PIhalf = math.pi / 2
class MazeAttitude:
UNKNOWN = 0
LEFT_WALL = 1
RIGHT_WALL = 2
FRONT_WALL = 4
BACK_WALL = 8
NO_GAP = 0
FORWARD_GAP = 1
SIDE_GAP = 2
POINTS = [0, 45, 90, 135, 180, 225, 270, 315]
WALLS = [90, 270, 0, 180]
L0_45 = 0
L45_90 = 45
L90_135 = 90
L135_180 = 135
L180_225 = 180
L225_270 = 225
L270_315 = 270
L315_0 = 315
LINES = [L0_45, L45_90, L90_135, L135_180, L180_225, L225_270, L270_315, L315_0]
ANGLE_TOLLERANCE = 1.075
@staticmethod
def normAngle(a):
if a > PIhalf:
a = a - math.pi
elif a <= -PIhalf:
a = a + math.pi
return a
class Line:
def __init__(self, line_index, long_point_index, short_point_index, factor, adjust):
self.line_index = line_index
self.short_point_index = short_point_index
self.long_point_index = long_point_index
self.factor = factor
self.adjust = adjust
self.angle = None
def calcAngle(self, distances):
long_distance = distances[self.long_point_index]
short_distance = distances[self.short_point_index]
if long_distance is not None and short_distance is not None:
lsqrt2 = long_distance / SQRT2
self.angle = MazeAttitude.normAngle(math.atan2(lsqrt2, lsqrt2 - short_distance) * self.factor + self.adjust)
else:
self.angle = None
class Wall:
def __init__(self, distance_sensor_angle, distance_sensor_index, wall_point_kind, left_mid_point_index, left_point_index, mid_point_index, right_point_index):
self.ds_angle = distance_sensor_angle
self.ds_index = distance_sensor_index
self.wall_point_kind = wall_point_kind
self.left_mid_point_index = left_mid_point_index
self.left_point_index = left_point_index
self.mid_point_index = mid_point_index
self.right_point_index = right_point_index
self.is_front_or_back = self.ds_angle == 0 or self.ds_angle == 180
self.selected_line = None
self.angle = None
self.distance = None
def setAngle(self, angle, distances):
self.angle = angle
distance = distances[self.mid_point_index]
if distance < 1:
self.distance = 0
else:
if self.is_front_or_back:
self.distance = abs(int(math.sin(angle) * distance))
else:
self.distance = abs(int(math.cos(angle) * distance))
def setAngleAndDistance(self, angle, distance):
self.angle = angle
self.distance = distance
def tryFindingWall(self, distances, lines, points):
lmline = lines[self.left_mid_point_index]
lline = lines[self.left_point_index]
mline = lines[self.mid_point_index]
rline = lines[self.right_point_index]
dlong1 = distances[lline.long_point_index]
dmid = distances[mline.short_point_index]
dlong2 = distances[mline.long_point_index]
plong1 = points[self.left_point_index]
pmid = points[self.mid_point_index]
plong2 = points[self.right_point_index]
if dlong1 < dlong2 and plong1 != MazeAttitude.UNKNOWN and lmline.angle * MazeAttitude.ANGLE_TOLLERANCE >= lline.angle >= lmline.angle / MazeAttitude.ANGLE_TOLLERANCE:
points[self.mid_point_index] = points[lline.long_point_index]
angle = MazeAttitude.normAngle(mline.angle - PIhalf)
distance = distances[self.right_point_index] * abs(math.sin(mline.angle) / SQRT2)
self.setAngleAndDistance(angle, distance)
elif dlong1 >= dlong2 and plong2 != MazeAttitude.UNKNOWN and mline.angle * MazeAttitude.ANGLE_TOLLERANCE >= rline.angle >= mline.angle / MazeAttitude.ANGLE_TOLLERANCE:
points[self.mid_point_index] = points[rline.long_point_index]
angle = MazeAttitude.normAngle(mline.angle + PIhalf)
distance = distances[self.left_point_index] * abs(math.sin(mline.angle) / SQRT2)
self.setAngleAndDistance(angle, distance)
elif lline.angle is not None and mline.angle is not None:
if lline.angle * MazeAttitude.ANGLE_TOLLERANCE >= mline.angle >= lline.angle / MazeAttitude.ANGLE_TOLLERANCE:
if plong1 == MazeAttitude.UNKNOWN:
points[self.left_point_index] = self.wall_point_kind
if pmid == MazeAttitude.UNKNOWN:
points[self.mid_point_index] = self.wall_point_kind
if plong2 == MazeAttitude.UNKNOWN:
points[self.right_point_index] = self.wall_point_kind
self.setAngle(mline.angle, distances)
else:
if dlong1 < dlong2 and plong1 == MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN:
points[self.left_point_index] = self.wall_point_kind
points[self.mid_point_index] = self.wall_point_kind
self.setAngle(lline.angle, distances)
elif dlong1 >= dlong2 and plong2 == MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN:
points[self.mid_point_index] = self.wall_point_kind
points[self.right_point_index] = self.wall_point_kind
self.setAngle(mline.angle, distances)
elif plong1 == MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN and plong2 != MazeAttitude.UNKNOWN:
points[self.left_point_index] = self.wall_point_kind
points[self.mid_point_index] = self.wall_point_kind
self.setAngle(lline.angle, distances)
elif plong1 != MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN and plong2 == MazeAttitude.UNKNOWN:
points[self.mid_point_index] = self.wall_point_kind
points[self.right_point_index] = self.wall_point_kind
self.setAngle(mline.angle, distances)
elif lline.angle is not None and plong1 == MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN:
points[self.left_point_index] = self.wall_point_kind
points[self.mid_point_index] = self.wall_point_kind
self.setAngle(lline.angle, distances)
elif mline.angle is not None and pmid == MazeAttitude.UNKNOWN and plong2 == MazeAttitude.UNKNOWN:
points[self.mid_point_index] = self.wall_point_kind
points[self.right_point_index] = self.wall_point_kind
self.setAngle(mline.angle, distances)
def __init__(self):
self.lines = {self.L315_0: self.Line(self.L315_0, 315, 0, -1, math.pi), self.L0_45: self.Line(self.L0_45, 45, 0, 1, -math.pi),
self.L45_90: self.Line(self.L45_90, 45, 90, -1, PIhalf), self.L90_135: self.Line(self.L90_135, 135, 90, 1, -PIhalf),
self.L135_180: self.Line(self.L135_180, 135, 180, -1, math.pi), self.L180_225: self.Line(self.L180_225, 225, 180, 1, -math.pi),
self.L225_270: self.Line(self.L225_270, 225, 270, -1, PIhalf), self.L270_315: self.Line(self.L270_315, 315, 270, 1, -PIhalf)}
self.right_wall = self.Wall(90, 2, self.RIGHT_WALL, 0, 45, 90, 135)
self.left_wall = self.Wall(270, 6, self.LEFT_WALL, 180, 225, 270, 315)
self.front_wall = self.Wall(0, 0, self.FRONT_WALL, 270, 315, 0, 45)
self.back_wall = self.Wall(180, 4, self.BACK_WALL, 90, 135, 180, 225)
self.left_gap = self.NO_GAP
self.right_gap = self.NO_GAP
self.walls = {self.right_wall.ds_angle: self.right_wall, self.left_wall.ds_angle: self.left_wall, self.front_wall.ds_angle: self.front_wall, self.back_wall.ds_angle: self.back_wall}
self.points = {0: 0, 45: 0, 90: 0, 135: 0, 180: 0, 225: 0, 270: 0, 315: 0}
self.distances = {0: 0, 45: 0, 90: 0, 135: 0, 180: 0, 225: 0, 270: 0, 315: 0}
def calculate(self, state):
def getPointDistance(state, angle):
distance = state.radar.radar[angle]
status = state.radar.status[angle]
if status == 0:
return distance
last_distance = state.radar.last_radar[angle]
if abs(distance - last_distance) < 100:
return distance
return None
def updateUndefinedWall(wall, preferable_wall, wall_adjust, second_wall):
if wall.angle is None and self.distances[wall.ds_angle] is not None:
if preferable_wall.angle is not None:
wall.setAngleAndDistance(self.normAngle(preferable_wall.angle + wall_adjust), self.distances[wall.mid_point_index])
else:
wall.setAngleAndDistance(self.normAngle(second_wall.angle - wall_adjust), self.distances[wall.mid_point_index])
self.points[wall.ds_angle] = wall.wall_point_kind
self.distances = {p: getPointDistance(state, p) for p in self.POINTS}
for line in self.lines:
self.lines[line].calcAngle(self.distances)
wls = [self.walls[w_ds_angle] for w_ds_angle in self.WALLS if self.distances[w_ds_angle] is not None]
wall_processing_order = sorted(wls,
key=lambda wall: self.distances[wall.ds_angle])
for wall in wall_processing_order:
wall.tryFindingWall(self.distances, self.lines, self.points)
updateUndefinedWall(self.front_wall, self.right_wall, -PIhalf, self.left_wall)
updateUndefinedWall(self.back_wall, self.right_wall, PIhalf, self.left_wall)
updateUndefinedWall(self.right_wall, self.front_wall, PIhalf, self.back_wall)
updateUndefinedWall(self.left_wall, self.front_wall, -PIhalf, self.back_wall)
# TODO calc gaps
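        # Editor's worked example of Line.calcAngle: with the rover parallel
        # to a wall on its right, the 90-degree beam reads d and the
        # 135-degree beam reads d*sqrt(2), so lsqrt2 == d and
        # atan2(d, d - d) == pi/2; with factor 1 and adjust -pi/2 the wall
        # angle comes out as 0, i.e. the rover needs no heading correction.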
class MoveForwardOnOdo(Action):
def __init__(self, agent, stop_action=None):
super(MoveForwardOnOdo, self).__init__(agent)
self.stop_action = stop_action
self.required_odo = {'fl': 0, 'fr': 0, 'bl': 0, 'br': 0}
def setRequiredOdo(self, distance):
for wheel_name in WHEEL_NAMES:
self.required_odo[wheel_name] = distance
def start(self):
super(MoveForwardOnOdo, self).start()
state = self.rover.getRoverState()
for wheel in self.required_odo:
self.required_odo[wheel] = WheelOdos.normaliseOdo(state.wheel_odos[wheel] + self.required_odo[wheel])
log(LOG_LEVEL_DEBUG, "Reset odo to " + str(self.required_odo) + "; starting...")
self.rover.command(pyroslib.publish, 300, 120)
# pyroslib.publish("move/steer", "300 120")
def end(self):
super(MoveForwardOnOdo, self).end()
def next(self):
state = self.rover.getRoverState()
do_stop = False
log(LOG_LEVEL_DEBUG, "Driving to " + str(self.required_odo))
for wheel_name in WHEEL_NAMES:
if state.wheel_odos[wheel_name] >= self.required_odo[wheel_name]:
do_stop = True
if state.radar.radar[0] < 1.0 or state.radar.radar[315] < 1.0 or state.radar.radar[45] < 1.0:
do_stop = True
if do_stop:
return self.stop_action
else:
return self
def execute(self):
pass
def getActionName(self):
return "Forward ODO"
class MazeAction(Action):
LEFT = -1
RIGHT = 1
def __init__(self, agent):
super(MazeAction, self).__init__(agent)
def check_next_action_conditions(self):
return self
class ChicaneAction(MazeAction):
def __init__(self, agent, left_or_right, distance, speed, next_action=None):
super(ChicaneAction, self).__init__(agent)
self.left_or_right = left_or_right
self.distance = distance
self.speed = speed
self.next_action = next_action
if self.left_or_right == MazeAction.RIGHT:
self.a1 = 45
self.a2 = 90
self.a3 = 135
else:
self.a1 = 315
self.a2 = 270
self.a3 = 225
self.left_corner_action = MazeTurnAroundCornerAction(self, self.LEFT, self.distance, self.speed, self)
self.right_corner_action = MazeTurnAroundCornerAction(self, self.RIGHT, self.distance, self.speed, DriverForwardForTimeAction(self, 10, self.speed, None))
def start(self):
super(ChicaneAction, self).start()
def end(self):
super(ChicaneAction, self).end()
def next(self):
        state = self.rover.getRoverState()
        if self.left_or_right == self.LEFT:
diagonal_distance = state.radar.radar[45]
else:
diagonal_distance = state.radar.radar[315]
if self.left_or_right == self.LEFT and diagonal_distance > 800:
log(LOG_LEVEL_INFO, "Found second part of chicane, rfd={: 4d}".format(int(diagonal_distance)))
self.left_or_right = self.RIGHT
elif self.left_or_right == self.RIGHT and diagonal_distance > 800:
log(LOG_LEVEL_INFO, "Found end ofchicane - leaging, rfd={: 4d}".format(int(diagonal_distance)))
return self.next_action
return self
def execute(self):
state = self.rover.getRoverState()
front_distance = state.radar.radar[0]
gain = 60
offset = 150
# Values that worked speed=150, steer=5-7, dist=4
# self.speed = 150 # 150
speed = 50 # mm/second - TODO use odo to update to correct value!
speed_steer_fudge_factor = 5 # 5-7
speed_distance_fudge_factor = 4 # 4
min_angle = 1 * math.pi / 180
steer_speed = speed * speed_steer_fudge_factor
distance_speed = speed * speed_distance_fudge_factor
if self.left_or_right == self.RIGHT:
distance = -1000000000
distance_from_wall = state.radar.radar[90]
distance_error = distance_from_wall - self.distance
angle = 0
if abs(distance_error) < 10:
angle = 0
elif distance_error > 0 and distance_error > distance_speed:
angle = math.pi / 4
if front_distance < 450:
angle += math.pi * (450 - front_distance) / 1800 # divide with 10 and by 180 -> 450/10 - 45deg
elif distance_error < 0 and distance_error < -distance_speed:
angle = -math.pi / 4
if front_distance < 450:
angle -= math.pi * (450 - front_distance) / 1800 # divide with 10 and by 180 -> 450/10 - 45deg
else:
try:
angle = math.asin(distance_error / distance_speed)
except BaseException as ex:
log(LOG_LEVEL_ALWAYS, "Domain error wa={: 3d} dw={: 4d} de={: 4d} d={: 4d} s={: 3d}".format(int(0), int(distance_from_wall), int(distance_error), int(distance), int(speed)))
else:
distance = 1000000000
distance_from_wall = state.radar.radar[270]
distance_error = distance_from_wall - self.distance
angle = 0
if abs(distance_error) < 10:
angle = 0
elif distance_error > 0 and distance_error > distance_speed:
angle = -math.pi / 4
if front_distance < 450:
angle -= math.pi * (450 - front_distance) / 1800 # divide with 10 and by 180 -> 450/10 - 45deg
elif distance_error < 0 and distance_error < -distance_speed:
angle = math.pi / 4
if front_distance < 450:
angle += math.pi * (450 - front_distance) / 1800 # divide with 10 and by 180 -> 450/10 - 45deg
else:
try:
angle = -math.asin(distance_error / distance_speed)
except BaseException as ex:
log(LOG_LEVEL_ALWAYS, "Domain error wa={: 3d} dw={: 4d} de={: 4d} d={: 4d} s={: 3d}".format(int(0), int(distance_from_wall), int(distance_error), int(distance), int(speed)))
distance = int(distance)
angle = int(angle * 180 / math.pi)
self.rover.command(pyroslib.publish, self.speed, angle, distance)
# pyroslib.publish("move/steer", str(distance) + " " + str(self.speed) + " " + str(angle))
wheel_orientations = state.wheel_odos.odos
log(LOG_LEVEL_INFO, "{:16.3f}: dist_f={: 4d} wa={: 3d} dist_w={: 4d} dist_err={: 3d} la={: 3d} ld={: 3d} ra={: 3d} rd={: 3d} s_spd={: 3d} dist_spd={: 3d} dist={: 4d} angle={: 3d} heading={: 3d} odo={:7.2f}".format(
float(time.time()),
int(front_distance),
int(0 * 180 / math.pi), int(distance_from_wall), int(distance_error),
int(0 * 180 / math.pi), int(0), int(0 * 180 / math.pi), int(0),
int(steer_speed), int(distance_speed),
int(distance), int(angle), int(state.heading.heading),
float(state.wheel_orientations.orientations['fl'])
))
def getActionName(self):
return "Chicane " + ("L" if self.left_or_right == self.LEFT else "R")
class MazeCorridorAction(MazeAction):
def __init__(self, agent, left_or_right, distance, speed, next_action=None):
super(MazeCorridorAction, self).__init__(agent)
self.left_or_right = left_or_right
self.distance = distance
self.speed = speed
self.next_action = next_action
if self.left_or_right == MazeAction.RIGHT:
self.a1 = 45
self.a2 = 90
self.a3 = 135
else:
self.a1 = 315
self.a2 = 270
self.a3 = 225
self.left_corner_action = MazeTurnAroundCornerAction(self, self.LEFT, int(self.distance * 1), self.speed, self)
self.right_corner_action = MazeTurnAroundCornerAction(self, self.RIGHT, int(self.distance * 1), self.speed, self)
# self.right_corner_action = MazeTurnAroundCornerAction(self.odo, self.radar, self.heading, self.RIGHT, self.distance, self.speed, DriverForwardForTimeActoun(10, self.speed, None))
self.been_in_chicane = False
def start(self):
super(MazeCorridorAction, self).start()
self.been_in_chicane = False
def end(self):
super(MazeCorridorAction, self).end()
def next(self):
        state = self.rover.getRoverState()
        left_diagonal_distance = state.radar.radar[315]
front_distance = state.radar.radar[0]
if state.radar.status[0] != 0 and abs(state.radar.radar_deltas[0]) > 100:
log(LOG_LEVEL_INFO, "Front distance not correct: d={:4d} s={:2d} delta={:4d}".format(front_distance, state.radar.status[0], state.radar.radar_deltas[0]))
else:
if state.left_front_distance_of_wall > 100 and front_distance < 550:
expected_diagonal_distance = 0
if state.left_wall_angle < 0:
expected_diagonal_distance = front_distance * 2 * math.cos(math.pi / 4 + state.left_wall_angle)
else:
expected_diagonal_distance = front_distance * math.cos(state.left_wall_angle) * SQRT2
                if False and not self.been_in_chicane and front_distance > 300 and left_diagonal_distance > expected_diagonal_distance * 1.2:  # chicane detection is switched off by the leading 'False'
log(LOG_LEVEL_INFO, "Found chicane... lfd={: 4d} fd={: 4d} dd={: 4d} ed={: 4d}".format(int(state.left_front_distance_of_wall), int(front_distance), int(left_diagonal_distance), int(expected_diagonal_distance)))
self.been_in_chicane = True
return ChicaneAction(self, self.LEFT, self.distance, self.speed, next_action=self)
else:
log(LOG_LEVEL_INFO, "Found corner - turning, lfd={: 4d} fd={: 4d} dd={: 4d} ed={: 4d}".format(int(state.left_front_distance_of_wall), int(front_distance), int(left_diagonal_distance), int(expected_diagonal_distance)))
return self.left_corner_action
if front_distance < 550 and state.radar.radar_deltas[0] < 0:
left_distances = state.radar.radar[270] + state.radar.radar[315]
right_distances = state.radar.radar[90] + state.radar.radar[45]
if left_distances > right_distances:
log(LOG_LEVEL_INFO, "Found corner 2 - turning left, fd={: 4d} ld={: 4d} rd={: 4d}".format(int(front_distance), int(left_distances), int(right_distances)))
return self.left_corner_action
else:
log(LOG_LEVEL_INFO, "Found corner 2 - turning left, fd={: 4d} ld={: 4d} rd={: 4d}".format(int(front_distance), int(left_distances), int(right_distances)))
return self.right_corner_action
if state.right_front_distance_of_wall > 100 and state.left_front_distance_of_wall > 100 and front_distance < 700:
log(LOG_LEVEL_INFO, "Found final corner - turning to finish, rfd={: 4d} fd={: 4d} ".format(int(state.right_front_distance_of_wall), int(front_distance)))
return self.right_corner_action
return self
def execute(self):
state = self.rover.getRoverState()
left_diagonal_distance = state.radar.radar[315]
front_distance = state.radar.radar[0]
gain = 60
offset = 150
# Values that worked speed=150, steer=5-7, dist=4
# self.speed = 150 # 150
speed = 50 # mm/second - TODO use odo to update to correct value!
speed_steer_fudge_factor = 5 # 5-7
speed_distance_fudge_factor = 4 # 4
min_angle = 1 * math.pi / 180
steer_speed = speed * speed_steer_fudge_factor
distance_speed = speed * speed_distance_fudge_factor
if self.left_or_right == self.RIGHT:
wall_angle = state.right_wall_angle
if -min_angle < state.right_wall_angle < min_angle:
distance = 1000000000
else:
distance = steer_speed / state.right_wall_angle
if 0 <= distance < 150:
distance = 150
elif -150 < distance < 0:
distance = -150
distance = -distance
distance_from_wall = state.right_wall_distance
distance_error = distance_from_wall - self.distance
angle = 0
if abs(distance_error) < 10:
angle = 0
elif distance_error > 0 and distance_error > distance_speed:
angle = math.pi / 4
elif distance_error < 0 and distance_error < -distance_speed:
angle = -math.pi / 4
else:
try:
angle = math.asin(distance_error / distance_speed)
except BaseException as ex:
log(LOG_LEVEL_ALWAYS, "Domain error wa={: 3d} dw={: 4d} de={: 4d} d={: 4d} s={: 3d}".format(int(wall_angle), int(distance_from_wall), int(distance_error), int(distance), int(speed)))
else:
wall_angle = state.left_wall_angle
if -min_angle < state.left_wall_angle < min_angle:
distance = 1000000000
else:
distance = steer_speed / state.left_wall_angle
if 0 <= distance < 150:
distance = 150
elif -150 < distance < 0:
distance = -150
distance_from_wall = state.left_wall_distance
distance_error = distance_from_wall - self.distance
angle = 0
if abs(distance_error) < 10:
angle = 0
elif distance_error > 0 and distance_error > distance_speed:
angle = -math.pi / 4
elif distance_error < 0 and distance_error < -distance_speed:
angle = math.pi / 4
else:
try:
angle = -math.asin(distance_error / distance_speed)
except BaseException as ex:
log(LOG_LEVEL_ALWAYS, "Domain error wa={: 3d} dw={: 4d} de={: 4d} d={: 4d} s={: 3d}".format(int(wall_angle), int(distance_from_wall), int(distance_error), int(distance), int(speed)))
distance = int(distance)
angle = int(angle * 180 / math.pi)
self.rover.command(pyroslib.publish, self.speed, angle, distance)
# pyroslib.publish("move/steer", str(distance) + " " + str(self.speed) + " " + str(angle))
wheel_orientations = state.wheel_odos.odos
#
log(LOG_LEVEL_INFO, "{:16.3f}: dist_f={: 4d} wa={: 3d} dist_w={: 4d} dist_err={: 3d} la={: 3d} ld={: 3d} ra={: 3d} rd={: 3d} s_spd={: 3d} dist_spd={: 3d} dist={: 4d} angle={: 3d} heading={: 3d} odo={:7.2f}".format(
float(time.time()),
int(front_distance),
int(wall_angle * 180 / math.pi), int(distance_from_wall), int(distance_error),
int(state.left_wall_angle * 180 / math.pi), int(state.left_front_distance_of_wall), int(state.right_wall_angle * 180 / math.pi), int(state.right_front_distance_of_wall),
int(steer_speed), int(distance_speed),
int(distance), int(angle), int(state.heading.heading),
float(state.wheel_orientations.orientations['fl'])
))
def getActionName(self):
return "Corridor"
class MazeTurnAroundCornerAction(MazeAction):
def __init__(self, agent, left_or_right, distance, speed, next_action=None):
super(MazeTurnAroundCornerAction, self).__init__(agent)
self.left_or_right = left_or_right
self.distance = distance * (1 if left_or_right == self.RIGHT else -1)
self.speed = speed
self.start_heading = 0
self.last_heading = 0
self.requested_heading = 0
self.pid = None
self.next_action = next_action
self.error = 0
def start(self):
super(MazeTurnAroundCornerAction, self).start()
state = self.rover.getRoverState()
self.start_heading = state.heading.heading
self.requested_heading = normaiseAngle(self.start_heading + 80 * -(1 if self.left_or_right == self.RIGHT else -1))
self.pid = PID(1, 0.0, 0.05, 1, 0, diff_method=angleDiference)
self.pid.process(self.requested_heading, self.start_heading)
log(LOG_LEVEL_INFO, "Starting to turn around corner at distance {:04d} at speed {:04d}, start heading {:07.3f}, requested heading {:07.3f}".format(self.distance, self.speed, self.start_heading, self.requested_heading))
self.rover.command(pyroslib.publish, self.speed, 0, self.distance)
# pyroslib.publish("move/steer", str(self.distance) + " " + str(self.speed))
def end(self):
super(MazeTurnAroundCornerAction, self).end()
def next(self):
        state = self.rover.getRoverState()
        heading = state.heading.heading
self.error = self.pid.process(self.requested_heading, heading)
if self.left_or_right == self.LEFT and self.error > 0:
return self
elif self.left_or_right == self.RIGHT and self.error < 0:
return self
else:
if self.next_action is not None:
log(LOG_LEVEL_INFO, "Finished turning around the corner - invoking next action " + self.next_action.getActionName())
else:
log(LOG_LEVEL_INFO, "Finishing turning - no next action spectified.")
return self.next_action
def execute(self):
state = self.rover.getRoverState()
heading = state.heading.heading
last_heading = self.last_heading
self.last_heading = heading
log(LOG_LEVEL_INFO, "Turning speed={:04d} h={:07.3f} lh={:07.3f} dh={:07.3f} rh={:07.3f} e={:07.3f}"
.format(self.speed, heading, last_heading, angleDiference(heading, last_heading), self.requested_heading, self.error))
def getActionName(self):
return "Turn-Around-Corner"
class DriverForwardForTimeAction(Action):
def __init__(self, agent, time, speed, next_action):
super(DriverForwardForTimeAction, self).__init__(agent)
self.time = time
self.speed = speed
self.next_action = next_action
def start(self):
self.rover.command(pyroslib.publish, self.speed, 0)
# pyroslib.publish("move/drive", "0 " + str(self.speed))
log(LOG_LEVEL_INFO, "Going forward for " + str(self.time) + " ticks.")
def end(self):
pass
def next(self):
if self.time > 0:
self.time -= 1
log(LOG_LEVEL_INFO, "Going forward for " + str(self.time) + " ticks.")
return self
return self.next_action
if __name__ == "__main__":
from rover import Radar, RoverState
radar_values = {0: 10, 45: SQRT2 * 10, 90: 10, 135: SQRT2 * 10, 180: 10, 225: SQRT2 * 10, 270: 10, 315: SQRT2 * 10}
radar_last_values = {0: 10, 45: SQRT2 * 10, 90: 10, 135: SQRT2 * 10, 180: 10, 225: SQRT2 * 10, 270: 10, 315: SQRT2 * 10}
radar_status = {0: 0, 45: 0, 90: 0, 135: 0, 180: 0, 225: 0, 270: 0, 315: 0}
attitude = MazeAttitude()
radar = Radar(0, radar_values, radar_status, Radar(0, radar_last_values, radar_status))
state = RoverState(None, None, None, radar, None, None)
def printWallLines(a):
if attitude.lines[a].angle is None:
print("{:3d} -> point too far - not calculated".format(a))
else:
angle = int(attitude.lines[a].angle * 180 / math.pi)
point = attitude.points[a]
if point is None:
print("{:3d} -> line at {:3d} angle".format(a, angle))
else:
if point == MazeAttitude.LEFT_WALL:
wall = "left wall"
elif point == MazeAttitude.RIGHT_WALL:
wall = "right wall"
elif point == MazeAttitude.FRONT_WALL:
wall = "front wall"
elif point == MazeAttitude.BACK_WALL:
wall = "back wall"
else:
wall = "no wall"
print("{:3d} -> line at {:3d} angle belogs to {:s}".format(a, angle, wall))
def printWall(w):
if w.angle is None:
print("Wall {:3d} -> is too far - not calculated".format(w.ds_angle))
else:
if w.distance is None:
print("Wall {:3d} -> has angle {:3d} but is too far - distance not calculated".format(w.ds_angle, int(w.angle * 180 / math.pi)))
else:
print("Wall {:3d} -> has angle {:3d} and is at {:3d}".format(w.ds_angle, int(w.angle * 180 / math.pi), w.distance))
def printWalls():
for p in attitude.points:
printWallLines(p)
for w in attitude.walls:
printWall(w)
print("----------------------------------------------------------")
# attitude.calculate(state)
# printWalls()
#
# state.radar.radar[0] = 5
# state.radar.radar[45] = SQRT2 * 5 * 0.9
# state.radar.radar[315] = SQRT2 * 17
# state.radar.radar[270] = SQRT2 * 13
# state.radar.radar[225] = SQRT2 * 12
# attitude.calculate(state)
# printWalls()
state.radar.radar[180] = 50
state.radar.radar[315] = 30
attitude.calculate(state)
printWalls()
| 2.734375 | 3 |
src/spaceone/monitoring/conf/proto_conf.py | jean1042/monitoring | 5 | 2680 | PROTO = {
'spaceone.monitoring.interface.grpc.v1.data_source': ['DataSource'],
'spaceone.monitoring.interface.grpc.v1.metric': ['Metric'],
'spaceone.monitoring.interface.grpc.v1.project_alert_config': ['ProjectAlertConfig'],
'spaceone.monitoring.interface.grpc.v1.escalation_policy': ['EscalationPolicy'],
'spaceone.monitoring.interface.grpc.v1.event_rule': ['EventRule'],
'spaceone.monitoring.interface.grpc.v1.webhook': ['Webhook'],
'spaceone.monitoring.interface.grpc.v1.maintenance_window': ['MaintenanceWindow'],
'spaceone.monitoring.interface.grpc.v1.alert': ['Alert'],
'spaceone.monitoring.interface.grpc.v1.note': ['Note'],
'spaceone.monitoring.interface.grpc.v1.event': ['Event'],
}
| 1.0625 | 1 |
tests/delete_regress/models.py | PirosB3/django | 2 | 2681 | from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
class Award(models.Model):
name = models.CharField(max_length=25)
object_id = models.PositiveIntegerField()
content_type = models.ForeignKey(ContentType)
content_object = GenericForeignKey()
class AwardNote(models.Model):
award = models.ForeignKey(Award)
note = models.CharField(max_length=100)
class Person(models.Model):
name = models.CharField(max_length=25)
awards = GenericRelation(Award)
class Book(models.Model):
pagecount = models.IntegerField()
class Toy(models.Model):
name = models.CharField(max_length=50)
class Child(models.Model):
name = models.CharField(max_length=50)
toys = models.ManyToManyField(Toy, through='PlayedWith')
class PlayedWith(models.Model):
child = models.ForeignKey(Child)
toy = models.ForeignKey(Toy)
date = models.DateField(db_column='date_col')
class PlayedWithNote(models.Model):
played = models.ForeignKey(PlayedWith)
note = models.TextField()
class Contact(models.Model):
label = models.CharField(max_length=100)
class Email(Contact):
email_address = models.EmailField(max_length=100)
class Researcher(models.Model):
contacts = models.ManyToManyField(Contact, related_name="research_contacts")
class Food(models.Model):
name = models.CharField(max_length=20, unique=True)
class Eaten(models.Model):
food = models.ForeignKey(Food, to_field="name")
meal = models.CharField(max_length=20)
# Models for #15776
class Policy(models.Model):
policy_number = models.CharField(max_length=10)
class Version(models.Model):
policy = models.ForeignKey(Policy)
class Location(models.Model):
version = models.ForeignKey(Version, blank=True, null=True)
class Item(models.Model):
version = models.ForeignKey(Version)
location = models.ForeignKey(Location, blank=True, null=True)
# Models for #16128
class File(models.Model):
pass
class Image(File):
class Meta:
proxy = True
class Photo(Image):
class Meta:
proxy = True
class FooImage(models.Model):
my_image = models.ForeignKey(Image)
class FooFile(models.Model):
my_file = models.ForeignKey(File)
class FooPhoto(models.Model):
my_photo = models.ForeignKey(Photo)
class FooFileProxy(FooFile):
class Meta:
proxy = True
class OrgUnit(models.Model):
name = models.CharField(max_length=64, unique=True)
class Login(models.Model):
description = models.CharField(max_length=32)
orgunit = models.ForeignKey(OrgUnit)
class House(models.Model):
address = models.CharField(max_length=32)
class OrderedPerson(models.Model):
name = models.CharField(max_length=32)
lives_in = models.ForeignKey(House)
class Meta:
ordering = ['name']
| 2.09375 | 2 |
All_Program.py | TheoSaify/Yolo-Detector | 0 | 2682 | import cv2
from cv2 import *
import numpy as np
from matplotlib import pyplot as plt
###############################SIFT MATCH Function#################################
def SIFTMATCH(img1,img2):
# Initiate SIFT detector
sift = cv2.xfeatures2d.SIFT_create()
# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img1,None)
kp2, des2 = sift.detectAndCompute(img2,None)
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks = 50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1,des2,k=2)
# store all the good matches as per Lowe's ratio test.
good = []
for m,n in matches:
if m.distance < 0.7*n.distance:
good.append(m)
if len(good)>MIN_MATCH_COUNT:
src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)
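        # RANSAC with a 5-pixel reprojection threshold; `mask` flags the inlier
        # matches consistent with the estimated homography M.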
matchesMask = mask.ravel().tolist()
h,w = img1.shape
pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
dst = cv2.perspectiveTransform(pts,M)
img2 = cv2.polylines(img2,[np.int32(dst)],True,255,3, cv2.LINE_AA)
else:
print("Not enough matches are found - %d/%d" % (len(good),MIN_MATCH_COUNT))
matchesMask = None
draw_params = dict(matchColor = (0,255,0), # draw matches in green color
singlePointColor = None,
matchesMask = matchesMask, # draw only inliers
flags = 2)
img3 = cv2.drawMatches(img1,kp1,img2,kp2,good,None,**draw_params)
cv2.moveWindow('output', 150,150) # Move it to (40,30)
cv2.imshow('output',img3)
cv2.waitKey(0) #The function waits for specified milliseconds for any keyboard event
cv2.destroyAllWindows() #cv2.destroyAllWindows() simply destroys all the windows we created
###################################################################################################
#################################Function#########################
def CercleDetection(img1):
# Read Image
raw_image = cv2.imread(img1)
# Bilateral filtering forms a very good way to preserve edges. It is a non-linear filter and helps reduce noise
# The parameters used are: the image, window size for averaging the neighbour, sigmaColor(Sigma value in the color space.
bilateral_filtered_image = cv2.bilateralFilter(raw_image, 5, 175, 175)
# Canny edge detector to detect edges in the image It takes 3 parameters: image, lower threshold and upper threshold.
edge_detected_image = cv2.Canny(bilateral_filtered_image, 75, 200)
# Find Contours
_, contours, hierarchy = cv2.findContours(edge_detected_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    contour_list = []
    cX, cY = 0, 0  # fall back to the origin if no circle is detected (avoids a NameError)
for contour in contours:
approx = cv2.approxPolyDP(contour,0.01*cv2.arcLength(contour,True),True)
area = cv2.contourArea(contour)
if ((len(approx) > 8) & (len(approx) < 23) & (area > 50000) ):
contour_list.append(contour)
print("area %.3f"%(area))
M = cv2.moments(contour)
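            # centroid from raw image moments: cX = M10/M00, cY = M01/M00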
# calculate x,y coordinate of center
if M["m00"] != 0:
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
else:
cX, cY = 0, 0
cv2.circle(raw_image, (cX, cY), 5, (255, 255, 255), -1)
cv2.putText(raw_image, "centroid", (cX - 25, cY - 25),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
# Draw Contours of circles
cv2.drawContours(raw_image, contour_list, -1, (0, 255, 0), 3)
# Display Images
cv2.imshow("Objects Detected",raw_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
return cX,cY
############################################################
###########################MAIN#############################
MIN_MATCH_COUNT = 10
e1 = cv2.getTickCount()
# # initialize the camera
# cam = VideoCapture(0) # 0 -> index of camera
# s, img1 = cam.read()
# ret = cam.set(3,1920);
# ret = cam.set(4,1080);
# if s: # frame captured without any errors
# cv2.namedWindow("output", cv2.WINDOW_NORMAL)
# cv2.imshow("cam-test",img1)
# waitKey(0)
# destroyWindow("cam-test")
# imwrite("Scene.jpg",img1) #save image
# del(cam)
# Scene image in Grayscale
# imgray = cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY)
imgray = cv2.imread('Scene.jpg', 0) # queryImage
# Reference Piece Image
img1 = cv2.imread('img3.jpg',0) # queryImage
# SIFT Algorithm fore Object Detection
SIFTMATCH(img1, imgray)
# reference image
cX, cY = CercleDetection('img3.jpg')
print('cX = %.3f , cY =%.3f' % (cX, cY))
# webcam scene image (captured and saved above as Scene.jpg)
cX2, cY2 = CercleDetection('Scene.jpg')
print('cX2 = %.3f , cY2 =%.3f' % (cX2, cY2))
deltaX = (cX2-cX)
deltaY = -(cY2 - cY)
# Write X and Y values to File
file = open("values.txt", "w")
file.write("%.3f \n" % deltaX)
file.write("%.3f \n" % deltaY)
file.close()
#Calculate time of execution
e2 = cv2.getTickCount()
time = (e2 - e1)/ cv2.getTickFrequency()
print('time needed to execute')
print(time)
| 2.78125 | 3 |
apps/UI_phone_mcdm.py | industrial-optimization-group/researchers-night | 0 | 2683 | import dash
from dash.exceptions import PreventUpdate
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import dash_bootstrap_components as dbc
import dash_table
import plotly.express as ex
import plotly.graph_objects as go
import pandas as pd
import numpy as np
data = pd.read_csv("./data/Phone_dataset_new.csv", header=0)
details = pd.read_csv("./data/Phone_details.csv", header=0)
names = details.loc[0]
data = data.rename(columns=names)
details = details.rename(columns=names)
maxi = details.loc[1].astype(int)
details_on_card = details.loc[2].astype(int)
details_on_card = details.columns[details_on_card == 1]
fitness_columns = {
"Memory": -1,
"RAM": -1,
"Camera (MP)": -1,
"Price (Euros)": 1,
}
fitness_data = data[fitness_columns] * maxi[fitness_columns].values
external_stylesheets = ["https://codepen.io/chriddyp/pen/bWLwgP.css"]
app = dash.Dash(
__name__,
external_stylesheets=[dbc.themes.LITERA],
eager_loading=True,
suppress_callback_exceptions=True,
)
app.layout = html.Div(
children=[
# .container class is fixed, .container.scalable is scalable
dbc.Row(
[
dbc.Col(
html.H1(
children="What is your optimal phone?",
className="text-center mt-4",
)
)
]
),
dbc.Row(
[
dbc.Col(
children=[
# Top card with details(?)
dbc.Card(
children=[
dbc.CardBody(
[
html.H4(
"Researcher's Night Event",
className="card-title text-center",
),
html.P(
(
"This app uses decision support tools to "
"quickly and easily find phones which reflect "
"the user's desires. Input your preferences "
"below. The box on top right shows the phone "
"which matches the preferences the best. "
"The box on bottom right provides some "
"close alternatives."
),
className="card-text",
),
]
)
],
className="mr-3 ml-3 mb-2 mt-2",
),
dbc.Form(
[
dbc.FormGroup(
children=[
dbc.Label(
"Choose desired operating system",
html_for="os-choice",
),
dbc.RadioItems(
options=[
{
"label": "Android",
"value": "Android",
},
{"label": "iOS", "value": "IOS"},
{
"label": "No preference",
"value": "both",
},
],
id="os-choice",
value="both",
inline=True,
# className="text-center mt-4",
),
],
className="mr-3 ml-3 mb-2 mt-2",
),
dbc.FormGroup(
children=[
dbc.Label(
"Choose desired Memory capacity (GB)",
html_for="memory-choice",
),
dcc.Slider(
id="memory-choice",
min=16,
max=256,
step=None,
included=False,
value=256,
marks={
16: "16",
32: "32",
64: "64",
128: "128",
256: "256",
},
# className="text-center mt-5",
),
],
className="mr-3 ml-3 mb-2 mt-2",
),
dbc.FormGroup(
children=[
dbc.Label(
"Choose desired RAM capacity (GB)",
html_for="ram-choice",
),
dcc.Slider(
id="ram-choice",
min=2,
max=12,
step=1,
value=12,
included=False,
marks={
2: "2",
3: "3",
4: "4",
5: "5",
6: "6",
7: "7",
8: "8",
9: "9",
10: "10",
11: "11",
12: "12",
},
className="text-center mt-5",
),
],
className="mr-3 ml-3 mb-2 mt-2",
),
dbc.FormGroup(
children=[
dbc.Label(
"Choose desired camera resolution (MP)",
html_for="cam-choice",
),
dcc.Slider(
id="cam-choice",
min=0,
max=130,
step=1,
included=False,
value=70,
marks={
0: "0",
10: "10",
30: "30",
50: "50",
70: "70",
90: "90",
110: "110",
130: "130",
},
className="text-center mt-5",
),
],
className="mr-3 ml-3 mb-2 mt-2",
),
dbc.FormGroup(
children=[
dbc.Label(
"Choose desired budget (Euros)",
html_for="cost-choice",
),
dcc.Slider(
id="cost-choice",
min=0,
max=1400,
step=1,
included=False,
value=100,
marks={
0: "0",
200: "200",
400: "400",
600: "600",
800: "800",
1000: "1000",
1200: "1200",
1400: "1400",
},
className="text-center mt-5",
),
],
className="mr-3 ml-3 mb-2 mt-2",
),
],
style={"maxHeight": "560px", "overflow": "auto"},
),
],
width={"size": 5, "offset": 1},
),
dbc.Col(
children=[
dbc.Card(
children=[
dbc.CardHeader("The best phone for you is:"),
dbc.CardBody(id="results"),
],
className="mb-4",
),
dbc.Card(
children=[
dbc.CardHeader("Other great phones:"),
dbc.CardBody(
id="other-results",
children=(
[
html.P(
html.Span(
f"{i}. ",
id=f"other-results-list-{i}",
)
)
for i in range(2, 6)
]
+ [
dbc.Tooltip(
id=f"other-results-tooltip-{i}",
target=f"other-results-list-{i}",
placement="right",
style={
"maxWidth": 700,
"background-color": "white",
"color": "white",
"border-style": "solid",
"border-color": "black",
},
)
for i in range(2, 6)
]
),
),
],
className="mt-4",
),
html.Div(id="tooltips"),
],
width={"size": 5, "offset": 0},
className="mb-2 mt-2",
),
]
),
dbc.Row([html.Div(id="callback-dump")]),
],
)
@app.callback(
[
Output("results", "children"),
*[Output(f"other-results-list-{i}", "children") for i in range(2, 6)],
*[Output(f"other-results-tooltip-{i}", "children") for i in range(2, 6)],
],
[
Input(f"{attr}-choice", "value")
for attr in ["os", "memory", "ram", "cam", "cost"]
],
)
def results(*choices):
if choices[0] == "both":
choice_data = data
elif choices[0] == "IOS":
choice_data = data[[True if "IOS" in st else False for st in data["OS"]]]
if choices[0] == "Android":
choice_data = data[[True if "Android" in st else False for st in data["OS"]]]
relevant_data = choice_data[
["Memory", "RAM", "Camera (MP)", "Price (Euros)",]
].reset_index(drop=True)
card_data = choice_data[details_on_card].reset_index(drop=True)
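    # In effect this ranks phones with a Chebyshev-style achievement function:
    # objectives are sign-flipped so every column is minimised, deviations from the
    # aspiration levels are normalised by the ideal-nadir range, and the phone with
    # the smallest worst-case (maximum) deviation comes first.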
maxi = np.asarray([-1, -1, -1, 1])
relevant_data = relevant_data * maxi
ideal = relevant_data.min().values
nadir = relevant_data.max().values
aspirations = choices[1:] * maxi
distance = (aspirations - relevant_data) / (ideal - nadir)
distance = distance.max(axis=1)
distance_order = np.argsort(distance)
best = table_from_data(card_data.loc[distance_order.values[0]], choices[1:])
total_number = len(distance_order)
if total_number >= 4:
others, tooltips = other_options(card_data.loc[distance_order.values[1:5]])
else:
others, tooltips = other_options(
card_data.loc[distance_order.values[1:total_number]]
)
others = others + [f"{i}. -" for i in range(len(others) + 2, 6)]
tooltips = tooltips + [None for i in range(len(tooltips) + 2, 6)]
return (best, *others, *tooltips)
"""@app.callback(Output("tooltips", "children"), [Input("callback-dump", "children")])
def tooltips(tooldict):
num = len(tooldict["ids"])
content = []
for i in range(num):
content.append(dbc.Tooltip(tooldict["tables"][i], target=tooldict["ids"][i]))
return content"""
def table_from_data(data, choices):
# print(choices)
to_compare = ["Memory", "RAM", "Camera (MP)", "Price (Euros)"]
# print(data[to_compare].values)
diff = (data[to_compare].values - choices) * [1, 1, 1, -1]
colors = [None, None, None] + ["green" if x >= 0 else "red" for x in diff]
# print(np.sign(diff))
return dbc.Table(
[
html.Tbody(
[
html.Tr(
[
html.Th(col),
html.Td([str(data[col]),],),
html.Td([html.Span(" ▉", style={"color": c,},)],),
]
)
for (col, c) in zip(data.index, colors)
]
)
]
)
def table_from_data_horizontal(data):
header = [html.Thead(html.Tr([html.Th(col) for col in data.index]))]
body = [html.Tbody([html.Tr([html.Td(data[col]) for col in data.index])])]
return dbc.Table(header + body)
def other_options(data):
contents = []
tables = []
ids = []
i = 2
for index, row in data.iterrows():
contents.append(f"{i}. {row['Model']}")
tables.append(table_from_data_horizontal(row))
i = i + 1
return contents, tables
if __name__ == "__main__":
app.run_server(debug=False)
| 2.4375 | 2 |
pyxon/utils.py | k-j-m/Pyxon | 0 | 2684 | import pyxon.decode as pd
def unobjectify(obj):
"""
Turns a python object (must be a class instance)
into the corresponding JSON data.
Example:
>>> @sprop.a # sprop annotations are needed to tell the
    >>> @sprop.b # unobjectify function what parameters need
    >>> @sprop.c # to be written out.
    >>> class Baz(object):
    ...     def __init__(self, a, b, c):
    ...         self.a = a
    ...         self.b = b
    ...         self.c = c
>>>
>>> baz = Baz(a=1, b=2, c='three')
>>> unobjectify(baz)
{ 'a':1, 'b':2, 'c':'three' }
"""
cls = obj.__class__
# Create empty data
data = {}
sprops,cprops = _get_registered_props(cls)
# Add simple properties
for p in sprops:
data[p]=getattr(obj,p)
# Add calculated data
for p in cprops:
f2 = cprops[p][1]
data[p]=f2(getattr(obj,p))
data = pd.add_type_property(data, cls)
return data
def _get_registered_props(cls):
"""
Returns all of the registered properties for a given class.
Recursively calls up to parent classes that are inherited from.
"""
sprops = pd.class_sprops.get(cls,{}) # [name]
cprops = pd.class_cprops.get(cls,{}) # {name:(fn, inv_fn)}
if cls in pd.conc_to_abstract: # {ConcreteClass: (AbstractClass, _)}
parent_cls = pd.conc_to_abstract[cls][0]
parent_sprops, parent_cprops = _get_registered_props(parent_cls)
sprops = list(set(sprops).union(set(parent_sprops)))
cprops2 = parent_cprops.copy()
cprops2.update(cprops)
cprops = cprops2
return sprops,cprops
def obj(cls):
"""
Helper function returns a closure turning objectify into a
single argument function. This cuts down the amount of code
needed in class annotations by removing the need to write
lambda functions.
"""
return lambda d: objectify(d, cls)
def objectify(data, cls):
"""
Function takes JSON data and a target class as arguments
and returns an instance of the class created using the
JSON data.
I'm not sure whether it is a great idea to keep (un)objectify
separate from the decode module, since they need to access
some of the module-level parameters.
"""
# Create empty class
concrete_cls = pd.conc2(data, cls)
obj = concrete_cls()
sprops,cprops = _get_registered_props(cls)
# Add simple properties from data
for p in sprops:
setattr(obj, p, data[p])
# Add calculated properties from data
for p in cprops:
f1 = cprops[p][0]
setattr(obj, p, f1(data[p]))
return obj
def transform_map(kfun=lambda x: x, vfun=lambda x: x):
"""
Function that takes two functions as arguments and returns
a function that applies those functions over all of the
keys and values in a map and returns the transformed version
of the map.
kfun: function applied to all keys (default identity)
vfun: function applied to all values (default identity)
(k -> k') -> (v -> v') -> ((k, v) -> (k', v'))
"""
return lambda dct: dict([(kfun(k),vfun(v)) for k,v in dct.items()])
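# Illustrative usage (not part of the original module): upper-case the keys and
# double the values of a dict.
#   to_upper_doubled = transform_map(kfun=str.upper, vfun=lambda v: v * 2)
#   to_upper_doubled({'a': 1, 'b': 2})  # -> {'A': 2, 'B': 4}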
def transform_list(item_decoder=lambda x: x):
return lambda lst: map(item_decoder, lst)
def identity(x):
"""
Identity function is needed when performing transformations
on maps where some operation is needed on either the keys
or values, but not both.
"""
return x
| 2.90625 | 3 |
AxonDeepSeg/segment.py | sophie685/newfileplzworklord | 0 | 2685 | # Segmentation script
# -------------------
# This script lets the user automatically segment one or more images using the default segmentation models: SEM or
# TEM.
#
# <NAME> - 2017-08-30
# Imports
import sys
from pathlib import Path
import json
import argparse
from argparse import RawTextHelpFormatter
from tqdm import tqdm
import pkg_resources
import AxonDeepSeg
import AxonDeepSeg.ads_utils as ads
from AxonDeepSeg.apply_model import axon_segmentation
from AxonDeepSeg.ads_utils import convert_path
# Global variables
SEM_DEFAULT_MODEL_NAME = "default_SEM_model_v1"
TEM_DEFAULT_MODEL_NAME = "default_TEM_model_v1"
MODELS_PATH = pkg_resources.resource_filename('AxonDeepSeg', 'models')
MODELS_PATH = Path(MODELS_PATH)
default_SEM_path = MODELS_PATH / SEM_DEFAULT_MODEL_NAME
default_TEM_path = MODELS_PATH / TEM_DEFAULT_MODEL_NAME
default_overlap = 25
# Definition of the functions
def segment_image(path_testing_image, path_model,
overlap_value, config, resolution_model,
acquired_resolution = None, verbosity_level=0):
'''
Segment the image located at the path_testing_image location.
:param path_testing_image: the path of the image to segment.
:param path_model: where to access the model
    :param overlap_value: the number of pixels to be used for overlap when doing prediction. A higher value means
    fewer border artifacts but a longer segmentation time.
:param config: dict containing the configuration of the network
:param resolution_model: the resolution the model was trained on.
:param verbosity_level: Level of verbosity. The higher, the more information is given about the segmentation
process.
:return: Nothing.
'''
# If string, convert to Path objects
path_testing_image = convert_path(path_testing_image)
path_model = convert_path(path_model)
if path_testing_image.exists():
# Extracting the image name and its folder path from the total path.
path_parts = path_testing_image.parts
acquisition_name = Path(path_parts[-1])
path_acquisition = Path(*path_parts[:-1])
# Get type of model we are using
selected_model = path_model.name
# Read image
img = ads.imread(str(path_testing_image))
# Generate tmp file
fp = open(path_acquisition / '__tmp_segment__.png', 'wb+')
img_name_original = acquisition_name.stem
if selected_model == "default_TEM_model_v1":
ads.imwrite(fp,255-img, format='png')
else:
ads.imwrite(fp, img, format='png')
acquisition_name = Path(fp.name).name
segmented_image_name = img_name_original + '_seg-axonmyelin' + '.png'
# Performing the segmentation
axon_segmentation(path_acquisitions_folders=path_acquisition, acquisitions_filenames=[acquisition_name],
path_model_folder=path_model, config_dict=config, ckpt_name='model',
inference_batch_size=1, overlap_value=overlap_value,
segmentations_filenames=segmented_image_name,
resampled_resolutions=resolution_model, verbosity_level=verbosity_level,
acquired_resolution=acquired_resolution,
prediction_proba_activate=False, write_mode=True)
if verbosity_level >= 1:
print(("Image {0} segmented.".format(path_testing_image)))
# Remove temporary file used for the segmentation
fp.close()
(path_acquisition / '__tmp_segment__.png').unlink()
else:
print(("The path {0} does not exist.".format(path_testing_image)))
return None
def segment_folders(path_testing_images_folder, path_model,
overlap_value, config, resolution_model,
acquired_resolution = None,
verbosity_level=0):
'''
Segments the images contained in the image folders located in the path_testing_images_folder.
    :param path_testing_images_folder: the folder containing the image file(s) to segment.
:param path_model: where to access the model.
    :param overlap_value: the number of pixels to be used for overlap when doing prediction. A higher value means
    fewer border artifacts but a longer segmentation time.
:param config: dict containing the configuration of the network
:param resolution_model: the resolution the model was trained on.
:param verbosity_level: Level of verbosity. The higher, the more information is given about the segmentation
process.
:return: Nothing.
'''
# If string, convert to Path objects
path_testing_images_folder = convert_path(path_testing_images_folder)
path_model = convert_path(path_model)
# Update list of images to segment by selecting only image files (not already segmented or not masks)
img_files = [file for file in path_testing_images_folder.iterdir() if (file.suffix.lower() in ('.png','.jpg','.jpeg','.tif','.tiff'))
and (not str(file).endswith(('_seg-axonmyelin.png','_seg-axon.png','_seg-myelin.png','mask.png')))]
# Pre-processing: convert to png if not already done and adapt to model contrast
for file_ in tqdm(img_files, desc="Segmentation..."):
print(path_testing_images_folder / file_)
try:
height, width, _ = ads.imread(str(path_testing_images_folder / file_)).shape
except:
try:
height, width = ads.imread(str(path_testing_images_folder / file_)).shape
except Exception as e:
raise e
image_size = [height, width]
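        # Resampling scales each dimension by acquired_resolution / resolution_model,
        # so the smallest image side still covers one training patch only when
        # acquired_resolution >= patchsize * resolution_model / min(image_size).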
minimum_resolution = config["trainingset_patchsize"] * resolution_model / min(image_size)
if acquired_resolution < minimum_resolution:
print("EXCEPTION: The size of one of the images ({0}x{1}) is too small for the provided pixel size ({2}).\n".format(height, width, acquired_resolution),
"The image size must be at least {0}x{0} after resampling to a resolution of {1} to create standard sized patches.\n".format(config["trainingset_patchsize"], resolution_model),
"One of the dimensions of the image has a size of {0} after resampling to that resolution.\n".format(round(acquired_resolution * min(image_size) / resolution_model)),
"Image file location: {0}".format(str(path_testing_images_folder / file_))
)
sys.exit(2)
selected_model = path_model.name
# Read image for conversion
img = ads.imread(str(path_testing_images_folder / file_))
# Generate tmpfile for segmentation pipeline
fp = open(path_testing_images_folder / '__tmp_segment__.png', 'wb+')
img_name_original = file_.stem
if selected_model == "default_TEM_model_v1":
ads.imwrite(fp,255-img, format='png')
else:
ads.imwrite(fp,img, format='png')
acquisition_name = Path(fp.name).name
segmented_image_name = img_name_original + '_seg-axonmyelin' + '.png'
axon_segmentation(path_acquisitions_folders=path_testing_images_folder, acquisitions_filenames=[acquisition_name],
path_model_folder=path_model, config_dict=config, ckpt_name='model',
inference_batch_size=1, overlap_value=overlap_value,
segmentations_filenames=[segmented_image_name],
acquired_resolution=acquired_resolution,
verbosity_level=verbosity_level,
resampled_resolutions=resolution_model, prediction_proba_activate=False,
write_mode=True)
if verbosity_level >= 1:
tqdm.write("Image {0} segmented.".format(str(path_testing_images_folder / file_)))
# Remove temporary file used for the segmentation
fp.close()
(path_testing_images_folder / '__tmp_segment__.png').unlink()
return None
def generate_default_parameters(type_acquisition, new_path):
'''
Generates the parameters used for segmentation for the default model corresponding to the type_model acquisition.
:param type_model: String, the type of model to get the parameters from.
:param new_path: Path to the model to use.
:return: the config dictionary.
'''
# If string, convert to Path objects
new_path = convert_path(new_path)
# Building the path of the requested model if it exists and was supplied, else we load the default model.
if type_acquisition == 'SEM':
if (new_path is not None) and new_path.exists():
path_model = new_path
else:
path_model = MODELS_PATH / SEM_DEFAULT_MODEL_NAME
elif type_acquisition == 'TEM':
if (new_path is not None) and new_path.exists():
path_model = new_path
else:
path_model = MODELS_PATH / TEM_DEFAULT_MODEL_NAME
path_config_file = path_model / 'config_network.json'
config = generate_config_dict(path_config_file)
return path_model, config
def generate_config_dict(path_to_config_file):
'''
Generates the dictionary version of the configuration file from the path where it is located.
:param path_to_config: relative path where the file config_network.json is located.
:return: dict containing the configuration of the network, or None if no configuration file was found at the
mentioned path.
'''
# If string, convert to Path objects
path_to_config_file = convert_path(path_to_config_file)
try:
with open(path_to_config_file, 'r') as fd:
config_network = json.loads(fd.read())
except:
raise ValueError("No configuration file available at this path.")
return config_network
def generate_resolution(type_acquisition, model_input_size):
'''
    Generates the resolution to use, based on the trained model.
:param type_acquisition: String, "SEM" or "TEM"
:param model_input_size: String or Int, the size of the input.
:return: Float, the resolution of the model.
'''
dict_size = {
"SEM":{
"512":0.1,
"256":0.2
},
"TEM":{
"512":0.01
}
}
return dict_size[str(type_acquisition)][str(model_input_size)]
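# Illustrative lookup (matches the table above): a SEM model with a 512-pixel
# input patch was trained at 0.1 micrometers per pixel.
#   generate_resolution("SEM", 512)  # -> 0.1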
# Main loop
def main(argv=None):
'''
Main loop.
:return: Exit code.
0: Success
2: Invalid argument value
3: Missing value or file
'''
print(('AxonDeepSeg v.{}'.format(AxonDeepSeg.__version__)))
ap = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter)
requiredName = ap.add_argument_group('required arguments')
# Setting the arguments of the segmentation
requiredName.add_argument('-t', '--type', required=True, choices=['SEM','TEM'], help='Type of acquisition to segment. \n'+
'SEM: scanning electron microscopy samples. \n'+
'TEM: transmission electron microscopy samples. ')
requiredName.add_argument('-i', '--imgpath', required=True, nargs='+', help='Path to the image to segment or path to the folder \n'+
'where the image(s) to segment is/are located.')
ap.add_argument("-m", "--model", required=False, help='Folder where the model is located. \n'+
'The default SEM model path is: \n'+str(default_SEM_path)+'\n'+
'The default TEM model path is: \n'+str(default_TEM_path)+'\n')
ap.add_argument('-s', '--sizepixel', required=False, help='Pixel size of the image(s) to segment, in micrometers. \n'+
'If no pixel size is specified, a pixel_size_in_micrometer.txt \n'+
'file needs to be added to the image folder path. The pixel size \n'+
'in that file will be used for the segmentation.',
default=None)
ap.add_argument('-v', '--verbose', required=False, type=int, choices=list(range(0,4)), help='Verbosity level. \n'+
'0 (default) : Displays the progress bar for the segmentation. \n'+
'1: Also displays the path of the image(s) being segmented. \n'+
'2: Also displays the information about the prediction step \n'+
' for the segmentation of current sample. \n'+
'3: Also displays the patch number being processed in the current sample.',
default=0)
ap.add_argument('-o', '--overlap', required=False, type=int, help='Overlap value (in pixels) of the patches when doing the segmentation. \n'+
'Higher values of overlap can improve the segmentation at patch borders, \n'+
'but also increase the segmentation time. \n'+
'Default value: '+str(default_overlap)+'\n'+
'Recommended range of values: [10-100]. \n',
default=25)
ap._action_groups.reverse()
# Processing the arguments
args = vars(ap.parse_args(argv))
type_ = str(args["type"])
verbosity_level = int(args["verbose"])
overlap_value = int(args["overlap"])
if args["sizepixel"] is not None:
psm = float(args["sizepixel"])
else:
psm = None
path_target_list = [Path(p) for p in args["imgpath"]]
new_path = Path(args["model"]) if args["model"] else None
# Preparing the arguments to axon_segmentation function
path_model, config = generate_default_parameters(type_, new_path)
resolution_model = generate_resolution(type_, config["trainingset_patchsize"])
# Tuple of valid file extensions
validExtensions = (
".jpeg",
".jpg",
".tif",
".tiff",
".png"
)
# Going through all paths passed into arguments
for current_path_target in path_target_list:
if not current_path_target.is_dir():
if current_path_target.suffix.lower() in validExtensions:
# Handle cases if no resolution is provided on the CLI
                if psm is None:
# Check if a pixel size file exists, if so read it.
if (current_path_target.parent / 'pixel_size_in_micrometer.txt').exists():
resolution_file = open(current_path_target.parent / 'pixel_size_in_micrometer.txt', 'r')
psm = float(resolution_file.read())
else:
print("ERROR: No pixel size is provided, and there is no pixel_size_in_micrometer.txt file in image folder. ",
"Please provide a pixel size (using argument -s), or add a pixel_size_in_micrometer.txt file ",
"containing the pixel size value."
)
sys.exit(3)
# Check that image size is large enough for given resolution to reach minimum patch size after resizing.
try:
height, width, _ = ads.imread(str(current_path_target)).shape
except:
try:
height, width = ads.imread(str(current_path_target)).shape
except Exception as e:
raise e
image_size = [height, width]
minimum_resolution = config["trainingset_patchsize"] * resolution_model / min(image_size)
if psm < minimum_resolution:
print("EXCEPTION: The size of one of the images ({0}x{1}) is too small for the provided pixel size ({2}).\n".format(height, width, psm),
"The image size must be at least {0}x{0} after resampling to a resolution of {1} to create standard sized patches.\n".format(config["trainingset_patchsize"], resolution_model),
"One of the dimensions of the image has a size of {0} after resampling to that resolution.\n".format(round(psm * min(image_size) / resolution_model)),
"Image file location: {0}".format(current_path_target)
)
sys.exit(2)
# Performing the segmentation over the image
segment_image(current_path_target, path_model, overlap_value, config,
resolution_model,
acquired_resolution=psm,
verbosity_level=verbosity_level)
print("Segmentation finished.")
else:
print("The path(s) specified is/are not image(s). Please update the input path(s) and try again.")
break
else:
# Handle cases if no resolution is provided on the CLI
            if psm is None:
# Check if a pixel size file exists, if so read it.
if (current_path_target / 'pixel_size_in_micrometer.txt').exists():
resolution_file = open(current_path_target / 'pixel_size_in_micrometer.txt', 'r')
psm = float(resolution_file.read())
else:
print("ERROR: No pixel size is provided, and there is no pixel_size_in_micrometer.txt file in image folder. ",
"Please provide a pixel size (using argument -s), or add a pixel_size_in_micrometer.txt file ",
"containing the pixel size value."
)
sys.exit(3)
# Performing the segmentation over all folders in the specified folder containing acquisitions to segment.
segment_folders(current_path_target, path_model, overlap_value, config,
resolution_model,
acquired_resolution=psm,
verbosity_level=verbosity_level)
print("Segmentation finished.")
sys.exit(0)
# Calling the script
if __name__ == '__main__':
main()
| 3.03125 | 3 |
tests/test_hedges.py | aplested/DC_Pyps | 1 | 2686 | from dcstats.hedges import Hedges_d
from dcstats.statistics_EJ import simple_stats as mean_SD
import random
import math
def generate_sample (length, mean, sigma):
    # generate a list of normally distributed samples
sample = []
for n in range(length):
sample.append(random.gauss(mean, sigma))
return sample
def close_enough (a, b, count_error):
if math.fabs (a - b) < math.fabs((a + b) / (count_error * 2)) :
return True
else:
return False
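# close_enough is a relative-tolerance test: with count_error = sqrt(sample_size)
# it accepts |a - b| up to |a + b| / (2 * sqrt(N)), i.e. a counting-statistics
# sized margin around the mean of a and b.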
def gaussian_case (sig):
sample_size = 200
count_error = math.sqrt(sample_size)
m1 = 1
m2 = 2
s1 = generate_sample (sample_size, m1, sig)
s2 = generate_sample (sample_size, m2, sig)
h_testing = Hedges_d(s1, s2)
h_testing.hedges_d_unbiased() #answer is in self.d
approx_95CI_lower, approx_95CI_upper = h_testing.approx_CI()
bs_95CI_lower, bs_95CI_upper = h_testing.bootstrap_CI(5000)
print (mean_SD(s1), mean_SD(s2))
print ("h_testing.d, analytic, correction = ", h_testing.d, (m2 - m1) / sig, h_testing.correction)
print ("lower: approx, bootstrap", approx_95CI_lower, bs_95CI_lower)
print ("upper: approx, bootstrap", approx_95CI_upper, bs_95CI_upper)
#bootstrap is similar at high d but gives wider intervals at low d
assert close_enough(approx_95CI_lower, bs_95CI_lower, count_error)
assert close_enough(approx_95CI_upper, bs_95CI_upper, count_error)
assert close_enough(h_testing.d, (m2 - m1) / sig, count_error)
###tests
def test_gaussian_case_low():
gaussian_case(0.2) #expect d = 5
def test_gaussian_case_med():
gaussian_case(0.5) #expect d = 2
def test_gaussian_case_high():
gaussian_case(1.0) #expect d = 1, fail
| 2.796875 | 3 |
src/FYP/fifaRecords/urls.py | MustafaAbbas110/FinalProject | 0 | 2687 | from django.urls import path
from . import views
urlpatterns = [
    path('', views.Records, name="fRec"),
] | 1.523438 | 2 |
spacy_transformers/tests/regression/test_spacy_issue6401.py | KennethEnevoldsen/spacy-transformers | 0 | 2688 | import pytest
from spacy.training.example import Example
from spacy.util import make_tempdir
from spacy import util
from thinc.api import Config
TRAIN_DATA = [
("I'm so happy.", {"cats": {"POSITIVE": 1.0, "NEGATIVE": 0.0}}),
("I'm so angry", {"cats": {"POSITIVE": 0.0, "NEGATIVE": 1.0}}),
]
cfg_string = """
[nlp]
lang = "en"
pipeline = ["transformer","textcat"]
[components]
[components.textcat]
factory = "textcat"
[components.textcat.model]
@architectures = "spacy.TextCatEnsemble.v2"
[components.textcat.model.tok2vec]
@architectures = "spacy-transformers.TransformerListener.v1"
grad_factor = 1.0
[components.textcat.model.tok2vec.pooling]
@layers = "reduce_mean.v1"
[components.transformer]
factory = "transformer"
"""
# Xfail this until the new spaCy rc is up.
@pytest.mark.xfail
def test_transformer_pipeline_textcat():
"""Test that a pipeline with just a transformer+textcat runs and trains properly.
This used to throw an error because of shape inference issues -
cf https://github.com/explosion/spaCy/issues/6401"""
orig_config = Config().from_str(cfg_string)
nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True)
assert nlp.pipe_names == ["transformer", "textcat"]
train_examples = []
for text, annotations in TRAIN_DATA:
train_examples.append(Example.from_dict(nlp.make_doc(text), annotations))
optimizer = nlp.initialize(get_examples=lambda: train_examples)
for i in range(2):
losses = {}
nlp.update(train_examples, sgd=optimizer, losses=losses)
doc = nlp("We're interested at underwater basket weaving.")
cats1 = doc.cats
# ensure IO goes OK
with make_tempdir() as d:
file_path = d / "trained_nlp"
nlp.to_disk(file_path)
nlp2 = util.load_model_from_path(file_path)
doc2 = nlp2("We're interested at underwater basket weaving.")
cats2 = doc2.cats
assert cats1 == cats2
| 2.40625 | 2 |
hydra/client/repl.py | rpacholek/hydra | 0 | 2689 | import asyncio
from ..core.common.io import input
from .action_creator import ActionCreator
class REPL:
def __init__(self, action_queue, config, *args, **kwargs):
self.action_queue = action_queue
self.config = config
async def run(self):
await asyncio.sleep(1)
print("Insert command: ")
action_creator = ActionCreator()
while True:
input_data = await input("~> ")
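            # An empty line ends the REPL: every pending asyncio task is cancelled
            # so the whole client shuts down.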
if not input_data:
for task in asyncio.all_tasks():
task.cancel()
break
action = action_creator.parse(*input_data.split())
if action:
self.action_queue.push_action(action)
| 2.78125 | 3 |
train_dv3.py | drat/Neural-Voice-Cloning-With-Few-Samples | 361 | 2690 | """Training script for seq2seq text-to-speech synthesis model.
usage: train.py [options]
options:
--data-root=<dir> Directory contains preprocessed features.
--checkpoint-dir=<dir> Directory where to save model checkpoints [default: checkpoints].
--hparams=<parmas> Hyper parameters [default: ].
--checkpoint=<path> Restore model from checkpoint path if given.
--checkpoint-seq2seq=<path> Restore seq2seq model from checkpoint path.
--checkpoint-postnet=<path> Restore postnet model from checkpoint path.
--train-seq2seq-only Train only seq2seq model.
--train-postnet-only Train only postnet model.
--restore-parts=<path> Restore part of the model.
--log-event-path=<name> Log event path.
--reset-optimizer Reset optimizer.
--load-embedding=<path> Load embedding from checkpoint.
--speaker-id=<N> Use specific speaker of data in case for multi-speaker datasets.
-h, --help Show this help message and exit
"""
from docopt import docopt
import sys
from os.path import dirname, join
from tqdm import tqdm, trange
from datetime import datetime
# The deepvoice3 model
from dv3.deepvoice3_pytorch import frontend, builder
import dv3.audio
import dv3.lrschedule
import torch
from torch.utils import data as data_utils
from torch.autograd import Variable
from torch import nn
from torch import optim
import torch.backends.cudnn as cudnn
from torch.utils.data.sampler import Sampler
import numpy as np
from numba import jit
from nnmnkwii.datasets import FileSourceDataset, FileDataSource
from os.path import join, expanduser
import random
import librosa.display
from matplotlib import pyplot as plt
import os
from tensorboardX import SummaryWriter
from matplotlib import cm
from warnings import warn
from dv3.hparams import hparams, hparams_debug_string
fs = hparams.sample_rate
global_step = 0
global_epoch = 0
use_cuda = torch.cuda.is_available()
if use_cuda:
cudnn.benchmark = False
_frontend = None # to be set later
def _pad(seq, max_len, constant_values=0):
return np.pad(seq, (0, max_len - len(seq)),
mode='constant', constant_values=constant_values)
def _pad_2d(x, max_len, b_pad=0):
x = np.pad(x, [(b_pad, max_len - len(x) - b_pad), (0, 0)],
mode="constant", constant_values=0)
return x
def plot_alignment(alignment, path, info=None):
fig, ax = plt.subplots()
im = ax.imshow(
alignment,
aspect='auto',
origin='lower',
interpolation='none')
fig.colorbar(im, ax=ax)
xlabel = 'Decoder timestep'
if info is not None:
xlabel += '\n\n' + info
plt.xlabel(xlabel)
plt.ylabel('Encoder timestep')
plt.tight_layout()
plt.savefig(path, format='png')
plt.close()
class TextDataSource(FileDataSource):
def __init__(self, data_root, speaker_id=None):
self.data_root = data_root
self.speaker_ids = None
self.multi_speaker = False
# If not None, filter by speaker_id
self.speaker_id = speaker_id
def collect_files(self):
meta = join(self.data_root, "train.txt")
with open(meta, "rb") as f:
lines = f.readlines()
l = lines[0].decode("utf-8").split("|")
assert len(l) == 4 or len(l) == 5
self.multi_speaker = len(l) == 5
texts = list(map(lambda l: l.decode("utf-8").split("|")[3], lines))
if self.multi_speaker:
speaker_ids = list(map(lambda l: int(l.decode("utf-8").split("|")[-1]), lines))
# Filter by speaker_id
# using multi-speaker dataset as a single speaker dataset
if self.speaker_id is not None:
indices = np.array(speaker_ids) == self.speaker_id
texts = list(np.array(texts)[indices])
self.multi_speaker = False
return texts
return texts, speaker_ids
else:
return texts
def collect_features(self, *args):
if self.multi_speaker:
text, speaker_id = args
else:
text = args[0]
seq = _frontend.text_to_sequence(text, p=hparams.replace_pronunciation_prob)
if self.multi_speaker:
return np.asarray(seq, dtype=np.int32), int(speaker_id)
else:
return np.asarray(seq, dtype=np.int32)
class _NPYDataSource(FileDataSource):
def __init__(self, data_root, col, speaker_id=None):
self.data_root = data_root
self.col = col
self.frame_lengths = []
self.speaker_id = speaker_id
def collect_files(self):
meta = join(self.data_root, "train.txt")
with open(meta, "rb") as f:
lines = f.readlines()
l = lines[0].decode("utf-8").split("|")
assert len(l) == 4 or len(l) == 5
multi_speaker = len(l) == 5
self.frame_lengths = list(
map(lambda l: int(l.decode("utf-8").split("|")[2]), lines))
paths = list(map(lambda l: l.decode("utf-8").split("|")[self.col], lines))
paths = list(map(lambda f: join(self.data_root, f), paths))
if multi_speaker and self.speaker_id is not None:
speaker_ids = list(map(lambda l: int(l.decode("utf-8").split("|")[-1]), lines))
# Filter by speaker_id
# using multi-speaker dataset as a single speaker dataset
indices = np.array(speaker_ids) == self.speaker_id
paths = list(np.array(paths)[indices])
self.frame_lengths = list(np.array(self.frame_lengths)[indices])
# aha, need to cast numpy.int64 to int
self.frame_lengths = list(map(int, self.frame_lengths))
return paths
def collect_features(self, path):
return np.load(path)
class MelSpecDataSource(_NPYDataSource):
def __init__(self, data_root, speaker_id=None):
super(MelSpecDataSource, self).__init__(data_root, 1, speaker_id)
class LinearSpecDataSource(_NPYDataSource):
def __init__(self, data_root, speaker_id=None):
super(LinearSpecDataSource, self).__init__(data_root, 0, speaker_id)
class PartialyRandomizedSimilarTimeLengthSampler(Sampler):
"""Partially randmoized sampler
1. Sort by lengths
2. Pick a small patch and randomize it
3. Permutate mini-batchs
"""
def __init__(self, lengths, batch_size=16, batch_group_size=None,
permutate=True):
self.lengths, self.sorted_indices = torch.sort(torch.LongTensor(lengths))
self.batch_size = batch_size
if batch_group_size is None:
batch_group_size = min(batch_size * 32, len(self.lengths))
if batch_group_size % batch_size != 0:
batch_group_size -= batch_group_size % batch_size
self.batch_group_size = batch_group_size
assert batch_group_size % batch_size == 0
self.permutate = permutate
def __iter__(self):
indices = self.sorted_indices.clone()
batch_group_size = self.batch_group_size
s, e = 0, 0
for i in range(len(indices) // batch_group_size):
s = i * batch_group_size
e = s + batch_group_size
random.shuffle(indices[s:e])
        # Permute batches
if self.permutate:
perm = np.arange(len(indices[:e]) // self.batch_size)
random.shuffle(perm)
indices[:e] = indices[:e].view(-1, self.batch_size)[perm, :].view(-1)
# Handle last elements
s += batch_group_size
if s < len(indices):
random.shuffle(indices[s:])
return iter(indices)
def __len__(self):
return len(self.sorted_indices)
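# Sketch of the sampler's behaviour (illustrative): with batch_size=2 and
# batch_group_size=4, indices are first sorted by utterance length, shuffled
# within each group of 4, and the resulting mini-batches of 2 are permuted, so
# each batch holds similar-length sequences while the epoch order stays random.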
class PyTorchDataset(object):
def __init__(self, X, Mel, Y):
self.X = X
self.Mel = Mel
self.Y = Y
# alias
self.multi_speaker = X.file_data_source.multi_speaker
def __getitem__(self, idx):
if self.multi_speaker:
text, speaker_id = self.X[idx]
return text, self.Mel[idx], self.Y[idx], speaker_id
else:
return self.X[idx], self.Mel[idx], self.Y[idx]
def __len__(self):
return len(self.X)
def sequence_mask(sequence_length, max_len=None):
if max_len is None:
max_len = sequence_length.data.max()
batch_size = sequence_length.size(0)
seq_range = torch.arange(0, max_len).long()
seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)
seq_range_expand = Variable(seq_range_expand)
if sequence_length.is_cuda:
seq_range_expand = seq_range_expand.cuda()
seq_length_expand = sequence_length.unsqueeze(1) \
.expand_as(seq_range_expand)
return (seq_range_expand < seq_length_expand).float()
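# Minimal sketch of sequence_mask (not in the original script):
#   sequence_mask(torch.LongTensor([1, 3]), max_len=3)
#   # -> [[1., 0., 0.],
#   #     [1., 1., 1.]]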
class MaskedL1Loss(nn.Module):
def __init__(self):
super(MaskedL1Loss, self).__init__()
self.criterion = nn.L1Loss(size_average=False)
def forward(self, input, target, lengths=None, mask=None, max_len=None):
if lengths is None and mask is None:
raise RuntimeError("Should provide either lengths or mask")
# (B, T, 1)
if mask is None:
mask = sequence_mask(lengths, max_len).unsqueeze(-1)
# (B, T, D)
mask_ = mask.expand_as(input)
loss = self.criterion(input * mask_, target * mask_)
return loss / mask_.sum()
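# MaskedL1Loss zeroes the padded frames before summing and divides by the number
# of unmasked elements, so padding never contributes to the gradient. Hypothetical
# call, assuming (B, T, D) tensors and per-example frame counts t1, t2:
#   loss = MaskedL1Loss()(pred, target, lengths=torch.LongTensor([t1, t2]))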
def collate_fn(batch):
"""Create batch"""
r = hparams.outputs_per_step
downsample_step = hparams.downsample_step
multi_speaker = len(batch[0]) == 4
# Lengths
input_lengths = [len(x[0]) for x in batch]
max_input_len = max(input_lengths)
target_lengths = [len(x[1]) for x in batch]
max_target_len = max(target_lengths)
if max_target_len % r != 0:
max_target_len += r - max_target_len % r
assert max_target_len % r == 0
if max_target_len % downsample_step != 0:
max_target_len += downsample_step - max_target_len % downsample_step
assert max_target_len % downsample_step == 0
# Set 0 for zero beginning padding
# imitates initial decoder states
b_pad = r
max_target_len += b_pad * downsample_step
    a = np.array([_pad(x[0], max_input_len) for x in batch], dtype=np.int64)
x_batch = torch.LongTensor(a)
input_lengths = torch.LongTensor(input_lengths)
target_lengths = torch.LongTensor(target_lengths)
b = np.array([_pad_2d(x[1], max_target_len, b_pad=b_pad) for x in batch],
dtype=np.float32)
mel_batch = torch.FloatTensor(b)
c = np.array([_pad_2d(x[2], max_target_len, b_pad=b_pad) for x in batch],
dtype=np.float32)
y_batch = torch.FloatTensor(c)
# text positions
    text_positions = np.array([_pad(np.arange(1, len(x[0]) + 1), max_input_len)
                               for x in batch], dtype=np.int64)
text_positions = torch.LongTensor(text_positions)
max_decoder_target_len = max_target_len // r // downsample_step
# frame positions
s, e = 1, max_decoder_target_len + 1
# if b_pad > 0:
# s, e = s - 1, e - 1
frame_positions = torch.arange(s, e).long().unsqueeze(0).expand(
len(batch), max_decoder_target_len)
# done flags
done = np.array([_pad(np.zeros(len(x[1]) // r // downsample_step - 1),
max_decoder_target_len, constant_values=1)
for x in batch])
done = torch.FloatTensor(done).unsqueeze(-1)
if multi_speaker:
speaker_ids = torch.LongTensor([x[3] for x in batch])
else:
speaker_ids = None
return x_batch, input_lengths, mel_batch, y_batch, \
(text_positions, frame_positions), done, target_lengths, speaker_ids
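# Hedged sketch (hypothetical helper, not part of the original pipeline):
# isolates the padding arithmetic used by collate_fn above. With r=4 a
# 10-frame target is padded to 12 so its length is a multiple of
# outputs_per_step.
def _example_target_padding():
    r, max_target_len = 4, 10
    if max_target_len % r != 0:
        max_target_len += r - max_target_len % r
    assert max_target_len == 12
    return max_target_len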
def time_string():
return datetime.now().strftime('%Y-%m-%d %H:%M')
def save_alignment(path, attn):
plot_alignment(attn.T, path, info="{}, {}, step={}".format(
hparams.builder, time_string(), global_step))
def prepare_spec_image(spectrogram):
# [0, 1]
spectrogram = (spectrogram - np.min(spectrogram)) / (np.max(spectrogram) - np.min(spectrogram))
spectrogram = np.flip(spectrogram, axis=1) # flip against freq axis
return np.uint8(cm.magma(spectrogram.T) * 255)
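# Hedged sketch (hypothetical helper, not in the original script): checks the
# shape contract of prepare_spec_image above -- input of shape (T, F) is
# min-max normalised and comes back as an RGBA uint8 image of shape (F, T, 4).
def _example_prepare_spec_image():
    spec = np.random.rand(16, 80).astype(np.float32)
    img = prepare_spec_image(spec)
    assert img.dtype == np.uint8 and img.shape == (80, 16, 4)
    return img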
def eval_model(global_step, writer, model, checkpoint_dir, ismultispeaker):
    # hard-coded evaluation sentences
texts = [
"Scientists at the CERN laboratory say they have discovered a new particle.",
"There's a way to measure the acute emotional intelligence that has never gone out of style.",
"President Trump met with other leaders at the Group of 20 conference.",
"Generative adversarial network or variational auto-encoder.",
"Please call Stella.",
"Some have accepted this as a miracle without any physical explanation.",
]
    import dv3.synthesis as synthesis
    synthesis._frontend = _frontend
eval_output_dir = join(checkpoint_dir, "eval")
os.makedirs(eval_output_dir, exist_ok=True)
# hard coded
speaker_ids = [0, 1, 10] if ismultispeaker else [None]
for speaker_id in speaker_ids:
speaker_str = "multispeaker{}".format(speaker_id) if speaker_id is not None else "single"
for idx, text in enumerate(texts):
signal, alignment, _, mel = synthesis.tts(
model, text, p=0, speaker_id=speaker_id, fast=False)
signal /= np.max(np.abs(signal))
# Alignment
path = join(eval_output_dir, "step{:09d}_text{}_{}_alignment.png".format(
global_step, idx, speaker_str))
save_alignment(path, alignment)
tag = "eval_averaged_alignment_{}_{}".format(idx, speaker_str)
writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255), global_step)
# Mel
writer.add_image("(Eval) Predicted mel spectrogram text{}_{}".format(idx, speaker_str),
prepare_spec_image(mel), global_step)
# Audio
path = join(eval_output_dir, "step{:09d}_text{}_{}_predicted.wav".format(
global_step, idx, speaker_str))
dv3.audio.save_wav(signal, path)
try:
writer.add_audio("(Eval) Predicted audio signal {}_{}".format(idx, speaker_str),
signal, global_step, sample_rate=fs)
except Exception as e:
warn(str(e))
def save_states(global_step, writer, mel_outputs, linear_outputs, attn, mel, y,
input_lengths, checkpoint_dir=None):
print("Save intermediate states at step {}".format(global_step))
# idx = np.random.randint(0, len(input_lengths))
idx = min(1, len(input_lengths) - 1)
input_length = input_lengths[idx]
# Alignment
# Multi-hop attention
if attn is not None and attn.dim() == 4:
for i, alignment in enumerate(attn):
alignment = alignment[idx].cpu().data.numpy()
tag = "alignment_layer{}".format(i + 1)
writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255), global_step)
# save files as well for now
alignment_dir = join(checkpoint_dir, "alignment_layer{}".format(i + 1))
os.makedirs(alignment_dir, exist_ok=True)
path = join(alignment_dir, "step{:09d}_layer_{}_alignment.png".format(
global_step, i + 1))
save_alignment(path, alignment)
# Save averaged alignment
alignment_dir = join(checkpoint_dir, "alignment_ave")
os.makedirs(alignment_dir, exist_ok=True)
path = join(alignment_dir, "step{:09d}_alignment.png".format(global_step))
alignment = attn.mean(0)[idx].cpu().data.numpy()
save_alignment(path, alignment)
tag = "averaged_alignment"
writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255), global_step)
# Predicted mel spectrogram
if mel_outputs is not None:
mel_output = mel_outputs[idx].cpu().data.numpy()
mel_output = prepare_spec_image(dv3.audio._denormalize(mel_output))
writer.add_image("Predicted mel spectrogram", mel_output, global_step)
# Predicted spectrogram
if linear_outputs is not None:
linear_output = linear_outputs[idx].cpu().data.numpy()
spectrogram = prepare_spec_image(dv3.audio._denormalize(linear_output))
writer.add_image("Predicted linear spectrogram", spectrogram, global_step)
# Predicted audio signal
signal = dv3.audio.inv_spectrogram(linear_output.T)
signal /= np.max(np.abs(signal))
path = join(checkpoint_dir, "step{:09d}_predicted.wav".format(
global_step))
try:
writer.add_audio("Predicted audio signal", signal, global_step, sample_rate=fs)
except Exception as e:
warn(str(e))
dv3.audio.save_wav(signal, path)
# Target mel spectrogram
if mel_outputs is not None:
mel_output = mel[idx].cpu().data.numpy()
mel_output = prepare_spec_image(dv3.audio._denormalize(mel_output))
writer.add_image("Target mel spectrogram", mel_output, global_step)
# Target spectrogram
if linear_outputs is not None:
linear_output = y[idx].cpu().data.numpy()
spectrogram = prepare_spec_image(dv3.audio._denormalize(linear_output))
writer.add_image("Target linear spectrogram", spectrogram, global_step)
def logit(x, eps=1e-8):
return torch.log(x + eps) - torch.log(1 - x + eps)
def masked_mean(y, mask):
# (B, T, D)
mask_ = mask.expand_as(y)
return (y * mask_).sum() / mask_.sum()
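# Hedged sketch (hypothetical helper, not called by training): logit() above
# is the eps-stabilised inverse of the sigmoid, so sigmoid(logit(x)) ~= x.
def _example_logit_roundtrip():
    x = torch.FloatTensor([0.1, 0.5, 0.9])
    y = torch.sigmoid(logit(x))
    assert float((y - x).abs().max()) < 1e-4
    return y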
def spec_loss(y_hat, y, mask, priority_bin=None, priority_w=0):
masked_l1 = MaskedL1Loss()
l1 = nn.L1Loss()
w = hparams.masked_loss_weight
# L1 loss
if w > 0:
assert mask is not None
l1_loss = w * masked_l1(y_hat, y, mask=mask) + (1 - w) * l1(y_hat, y)
else:
assert mask is None
l1_loss = l1(y_hat, y)
# Priority L1 loss
if priority_bin is not None and priority_w > 0:
if w > 0:
priority_loss = w * masked_l1(
y_hat[:, :, :priority_bin], y[:, :, :priority_bin], mask=mask) \
+ (1 - w) * l1(y_hat[:, :, :priority_bin], y[:, :, :priority_bin])
else:
priority_loss = l1(y_hat[:, :, :priority_bin], y[:, :, :priority_bin])
l1_loss = (1 - priority_w) * l1_loss + priority_w * priority_loss
# Binary divergence loss
if hparams.binary_divergence_weight <= 0:
binary_div = Variable(y.data.new(1).zero_())
else:
y_hat_logits = logit(y_hat)
z = -y * y_hat_logits + torch.log(1 + torch.exp(y_hat_logits))
if w > 0:
binary_div = w * masked_mean(z, mask) + (1 - w) * z.mean()
else:
binary_div = z.mean()
return l1_loss, binary_div
@jit(nopython=True)
def guided_attention(N, max_N, T, max_T, g):
W = np.zeros((max_N, max_T), dtype=np.float32)
for n in range(N):
for t in range(T):
W[n, t] = 1 - np.exp(-(n / N - t / T)**2 / (2 * g * g))
return W
def guided_attentions(input_lengths, target_lengths, max_target_len, g=0.2):
B = len(input_lengths)
max_input_len = input_lengths.max()
W = np.zeros((B, max_target_len, max_input_len), dtype=np.float32)
for b in range(B):
W[b] = guided_attention(input_lengths[b], max_input_len,
target_lengths[b], max_target_len, g).T
return W
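# Hedged sketch (hypothetical helper): for N == T the guided-attention
# penalty above is exactly zero on the diagonal (n/N == t/T) and grows as
# attention drifts off-diagonal.
def _example_guided_attention():
    W = guided_attention(4, 4, 4, 4, 0.2)
    assert W[0, 0] == 0.0
    assert W[0, 3] > W[0, 1]
    return W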
def train(model, data_loader, optimizer, writer,
init_lr=0.002,
checkpoint_dir=None, checkpoint_interval=None, nepochs=None,
clip_thresh=1.0,
train_seq2seq=True, train_postnet=True):
if use_cuda:
model = model.cuda()
linear_dim = model.linear_dim
r = hparams.outputs_per_step
downsample_step = hparams.downsample_step
current_lr = init_lr
binary_criterion = nn.BCELoss()
assert train_seq2seq or train_postnet
global global_step, global_epoch
while global_epoch < nepochs:
running_loss = 0.
for step, (x, input_lengths, mel, y, positions, done, target_lengths,
speaker_ids) \
in tqdm(enumerate(data_loader)):
model.train()
ismultispeaker = speaker_ids is not None
# Learning rate schedule
if hparams.lr_schedule is not None:
lr_schedule_f = getattr(dv3.lrschedule, hparams.lr_schedule)
current_lr = lr_schedule_f(
init_lr, global_step, **hparams.lr_schedule_kwargs)
for param_group in optimizer.param_groups:
param_group['lr'] = current_lr
optimizer.zero_grad()
# Used for Position encoding
text_positions, frame_positions = positions
# Downsample mel spectrogram
if downsample_step > 1:
mel = mel[:, 0::downsample_step, :].contiguous()
# Lengths
input_lengths = input_lengths.long().numpy()
decoder_lengths = target_lengths.long().numpy() // r // downsample_step
# Feed data
x, mel, y = Variable(x), Variable(mel), Variable(y)
text_positions = Variable(text_positions)
frame_positions = Variable(frame_positions)
done = Variable(done)
target_lengths = Variable(target_lengths)
speaker_ids = Variable(speaker_ids) if ismultispeaker else None
if use_cuda:
if train_seq2seq:
x = x.cuda()
text_positions = text_positions.cuda()
frame_positions = frame_positions.cuda()
if train_postnet:
y = y.cuda()
mel = mel.cuda()
done, target_lengths = done.cuda(), target_lengths.cuda()
speaker_ids = speaker_ids.cuda() if ismultispeaker else None
# Create mask if we use masked loss
if hparams.masked_loss_weight > 0:
# decoder output domain mask
decoder_target_mask = sequence_mask(
target_lengths / (r * downsample_step),
max_len=mel.size(1)).unsqueeze(-1)
if downsample_step > 1:
# spectrogram-domain mask
target_mask = sequence_mask(
target_lengths, max_len=y.size(1)).unsqueeze(-1)
else:
target_mask = decoder_target_mask
# shift mask
decoder_target_mask = decoder_target_mask[:, r:, :]
target_mask = target_mask[:, r:, :]
else:
decoder_target_mask, target_mask = None, None
# Apply model
if train_seq2seq and train_postnet:
mel_outputs, linear_outputs, attn, done_hat = model(
x, mel, speaker_ids=speaker_ids,
text_positions=text_positions, frame_positions=frame_positions,
input_lengths=input_lengths)
elif train_seq2seq:
assert speaker_ids is None
mel_outputs, attn, done_hat, _ = model.seq2seq(
x, mel,
text_positions=text_positions, frame_positions=frame_positions,
input_lengths=input_lengths)
# reshape
mel_outputs = mel_outputs.view(len(mel), -1, mel.size(-1))
linear_outputs = None
elif train_postnet:
assert speaker_ids is None
linear_outputs = model.postnet(mel)
mel_outputs, attn, done_hat = None, None, None
# Losses
w = hparams.binary_divergence_weight
# mel:
if train_seq2seq:
mel_l1_loss, mel_binary_div = spec_loss(
mel_outputs[:, :-r, :], mel[:, r:, :], decoder_target_mask)
mel_loss = (1 - w) * mel_l1_loss + w * mel_binary_div
# done:
if train_seq2seq:
done_loss = binary_criterion(done_hat, done)
# linear:
if train_postnet:
n_priority_freq = int(hparams.priority_freq / (fs * 0.5) * linear_dim)
linear_l1_loss, linear_binary_div = spec_loss(
linear_outputs[:, :-r, :], y[:, r:, :], target_mask,
priority_bin=n_priority_freq,
priority_w=hparams.priority_freq_weight)
linear_loss = (1 - w) * linear_l1_loss + w * linear_binary_div
# Combine losses
if train_seq2seq and train_postnet:
loss = mel_loss + linear_loss + done_loss
elif train_seq2seq:
loss = mel_loss + done_loss
elif train_postnet:
loss = linear_loss
# attention
if train_seq2seq and hparams.use_guided_attention:
soft_mask = guided_attentions(input_lengths, decoder_lengths,
attn.size(-2),
g=hparams.guided_attention_sigma)
soft_mask = Variable(torch.from_numpy(soft_mask))
soft_mask = soft_mask.cuda() if use_cuda else soft_mask
attn_loss = (attn * soft_mask).mean()
loss += attn_loss
if global_step > 0 and global_step % checkpoint_interval == 0:
save_states(
global_step, writer, mel_outputs, linear_outputs, attn,
mel, y, input_lengths, checkpoint_dir)
save_checkpoint(
model, optimizer, global_step, checkpoint_dir, global_epoch,
train_seq2seq, train_postnet)
if global_step > 0 and global_step % hparams.eval_interval == 0:
eval_model(global_step, writer, model, checkpoint_dir, ismultispeaker)
# Update
loss.backward()
if clip_thresh > 0:
grad_norm = torch.nn.utils.clip_grad_norm(
model.get_trainable_parameters(), clip_thresh)
optimizer.step()
# Logs
writer.add_scalar("loss", float(loss.data[0]), global_step)
if train_seq2seq:
writer.add_scalar("done_loss", float(done_loss.data[0]), global_step)
writer.add_scalar("mel loss", float(mel_loss.data[0]), global_step)
writer.add_scalar("mel_l1_loss", float(mel_l1_loss.data[0]), global_step)
writer.add_scalar("mel_binary_div_loss", float(mel_binary_div.data[0]), global_step)
if train_postnet:
writer.add_scalar("linear_loss", float(linear_loss.data[0]), global_step)
writer.add_scalar("linear_l1_loss", float(linear_l1_loss.data[0]), global_step)
writer.add_scalar("linear_binary_div_loss", float(
linear_binary_div.data[0]), global_step)
if train_seq2seq and hparams.use_guided_attention:
writer.add_scalar("attn_loss", float(attn_loss.data[0]), global_step)
if clip_thresh > 0:
writer.add_scalar("gradient norm", grad_norm, global_step)
writer.add_scalar("learning rate", current_lr, global_step)
global_step += 1
running_loss += loss.data[0]
averaged_loss = running_loss / (len(data_loader))
writer.add_scalar("loss (per epoch)", averaged_loss, global_epoch)
print("Loss: {}".format(running_loss / (len(data_loader))))
global_epoch += 1
def save_checkpoint(model, optimizer, step, checkpoint_dir, epoch,
train_seq2seq, train_postnet):
if train_seq2seq and train_postnet:
suffix = ""
m = model
elif train_seq2seq:
suffix = "_seq2seq"
m = model.seq2seq
elif train_postnet:
suffix = "_postnet"
m = model.postnet
checkpoint_path = join(
checkpoint_dir, "checkpoint_step{:09d}{}.pth".format(global_step, suffix))
optimizer_state = optimizer.state_dict() if hparams.save_optimizer_state else None
torch.save({
"state_dict": m.state_dict(),
"optimizer": optimizer_state,
"global_step": step,
"global_epoch": epoch,
}, checkpoint_path)
print("Saved checkpoint:", checkpoint_path)
def build_model():
model = getattr(builder, hparams.builder)(
n_speakers=hparams.n_speakers,
speaker_embed_dim=hparams.speaker_embed_dim,
n_vocab=_frontend.n_vocab,
embed_dim=hparams.text_embed_dim,
mel_dim=hparams.num_mels,
linear_dim=hparams.fft_size // 2 + 1,
r=hparams.outputs_per_step,
downsample_step=hparams.downsample_step,
padding_idx=hparams.padding_idx,
dropout=hparams.dropout,
kernel_size=hparams.kernel_size,
encoder_channels=hparams.encoder_channels,
decoder_channels=hparams.decoder_channels,
converter_channels=hparams.converter_channels,
use_memory_mask=hparams.use_memory_mask,
trainable_positional_encodings=hparams.trainable_positional_encodings,
force_monotonic_attention=hparams.force_monotonic_attention,
use_decoder_state_for_postnet_input=hparams.use_decoder_state_for_postnet_input,
max_positions=hparams.max_positions,
speaker_embedding_weight_std=hparams.speaker_embedding_weight_std,
freeze_embedding=hparams.freeze_embedding,
window_ahead=hparams.window_ahead,
window_backward=hparams.window_backward,
key_projection=hparams.key_projection,
value_projection=hparams.value_projection,
)
return model
def load_checkpoint(path, model, optimizer, reset_optimizer):
global global_step
global global_epoch
print("Load checkpoint from: {}".format(path))
checkpoint = torch.load(path)
model.load_state_dict(checkpoint["state_dict"])
if not reset_optimizer:
optimizer_state = checkpoint["optimizer"]
if optimizer_state is not None:
print("Load optimizer state from {}".format(path))
optimizer.load_state_dict(checkpoint["optimizer"])
global_step = checkpoint["global_step"]
global_epoch = checkpoint["global_epoch"]
return model
def _load_embedding(path, model):
state = torch.load(path)["state_dict"]
key = "seq2seq.encoder.embed_tokens.weight"
model.seq2seq.encoder.embed_tokens.weight.data = state[key]
# https://discuss.pytorch.org/t/how-to-load-part-of-pre-trained-model/1113/3
def restore_parts(path, model):
print("Restore part of the model from: {}".format(path))
state = torch.load(path)["state_dict"]
model_dict = model.state_dict()
valid_state_dict = {k: v for k, v in state.items() if k in model_dict}
model_dict.update(valid_state_dict)
model.load_state_dict(model_dict)
if __name__ == "__main__":
args = docopt(__doc__)
print("Command line args:\n", args)
checkpoint_dir = args["--checkpoint-dir"]
checkpoint_path = args["--checkpoint"]
checkpoint_seq2seq_path = args["--checkpoint-seq2seq"]
checkpoint_postnet_path = args["--checkpoint-postnet"]
load_embedding = args["--load-embedding"]
checkpoint_restore_parts = args["--restore-parts"]
speaker_id = args["--speaker-id"]
speaker_id = int(speaker_id) if speaker_id is not None else None
data_root = args["--data-root"]
if data_root is None:
data_root = join(dirname(__file__), "data", "ljspeech")
log_event_path = args["--log-event-path"]
reset_optimizer = args["--reset-optimizer"]
# Which model to be trained
train_seq2seq = args["--train-seq2seq-only"]
train_postnet = args["--train-postnet-only"]
# train both if not specified
if not train_seq2seq and not train_postnet:
print("Training whole model")
train_seq2seq, train_postnet = True, True
if train_seq2seq:
print("Training seq2seq model")
elif train_postnet:
print("Training postnet model")
else:
        assert False, "invalid combination of training-mode arguments"
# Override hyper parameters
hparams.parse(args["--hparams"])
print(hparams_debug_string())
assert hparams.name == "deepvoice3"
# Presets
if hparams.preset is not None and hparams.preset != "":
preset = hparams.presets[hparams.preset]
import json
hparams.parse_json(json.dumps(preset))
print("Override hyper parameters with preset \"{}\": {}".format(
hparams.preset, json.dumps(preset, indent=4)))
_frontend = getattr(frontend, hparams.frontend)
os.makedirs(checkpoint_dir, exist_ok=True)
# Input dataset definitions
X = FileSourceDataset(TextDataSource(data_root, speaker_id))
Mel = FileSourceDataset(MelSpecDataSource(data_root, speaker_id))
Y = FileSourceDataset(LinearSpecDataSource(data_root, speaker_id))
# Prepare sampler
frame_lengths = Mel.file_data_source.frame_lengths
sampler = PartialyRandomizedSimilarTimeLengthSampler(
frame_lengths, batch_size=hparams.batch_size)
# Dataset and Dataloader setup
dataset = PyTorchDataset(X, Mel, Y)
data_loader = data_utils.DataLoader(
dataset, batch_size=hparams.batch_size,
num_workers=hparams.num_workers, sampler=sampler,
collate_fn=collate_fn, pin_memory=hparams.pin_memory)
print("dataloader_prepared")
# Model
model = build_model()
if use_cuda:
model = model.cuda()
optimizer = optim.Adam(model.get_trainable_parameters(),
lr=hparams.initial_learning_rate, betas=(
hparams.adam_beta1, hparams.adam_beta2),
eps=hparams.adam_eps, weight_decay=hparams.weight_decay)
if checkpoint_restore_parts is not None:
restore_parts(checkpoint_restore_parts, model)
# Load checkpoints
if checkpoint_postnet_path is not None:
load_checkpoint(checkpoint_postnet_path, model.postnet, optimizer, reset_optimizer)
if checkpoint_seq2seq_path is not None:
load_checkpoint(checkpoint_seq2seq_path, model.seq2seq, optimizer, reset_optimizer)
if checkpoint_path is not None:
load_checkpoint(checkpoint_path, model, optimizer, reset_optimizer)
# Load embedding
if load_embedding is not None:
print("Loading embedding from {}".format(load_embedding))
_load_embedding(load_embedding, model)
# Setup summary writer for tensorboard
if log_event_path is None:
log_event_path = "log/run-test" + str(datetime.now()).replace(" ", "_")
print("Los event path: {}".format(log_event_path))
writer = SummaryWriter(log_dir=log_event_path)
# Train!
try:
train(model, data_loader, optimizer, writer,
init_lr=hparams.initial_learning_rate,
checkpoint_dir=checkpoint_dir,
checkpoint_interval=hparams.checkpoint_interval,
nepochs=hparams.nepochs,
clip_thresh=hparams.clip_thresh,
train_seq2seq=train_seq2seq, train_postnet=train_postnet)
except KeyboardInterrupt:
save_checkpoint(
model, optimizer, global_step, checkpoint_dir, global_epoch,
train_seq2seq, train_postnet)
print("Finished")
sys.exit(0)
| 2.359375 | 2 |
magic_mirror.py | alcinnz/Historical-Twin | 1 | 2691 | #! /usr/bin/python2
import time
start = time.time()
import pygame, numpy
import pygame.camera
# Init display
screen = pygame.display.set_mode((0,0), pygame.FULLSCREEN)
pygame.display.set_caption("Magic Mirror")
#pygame.mouse.set_visible(False)
# Init font
pygame.font.init()
font_colour = 16, 117, 186
fonts = {40: pygame.font.Font("Futura.ttc", 40)}
def font(font_size = 40):
if font_size not in fonts:
fonts[font_size] = pygame.font.Font("Futura.ttc", font_size)
return fonts[font_size]
def write(text, colour = font_colour, font_size = 40):
return font(font_size).render(str(text), True, colour)
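# Hedged sketch (hypothetical helper, not used by the mirror loop): font()
# lazily caches one pygame Font object per size, so repeated calls with the
# same size return the very same object.
def _example_font_cache():
    first = font(40)
    assert font(40) is first
    return first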
# Init AI
import recognition
import sys, os
def find_faces(pygame_capture):
capture = numpy.array(pygame.surfarray.pixels3d(pygame_capture))
capture = numpy.swapaxes(capture, 0, 1)
return recognition.align.getAllFaceBoundingBoxes(capture), capture
index = recognition.MultiBinaryTree()
imgdir = sys.argv[1] if len(sys.argv) > 1 else "images"
photo_samples = []
screen.blit(write("Loading index... %fs" % (time.time() - start)), (0,0))
pygame.display.flip()
with open(os.path.join(imgdir, "index.tsv")) as f:
for line in f:
line = line.strip().split("\t")
img = os.path.join(imgdir, line[0])
description = numpy.array([float(n) for n in line[1:]])
index.insert(description, img)
screen.blit(write("Loading images... %fs" % (time.time() - start)), (0,50))
pygame.display.flip()
for img in os.listdir(os.path.join(imgdir, "thumbnails")):
photo_samples.append(pygame.image.load(os.path.join(imgdir, "thumbnails", img)))
# Init clock
clock = pygame.time.Clock()
# Init camera
pygame.camera.init()
cameras = pygame.camera.list_cameras()
if not cameras:
pygame.quit()
print "No cameras found, exiting!"
sys.exit(1)
camera = pygame.camera.Camera(cameras[0])
camera.start()
# Mainloop
def recognize(capture, faces):
fullscreen = pygame.Rect(0, 0, screen.get_width(), screen.get_height())
pygame.draw.rect(screen, (255, 255, 255), fullscreen)
pygame.display.flip()
face = recognition.average(recognition.getRepBBox(capture, face) for face in faces)
img = index.nearest(face)
screen.blit(pygame.image.load(img), (0,0))
pygame.display.flip()
    pygame.time.wait(10*1000)  # 10s
def main():
countdown = 10
lastFaceCount = 0
while True:
clock.tick(60)
for event in pygame.event.get():
if event.type in (pygame.QUIT, pygame.KEYDOWN):
return
capture = camera.get_image()
faces, capture_data = find_faces(capture)
for bbox in faces:
rect = pygame.Rect(bbox.left(), bbox.top(), bbox.width(), bbox.height())
pygame.draw.rect(capture, (255, 0, 0), rect, 2)
capture = pygame.transform.flip(capture, True, False)
screen.blit(pygame.transform.smoothscale(capture, screen.get_size()), (0,0))
if len(faces) == 0 or len(faces) != lastFaceCount:
countdown = 10
lastFaceCount = len(faces)
elif countdown == 0:
recognize(capture_data, faces)
countdown = 10
else:
screen.blit(write(countdown), (0,0))
countdown -= 1
pygame.display.flip()
pygame.quit()
if __name__ == "__main__":
main()
| 2.671875 | 3 |
resolwe/__init__.py | plojyon/resolwe | 27 | 2692 | """.. Ignore pydocstyle D400.
=======
Resolwe
=======
Open source enterprise dataflow engine in Django.
"""
from resolwe.__about__ import ( # noqa: F401
__author__,
__copyright__,
__email__,
__license__,
__summary__,
__title__,
__url__,
__version__,
)
| 1.21875 | 1 |
audio_som64_u_grupo1.py | andremsouza/swine_sound_analysis | 0 | 2693 | # %% [markdown]
# # Testing python-som with audio dataset
# %% [markdown]
# # Imports
# %%
import matplotlib.pyplot as plt
# import librosa as lr
# import librosa.display as lrdisp
import numpy as np
import pandas as pd
import pickle
import seaborn as sns
import sklearn.preprocessing
from python_som import SOM
FILE_PREFIX = 'som64_u_grupo1'
# %% [markdown]
# # Loading dataset
# %%
df = pd.read_csv('features_means.csv', index_col=0, verbose=True)
df.index = pd.to_datetime(df.index)
df['rac'] = False
df.loc['2020-09-22':, 'rac'] = True # type: ignore
df.sort_index(inplace=True)
# %% [markdown]
# ## Checking for and dropping duplicates
# %%
# Resetting index for duplicate analysis
df.reset_index(inplace=True)
print("Duplicates by filename:",
df.duplicated(subset=['file_name']).value_counts(),
sep='\n')
df.drop_duplicates(subset=['file_name'], inplace=True)
print("Duplicates by (datetime, ala, grupo):",
df.duplicated(subset=['datetime', 'ala', 'grupo']).value_counts(),
sep='\n')
df.drop_duplicates(subset=['datetime', 'ala', 'grupo'], inplace=True)
# Rebuilding dataframe index
df.set_index('datetime', inplace=True)
# %%
# Filtering dataset by 'group'
df = df[df['grupo'] == 1]
# %%
# Dropping tail of dataset for class balancing
# tail_size = abs(
# len(df[df['rac'].astype(int) == 1]) - len(df[df['rac'].astype(int) == 0]))
# df.drop(df.tail(tail_size).index, inplace=True)
# %% [markdown]
# ## Visualizing distribution of sample dates
# %%
df_tmp = pd.DataFrame(df['file_name'].resample('1D').count())
df_tmp['count'] = df_tmp['file_name']
del df_tmp['file_name']
df_tmp['rac'] = False
df_tmp.loc['2020-09-22':, 'rac'] = True # type: ignore
plt.figure(figsize=(10, 10))
sns.set(style="whitegrid",
palette=sns.color_palette("muted", n_colors=6, desat=1.0))
sns.barplot(y=df_tmp.index, x=df_tmp['count'], hue=df_tmp['rac'])
plt.draw()
df_tmp = pd.DataFrame(df['file_name'].resample('1H').count())
df_tmp['count'] = df_tmp['file_name']
del df_tmp['file_name']
df_tmp['rac'] = False
df_tmp.loc['2020-09-22':, 'rac'] = True # type: ignore
df_tmp = df_tmp.reset_index()
df_tmp['hour'] = df_tmp['datetime'].dt.hour
plt.figure(figsize=(10, 10))
sns.set(style="whitegrid",
palette=sns.color_palette("muted", n_colors=6, desat=1.0))
sns.barplot(y=df_tmp['hour'], x=df_tmp['count'], hue=df_tmp['rac'], orient='h')
plt.draw()
# %%
df_melt = pd.melt(df, value_vars=['rac'], value_name='ractopamine')
plt.figure(figsize=(10, 10))
sns.set(style="whitegrid",
palette=sns.color_palette("muted", n_colors=6, desat=1.0))
ax = sns.countplot(data=df_melt, x='ractopamine', hue='ractopamine')
for p in ax.patches:
ax.annotate(f'\n{p.get_height()}', (p.get_x() + 0.2, p.get_height()),
ha='center',
va='top',
color='white',
size=18)
plt.draw()
# %%
# using sklearn's MinMaxScaler
scaler = sklearn.preprocessing.MinMaxScaler(feature_range=(0, 1))
df_train = df.iloc[:, 3:-1].copy()
df_train = scaler.fit_transform(df_train)
# %%
# Defining first element of SOM shape
# Second element will be assigned based on the ratio between the
# first two principal components of the train dataset
som_x: int = 64
try:
with open(f'./{FILE_PREFIX}.obj', 'rb') as f:
som = pickle.load(f)
except FileNotFoundError:
som = SOM(x=som_x,
y=None,
input_len=df_train.shape[1],
learning_rate=0.5,
neighborhood_radius=1.0,
neighborhood_function='gaussian',
cyclic_x=True,
cyclic_y=True,
data=df_train)
# Training SOM
som.weight_initialization(mode='linear', data=df_train)
som.train(data=df_train, mode='random', verbose=True)
with open(f'./{FILE_PREFIX}.obj', 'wb') as f:
pickle.dump(som, f)
# %%
som_x, som_y = som.get_shape()
print('SOM shape:', (som_x, som_y))
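# %%
# Hedged sketch (assumption: som.get_weights() returns an array of shape
# (som_x, som_y, input_len), which the per-feature heatmaps below also rely
# on): brute-force lookup of the best-matching unit of the first training
# sample, using numpy only.
weights = som.get_weights()
distances = np.linalg.norm(weights - df_train[0], axis=2)
bmu = np.unravel_index(np.argmin(distances), distances.shape)
print('BMU of first training sample:', bmu)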
# %%
# Visualizing distance matrix and activation matrix
umatrix = som.distance_matrix()
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 9))
sns.heatmap(umatrix.T, cmap='bone_r', ax=ax1, robust=True)
sns.heatmap(som.activation_matrix(data=df_train).T,
cmap='mako',
ax=ax2,
robust=True)
ax1.invert_yaxis()
ax2.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_umatrix_activation.png',
bbox_inches='tight',
transparent=True)
plt.draw()
# %%
# Visualizing distance matrix anc activation matrix separately
fig = plt.figure(figsize=(16, 9))
ax = sns.heatmap(umatrix.T, cmap='bone_r', robust=True)
ax.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_umatrix.png',
bbox_inches='tight',
transparent=True)
fig = plt.figure(figsize=(16, 9))
ax = sns.heatmap(som.activation_matrix(data=df_train).T,
cmap='mako',
robust=True)
ax.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_activation_matrix.png',
bbox_inches='tight',
transparent=True)
# %% [markdown]
# ## Visualizing distribution of features
# %%
for column in df.iloc[:, 3:-1].columns:
hmap = som.get_weights()[:, :, df.iloc[:, 3:-1].columns.get_loc(column)].T
fig = plt.figure(figsize=(16, 9))
ax = sns.heatmap(hmap, robust=True, cmap='BrBG')
ax.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png',
bbox_inches='tight',
transparent=True)
plt.close(fig=fig)
# %% [markdown]
# ## Visualizing distribution of audios by metadata (day, hour, ...)
# Each node is colorized according to its most frequent label
# %%
df['days'] = df.index.date
df['days'] = (df['days'] - df['days'][0])
df['days'] = df['days'].apply(lambda x: x.days)
df['hour'] = df.index.hour
# %%
# Visualizing 'rac' distribution
class_assignments = som.label_map(np.array(df_train), np.array(df['rac']))
hmap = np.zeros((som_x, som_y))
for i, j in sorted(class_assignments.keys()):
try:
hmap[i][j] = class_assignments[(i, j)].most_common()[0][0] + 1
except Exception:
continue
hmap = hmap.T
fig = plt.figure(figsize=(16, 9))
ax = sns.heatmap(hmap,
cmap=sns.color_palette(palette=["#000000", "blue", "orange"],
n_colors=3),
cbar_kws={'ticks': [0, 1, 2]})
ax.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_rac.png',
bbox_inches='tight',
transparent=True)
plt.show()
# %%
# Visualizing by 'grupo'
print(df.groupby('grupo')['rac'].count())
column = 'grupo'
class_assignments = som.label_map(np.array(df_train), np.array(df[column]))
hmap = np.zeros((som_x, som_y))
for i, j in sorted(class_assignments.keys()):
try:
hmap[i][j] = class_assignments[(i, j)].most_common()[0][0]
except Exception:
hmap[i][j] = 0
hmap = hmap.T
fig = plt.figure(figsize=(16, 9))
ax = sns.heatmap(hmap,
cmap=sns.color_palette(palette=["#000000", "blue", "orange"],
n_colors=3),
cbar_kws={'ticks': [0, 1, 2]})
ax.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png',
bbox_inches='tight',
transparent=True)
plt.show()
# %%
# Visualizing by 'days'
print(df.groupby('days')['rac'].count())
column = 'days'
class_assignments = som.label_map(np.array(df_train), np.array(df[column]))
hmap = np.zeros((som_x, som_y))
for i, j in sorted(class_assignments.keys()):
try:
hmap[i][j] = class_assignments[(i, j)].most_common()[0][0]
except Exception:
hmap[i][j] = -1
hmap = hmap.T
fig = plt.figure(figsize=(16, 9))
ax = sns.heatmap(hmap, cmap='viridis')
ax.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png',
bbox_inches='tight',
transparent=True)
plt.show()
# %%
# Visualizing by 'hour'
print(df.groupby('hour')['rac'].count())
column = 'hour'
class_assignments = som.label_map(np.array(df_train), np.array(df[column]))
hmap = np.zeros((som_x, som_y))
for i, j in sorted(class_assignments.keys()):
try:
hmap[i][j] = class_assignments[(i, j)].most_common()[0][0]
except Exception:
hmap[i][j] = -1
hmap = hmap.T
fig = plt.figure(figsize=(16, 9))
ax = sns.heatmap(hmap,
cmap=sns.diverging_palette(150,
250,
s=100,
l=20,
sep=1,
n=26,
center='light'),
center=12)
ax.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png',
bbox_inches='tight',
transparent=True)
plt.show()
# %%
| 2.640625 | 3 |
footy/engine/UpdateEngine.py | dallinb/footy | 2 | 2694 | """Prediction Engine - Update the data model with the most recent fixtures and results."""
from footy.domain import Competition
class UpdateEngine:
"""Prediction Engine - Update the data model with the most resent fixtures and results."""
def __init__(self):
"""Construct a UpdateEngine object."""
def get_competition(self, code):
"""
Retrieve data for the supplied competition code.
Returns
-------
Competition
A Competition object with the most recent fixtures and results for the supplied competition code.
"""
        # stub: returns the Competition class itself as a placeholder
        return Competition
def update_competition(self, competition):
"""
Retrieve data and enrich the supplied competition with the most recent fixtures and results.
Returns
-------
Competition
A Competition object with the most recent fixtures and results for the supplied competition code.
"""
return Competition
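# Hedged usage sketch (illustrative only; both methods above are stubs that
# currently return the Competition class itself rather than an instance, and
# the competition code "EN_PR" is hypothetical):
def _example_update_engine():
    engine = UpdateEngine()
    competition = engine.get_competition("EN_PR")
    return engine.update_competition(competition)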
| 3.078125 | 3 |
bindings/pydeck/docs/scripts/embed_examples.py | marsupialmarcos/deck.gl | 2 | 2695 | """Script to embed pydeck examples into .rst pages with code
These populate the files you see once you click into a grid cell
on the pydeck gallery page
"""
from multiprocessing import Pool
import os
import subprocess
import sys
from const import DECKGL_URL_BASE, EXAMPLE_GLOB, GALLERY_DIR, HTML_DIR, HOSTED_STATIC_PATH
from utils import to_presentation_name, to_snake_case_string
from templates import DOC_TEMPLATE
if not os.environ.get("MAPBOX_API_KEY"):
# If running for rtfd.io, set this variable from the Admin panel
raise Exception("MAPBOX_API_KEY not set")
def create_rst(pydeck_example_file_name):
asset_name = to_snake_case_string(file_name=pydeck_example_file_name)
deckgl_docs_layer_name = asset_name.replace("_", "-")
deckgl_doc_url = None
if "layer" in deckgl_docs_layer_name:
# Don't add a deck.gl docs link if we're not referencing a layer
# Obviously very rough, should change this eventually to handle views etc
deckgl_doc_url = DECKGL_URL_BASE + deckgl_docs_layer_name
# Create new .html examples
html_fname = os.path.basename(pydeck_example_file_name).replace(".py", ".html")
# Run the pydeck example and move the .html output
subprocess.call(
"{python} {fname}; mv {html_src} {html_dest}".format(
python=sys.executable, fname=pydeck_example_file_name, html_src=html_fname, html_dest=HTML_DIR
),
shell=True,
)
    with open(pydeck_example_file_name, "r") as py_file:
        python_code = py_file.read()
doc_source = DOC_TEMPLATE.render(
page_title=to_presentation_name(asset_name),
snake_name=asset_name,
python_code=python_code,
hosted_html_path=os.path.join(HOSTED_STATIC_PATH, html_fname),
deckgl_doc_url=deckgl_doc_url,
)
rst_path = os.path.join(GALLERY_DIR, asset_name + ".rst")
    print("* Converted %s to %s" % (pydeck_example_file_name, rst_path))
    with open(rst_path, "w+") as f:
        f.write(doc_source)
def main():
pool = Pool(processes=4)
    candidate_files = list(EXAMPLE_GLOB)
if not candidate_files:
raise Exception("No files found to convert")
subprocess.call("mkdir -p %s" % HTML_DIR, shell=True)
pool.map(create_rst, candidate_files)
if __name__ == "__main__":
main()
| 2.765625 | 3 |
symbolicR/python/forward_kin.py | mharding01/augmented-neuromuscular-RT-running | 0 | 2696 | import numpy as np
import sympy as sp
import re
import os
######################
# #
# 17 16 21 #
# 18 15 22 #
# 19 14 23 #
# 20 01 24 #
# 02 08 #
# 03 09 #
# 04 10 #
# 05 11 #
# 06 12 #
# 07 13 #
# #
######################
#
# origin: in the waist, middle point between the two pitch hip rotations
# inertial frame: located at the origin (waist), but aligned with the ground (info from IMU)
#
# Di : position vector from the anchor point of the previous body to the current body i
# (previous body is not always body i-1), expressed in the relative
# frame of the previous body
# DGi : position vector from the anchor point of body i to its COM (center of mass) G_i,
# expressed in the relative frame of the current body i
# Omi : rotational vector from the previous body to the current body i
# (previous body is not always body i-1), expressed in the relative
# frame of the previous body
# Rdi : rotational matrix between body i and its predecessor
# si : sine of the relative angle before body i
# ci : cosine of the relative angle before body i
#
# xi : absolute position vector (from origin, expressed in the inertial frame)
# of the anchor point of body i
# xgi : absolute position vector of the COM G_i of body i
# xpi : derivative of xi
# xgpi : derivative of xgi
# omi : absolute rotational vector of body i
# Ri : absolute rotational matrix
# Rti : transpose matrix of Ri
# xji : jacobian of 'xi'
# xgji : jacobian of 'xgi'
# Rji : jacobian of 'Ri'
# return true if it is an int
def isInt(value):
try:
int(value)
return True
except:
return False
# return true if it has a shape 'R%a_%b%c' (indexes %a, %b, %c also returned)
def isRot(value):
try:
a = int(value.split('_')[0].split('R')[1])
b = int(value.split('_')[1][0])
c = int(value.split('_')[1][1])
return True, a, b, c
except:
return False, -1, -1, -1
# return true if it has a shape 'x%a_%b' (indexes %a, %b also returned)
def isVec(value):
try:
a = int(value.split('_')[0].split('x')[1])
b = int(value.split('_')[1])
return True, a, b
except:
return False, -1, -1
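# Hedged sketch (hypothetical helper): the tiny name parsers above decode
# 'R3_12' to matrix entry (body 3, row 1, column 2) and 'x4_2' to vector
# entry (body 4, component 2).
def _example_name_parsers():
    assert isRot('R3_12') == (True, 3, 1, 2)
    assert isVec('x4_2') == (True, 4, 2)
    assert isInt('7') and not isInt('q7')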
# count the number of 'elem' in the file
def count_elem(in_file, elem):
count = 0;
with open(in_file, 'r') as f:
# loop on all the lines
for line in f:
cut_line = line.split(elem)
if len(cut_line) == 2:
count += 1
return count
# print the declaration of an element
def print_declaration_elem(in_file, out_write, elem, nb_max_line):
if count_elem(in_file, '{}'.format(elem)) >= 1:
count = 0
with open(in_file,'r') as f:
# loop on all the lines
for line in f:
cut_line_1 = line.split(elem)
cut_line_2 = line.split(' = ')
if len(cut_line_1) == 2 and len(cut_line_2) == 2:
if len(cut_line_2[0].split('[')) == 1:
if count == 0:
out_write.write(' double {}'.format(cut_line_2[0].strip()))
else:
out_write.write(', {}'.format(cut_line_2[0].strip()))
count += 1
if count >= nb_max_line:
out_write.write(';\n')
count = 0
if count != 0:
out_write.write(';\n')
# print all declarations
def print_all_declaration(in_file, out_write, nb_max_char):
count = 0
with open(in_file,'r') as f:
# loop on all the lines
for line in f:
cut_line = line.split(' = ')
if len(cut_line) == 2:
if len(cut_line[0].split('[')) == 1:
if count == 0:
out_write.write(' double {}'.format(cut_line[0].strip()))
else:
out_write.write(', {}'.format(cut_line[0].strip()))
count += len(cut_line[0].strip()) + 2
if count >= nb_max_char:
out_write.write(';\n')
count = 0
if count != 0:
out_write.write(';\n')
# get tilde matrix
def get_tilde(v):
return np.array([[0.0, -v[2], v[1]], [v[2], 0.0, -v[0]], [-v[1], v[0], 0.0]])
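# Hedged sketch (hypothetical helper): the tilde (skew-symmetric) matrix of v
# turns the cross product into a matrix product, i.e. get_tilde(v).dot(w)
# equals np.cross(v, w).
def _example_tilde_cross():
    v = np.array([1.0, 2.0, 3.0])
    w = np.array([-1.0, 0.5, 2.0])
    assert np.allclose(get_tilde(v).dot(w), np.cross(v, w))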
# get rotation matrix
def get_rotation_matrix(axis, direct, cosine, sine):
if direct:
if axis == 1:
return np.array([[1.0, 0.0, 0.0], [0.0, cosine, sine], [0.0, -sine, cosine]])
elif axis == 2:
return np.array([[cosine, 0.0, -sine], [0.0, 1.0, 0.0], [sine, 0.0, cosine]])
elif axis == 3:
return np.array([[cosine, sine, 0.0], [-sine, cosine, 0.0], [0.0, 0.0, 1.0]])
else:
return np.array([])
else:
if axis == 1:
return np.array([[1.0, 0.0, 0.0], [0.0, cosine, -sine], [0.0, sine, cosine]])
elif axis == 2:
return np.array([[cosine, 0.0, sine], [0.0, 1.0, 0.0], [-sine, 0.0, cosine]])
elif axis == 3:
return np.array([[cosine, -sine, 0.0], [sine, cosine, 0.0], [0.0, 0.0, 1.0]])
else:
return np.array([])
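# Hedged sketch (hypothetical helper): every elementary rotation matrix
# returned above is orthonormal, so R.T * R == I for any angle, for all
# three axes and both sign conventions.
def _example_rotation_orthonormal():
    c, s = np.cos(0.3), np.sin(0.3)
    for axis in (1, 2, 3):
        for direct in (True, False):
            R = get_rotation_matrix(axis, direct, c, s)
            assert np.allclose(R.T.dot(R), np.eye(3))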
# get vector axis
def get_vector_axis(axis, direct, elem):
if direct:
if axis == 1:
return np.array([[elem], [0.0], [0.0]])
elif axis == 2:
return np.array([[0.0], [elem], [0.0]])
elif axis == 3:
return np.array([[0.0], [0.0], [elem]])
else:
return np.array([])
else:
if axis == 1:
return np.array([[-elem], [0.0], [0.0]])
elif axis == 2:
return np.array([[0.0], [-elem], [0.0]])
elif axis == 3:
return np.array([[0.0], [0.0], [-elem]])
else:
return np.array([])
# compute the derivative of an element (for jacobian)
def der_elem(elem_str, Rj, xj, xgj, der_var):
# element to derive (string)
elem_str = elem_str.replace('- ','-').strip()
# derivative axis
der_q = int(der_var.replace('q',''))
# detect positive/negative
elem_split = elem_str.split('-')
cur_len = len(elem_split)
if cur_len == 1: # positive
neg_flag = 0
pos_str = elem_split[0]
elif cur_len == 2: # negative
neg_flag = 1
pos_str = elem_split[1]
else:
print('Error: {} instead of 1 or 2 in negative detection !'.format(cur_len))
exit()
# compute derivative
result = 0
# cosine
if pos_str == 'c{}'.format(der_q):
result += -sp.Symbol('s{}'.format(der_q))
# sine
elif pos_str == 's{}'.format(der_q):
result += sp.Symbol('c{}'.format(der_q))
# other
else:
[rot_flag, a, b, c] = isRot(pos_str)
[vec_flag, d, e] = isVec(pos_str)
# rotation matrix
if rot_flag:
result += Rj[a-1][der_q-1][(b-1)*3+(c-1)]
# vector
elif vec_flag:
result += xj[d-1][der_q-1][e-1]
# apply negative
if neg_flag:
result = -result
return result
# compute the derivative of an expression (for jacobian)
def symbolic_jacob_der(Rj, xj, xgj, symb_var, der_var):
# list of all terms
term_list = str(symb_var).replace('- ','-').replace('-','+-').split('+')
if term_list[0] == '':
term_list.pop(0)
result = 0
# loop on all terms
for cur_term in term_list:
# detect products
cur_term_split = cur_term.split('*')
cur_len = len(cur_term_split)
# no product
if cur_len == 1:
result += der_elem(cur_term_split[0], Rj, xj, xgj, der_var)
# one product
elif cur_len == 2:
result += der_elem(cur_term_split[0], Rj, xj, xgj, der_var)*sp.Symbol(cur_term_split[1].strip())
result += der_elem(cur_term_split[1], Rj, xj, xgj, der_var)*sp.Symbol(cur_term_split[0].strip())
# other
else:
            print('Error: {} products counted, only implemented for 0 or 1!'.format(cur_len-1))
exit()
return result
# write the beginning of the file
def write_file_beginning(out_file, joint_id_names):
out_file.write('/*! \n')
out_file.write(' * \\author <NAME>\n')
out_file.write(' * \\file forward_kinematics.cc\n')
out_file.write(' * \\brief forward kinematics computation for the COMAN model\n')
out_file.write(' */\n\n')
out_file.write('// joints enumeration\n')
out_file.write('enum {')
count = 0
for i in range(1, len(joint_id_names)):
count += 1
if i == 1:
out_file.write('{}'.format(get_string_enum(joint_id_names[i])))
elif count >= 6:
count = 0
out_file.write(',\n {}'.format(get_string_enum(joint_id_names[i])))
else:
out_file.write(', {}'.format(get_string_enum(joint_id_names[i])))
out_file.write('};\n\n')
out_file.write('/*! \\brief main kinematics computation\n')
out_file.write(' *\n')
out_file.write(' * \\param[in,out] in_out inputs and outputs class\n')
out_file.write(' *\n')
out_file.write(' * computation of:\n')
out_file.write(' * COM (center of mass) position and velocity\n')
out_file.write(' * feet position, velocity and orientation\n')
    out_file.write(' * waist and torso orientation angles and derivatives\n')
out_file.write(' *\n')
out_file.write(' * ////////////////////////\n')
out_file.write(' * // //\n')
out_file.write(' * // 17 16 21 //\n')
out_file.write(' * // 18 15 22 //\n')
out_file.write(' * // 19 14 23 //\n')
out_file.write(' * // 20 01 24 //\n')
out_file.write(' * // 02 08 //\n')
out_file.write(' * // 03 09 //\n')
out_file.write(' * // 04 10 //\n')
out_file.write(' * // 05 11 //\n')
out_file.write(' * // 06 12 //\n')
out_file.write(' * // 07 13 //\n')
out_file.write(' * // //\n')
out_file.write(' * ////////////////////////\n')
out_file.write(' *\n')
out_file.write(' * origin: in the waist, middle point between the two pitch hip rotations\n')
out_file.write(' * inertial frame: located at the origin (waist), but aligned with the ground (info from IMU)\n')
out_file.write(' *\n')
out_file.write(' * Di : position vector from the anchor point of the previous body to the current body i \n')
out_file.write(' * (previous body is not always body i-1), expressed in the relative\n')
out_file.write(' * frame of the previous body\n')
out_file.write(' * DGi : position vector from the anchor point of body i to its COM (center of mass) G_i,\n')
out_file.write(' * expressed in the relative frame of the current body i\n')
out_file.write(' * Omi : rotational vector from the previous body to the current body i \n')
out_file.write(' * (previous body is not always body i-1), expressed in the relative\n')
out_file.write(' * frame of the previous body\n')
out_file.write(' * Rdi : rotational matrix between body i and its predecessor\n')
out_file.write(' * si : sine of the relative angle before body i\n')
out_file.write(' * ci : cosine of the relative angle before body i\n')
out_file.write(' *\n')
out_file.write(' * xi : absolute position vector (from origin, expressed in the inertial frame)\n')
out_file.write(' * of the anchor point of body i\n')
out_file.write(' * xgi : absolute position vector of the COM G_i of body i\n')
out_file.write(' * xpi : derivative of xi\n')
out_file.write(' * xgpi : derivative of xgi\n')
out_file.write(' * omi : absolute rotational vector of body i\n')
out_file.write(' * Ri : absolute rotational matrix\n')
out_file.write(' * Rti : transpose matrix of Ri\n')
out_file.write(' * xji : jacobian of \'xi\'\n')
out_file.write(' * xgji : jacobian of \'xgi\'\n')
out_file.write(' * Rji : jacobian of \'Ri\'\n')
out_file.write(' */\n')
out_file.write('void ForwardKinematics::main_kinematics(KinematicsInOut &in_out)\n{\n')
# compute the center of mass position and velocity
def com_compute(out_file, nb_bodies, joint_id_names, M, xg, xgp, xgj):
out_file.write(' m_tot = ')
for i in range(0, nb_bodies):
out_file.write('{}'.format(M[i]))
if i == nb_bodies-1:
out_file.write(';\n\n')
else:
out_file.write(' + ')
out_file.write(' // global com absolute position\n')
for i in range(0, 3):
out_file.write(' in_out.r_COM[{}] = '.format(i))
flag_first = 0
for j in range(0, nb_bodies):
if flag_first:
out_file.write(' + {}*{}'.format(M[j], xg[j][i]))
else:
flag_first = 1
out_file.write('({}*xg{}_{}'.format(M[j], j+1, i+1))
if j == nb_bodies-1:
if flag_first:
out_file.write(')/m_tot;\n')
else:
out_file.write('0.0;\n')
out_file.write('\n')
out_file.write(' // global com absolute velocity\n')
for i in range(0, 3):
out_file.write(' in_out.rp_COM[{}] = '.format(i))
flag_first = 0
for j in range(0, nb_bodies):
if flag_first:
out_file.write(' + {}*xgp{}_{}'.format(M[j], j+1, i+1))
else:
flag_first = 1
out_file.write('({}*xgp{}_{}'.format(M[j], j+1, i+1))
if j == nb_bodies-1:
if flag_first:
out_file.write(')/m_tot;\n')
else:
out_file.write('0.0;\n')
out_file.write('\n')
out_file.write(' // global com jacobian\n')
out_file.write(' if (flag_jacob)\n {\n')
for i in range(1, nb_bodies):
for j in range(0, 3):
out_file.write(' in_out.r_COM_der[{}][{}] = '.format(get_string_enum(joint_id_names[i]), j))
flag_first = 0
for k in range(0, nb_bodies):
if xgj[k][i][j] != 0:
if flag_first:
out_file.write(' + {}*{}'.format(M[k], str(xgj[k][i][j])))
else:
flag_first = 1
out_file.write('({}*{}'.format(M[k], str(xgj[k][i][j])))
if k == nb_bodies-1:
if flag_first:
out_file.write(')/m_tot;\n')
else:
out_file.write('0.0;\n')
if i != nb_bodies-1:
out_file.write('\n')
else:
out_file.write(' }\n\n')
# from an orientation matrix, compute the roll, pitch, yaw angles (and derivative)
def yaw_pitch_roll_angles(out_file, angle_name, R_matrix, epsilon):
if epsilon > 0: # epsilon = 1 -> pitch angle in [-pi/2 ; pi/2]
out_file.write(' in_out.{}[0] = atan2({}, {});\n'.format(angle_name, R_matrix[5], R_matrix[8]))
out_file.write(' in_out.{}[1] = atan2(-{}, sqrt({}*{} + {}*{}));\n'.format(angle_name, R_matrix[2], R_matrix[0], R_matrix[0], R_matrix[1], R_matrix[1]))
out_file.write(' in_out.{}[2] = atan2({}, {});\n'.format(angle_name, R_matrix[1], R_matrix[0]))
else: # epsilon = -1 -> pitch angle in [pi/2 ; 3*pi/2]
out_file.write(' in_out.{}[0] = atan2(-{}, -{});\n'.format(angle_name, R_matrix[5], R_matrix[8]))
out_file.write(' in_out.{}[1] = atan2(-{}, -sqrt({}*{} + {}*{}));\n'.format(angle_name, R_matrix[2], R_matrix[0], R_matrix[0], R_matrix[1], R_matrix[1]))
out_file.write(' in_out.{}[2] = atan2(-{}, -{});\n'.format(angle_name, R_matrix[1], R_matrix[0]))
# compute the time derivatives of 'yaw_pitch_roll_angles'
def theta_dot_compute(out_file, omega_in, omega_out, body_part):
out_file.write(' in_out.{}[0] = inv_c_y_{} * (c_z_{}*{} + s_z_{}*{});\n'.format(omega_out, body_part, body_part, omega_in[0], body_part, omega_in[1]))
out_file.write(' in_out.{}[1] = c_z_{}*{} - s_z_{}*{};\n'.format(omega_out, body_part, omega_in[1], body_part, omega_in[0]))
out_file.write(' in_out.{}[2] = inv_c_y_{} * s_y_{} * (s_z_{}*{} + c_z_{}*{}) + {};\n'.format(omega_out, body_part, body_part, body_part, omega_in[1], body_part, omega_in[0], omega_in[2]))
# angles (position and derivative) of the waist and the torso
def torso_waist_angles(out_file, R, om, waist_id, torso_id):
out_file.write(' // waist orientation matrix as angles [rad]\n')
yaw_pitch_roll_angles(out_file, 'theta_waist', R[waist_id], 1)
out_file.write('\n')
out_file.write(' // torso orientation matrix as angles [rad]\n')
yaw_pitch_roll_angles(out_file, 'theta_torso', R[torso_id], 1)
out_file.write('\n')
out_file.write(' c_y_waist = cos(in_out.theta_waist[1]);\n')
out_file.write(' c_y_torso = cos(in_out.theta_torso[1]);\n')
out_file.write(' c_z_waist = cos(in_out.theta_waist[2]);\n')
out_file.write(' c_z_torso = cos(in_out.theta_torso[2]);\n\n')
out_file.write(' s_y_waist = sin(in_out.theta_waist[1]);\n')
out_file.write(' s_y_torso = sin(in_out.theta_torso[1]);\n')
out_file.write(' s_z_waist = sin(in_out.theta_waist[2]);\n')
out_file.write(' s_z_torso = sin(in_out.theta_torso[2]);\n\n')
out_file.write(' if ((!c_y_waist) || (!c_y_torso))\n {\n')
out_file.write(' return;\n }\n\n')
out_file.write(' inv_c_y_waist = 1.0 / c_y_waist;\n')
out_file.write(' inv_c_y_torso = 1.0 / c_y_torso;\n\n')
out_file.write(' // waist orientation angle derivatives [rad/s]\n')
theta_dot_compute(out_file, om[waist_id], 'omega_waist', 'waist')
out_file.write('\n')
out_file.write(' // torso orientation angle derivatives [rad/s]\n')
theta_dot_compute(out_file, om[torso_id], 'omega_torso', 'torso')
# compute the feet position, velocity and orientation
def feet_compute(out_file, joint_id_names, R, x, xp, om, Rj, xj, xgj, r_foot_id, l_foot_id, x_min, x_max, y_min, y_max):
# symbolic variables declarations
nb_contacts = 4
x_r_foot = x[r_foot_id]
x_l_foot = x[l_foot_id]
xp_r_foot = xp[r_foot_id]
xp_l_foot = xp[l_foot_id]
om_r_foot = om[r_foot_id]
om_l_foot = om[l_foot_id]
R_r_foot = R[r_foot_id]
R_l_foot = R[l_foot_id]
Dpt_r_foot = sp.zeros(3, 1)
Dpt_l_foot = sp.zeros(3, 1)
Dpt_r_foot[2] = sp.Symbol('DPT_3_16')
Dpt_l_foot[2] = sp.Symbol('DPT_3_29')
Dpt_r_foot_cont = nb_contacts * [None]
Dpt_l_foot_cont = nb_contacts * [None]
for i in range(0, nb_contacts):
Dpt_r_foot_cont[i] = sp.zeros(3, 1)
Dpt_l_foot_cont[i] = sp.zeros(3, 1)
Dpt_r_foot_cont[0][0] = x_min
Dpt_r_foot_cont[1][0] = x_min
Dpt_r_foot_cont[2][0] = x_max
Dpt_r_foot_cont[3][0] = x_max
Dpt_r_foot_cont[0][1] = y_min
Dpt_r_foot_cont[1][1] = y_max
Dpt_r_foot_cont[2][1] = y_min
Dpt_r_foot_cont[3][1] = y_max
for i in range(0, nb_contacts):
Dpt_r_foot_cont[i][2] = sp.Symbol('DPT_3_16')
for i in range(0, nb_contacts):
for j in range(0, 3):
Dpt_l_foot_cont[i][j] = Dpt_r_foot_cont[i][j]
x_r_cont = nb_contacts * [None]
x_l_cont = nb_contacts * [None]
# computation
om_tilde_r_foot = get_tilde(om_r_foot)
om_tilde_l_foot = get_tilde(om_l_foot)
x_r = x_r_foot + R_r_foot.T * Dpt_r_foot
x_l = x_l_foot + R_l_foot.T * Dpt_l_foot
xp_r = xp_r_foot + om_tilde_r_foot * (R_r_foot.T * Dpt_r_foot)
xp_l = xp_l_foot + om_tilde_l_foot * (R_l_foot.T * Dpt_l_foot)
for i in range(0, nb_contacts):
x_r_cont[i] = x_r_foot + R_r_foot.T * Dpt_r_foot_cont[i]
x_l_cont[i] = x_l_foot + R_l_foot.T * Dpt_l_foot_cont[i]
# writing outputs
out_file.write(' // right foot absolute position\n')
for i in range(0,3):
out_file.write(' in_out.r_Rfoot[{}] = {};\n'.format(i, x_r[i]))
out_file.write('\n')
out_file.write(' // right foot absolute velocity\n')
for i in range(0,3):
out_file.write(' in_out.rp_Rfoot[{}] = {};\n'.format(i, xp_r[i]))
out_file.write('\n')
out_file.write(' // right foot jacobian\n')
out_file.write(' if (flag_jacob)\n {\n')
flag_first = 0
for i in range (1, nb_bodies):
flag_print = 0
for j in range(0, 3):
cur_jac = symbolic_jacob_der(Rj, xj, xgj, x_r[j], 'q{}'.format(i+1))
if cur_jac != 0:
if not flag_first:
flag_first = 1
flag_print = 1
elif not flag_print:
flag_print = 1
out_file.write('\n')
out_file.write(' in_out.r_Rfoot_der[{}][{}] = {};\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac))
out_file.write(' }\n\n')
out_file.write(' // left foot absolute position\n')
for i in range(0,3):
out_file.write(' in_out.r_Lfoot[{}] = {};\n'.format(i, x_l[i]))
out_file.write('\n')
out_file.write(' // left foot absolute velocity\n')
for i in range(0,3):
out_file.write(' in_out.rp_Lfoot[{}] = {};\n'.format(i, xp_l[i]))
out_file.write('\n')
out_file.write(' // left foot jacobian\n')
out_file.write(' if (flag_jacob)\n {\n')
flag_first = 0
for i in range (1, nb_bodies):
flag_print = 0
for j in range(0, 3):
cur_jac = symbolic_jacob_der(Rj, xj, xgj, x_l[j], 'q{}'.format(i+1))
if cur_jac != 0:
if not flag_first:
flag_first = 1
flag_print = 1
elif not flag_print:
flag_print = 1
out_file.write('\n')
out_file.write(' in_out.r_Lfoot_der[{}][{}] = {};\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac))
out_file.write(' }\n\n')
out_file.write(' // right foot contact points absolute position\n')
for i in range(0, nb_contacts):
for j in range(0, 3):
out_file.write(' in_out.r_Rfoot_cont[{}][{}] = {};\n'.format(i, j, x_r_cont[i][j]))
out_file.write('\n')
out_file.write(' // right foot contact points jacobian\n')
out_file.write(' if (flag_jacob)\n {\n')
flag_first = 0
for i in range(0, nb_contacts):
for j in range (1, nb_bodies):
flag_print = 0
for k in range(0, 3):
cur_jac = symbolic_jacob_der(Rj, xj, xgj, x_r_cont[i][k], 'q{}'.format(j+1))
if cur_jac != 0:
if not flag_first:
flag_first = 1
flag_print = 1
elif not flag_print:
flag_print = 1
out_file.write('\n')
out_file.write(' in_out.r_Rfoot_cont_der[{}][{}][{}] = {};\n'.format(i, get_string_enum(joint_id_names[j]), k, cur_jac))
out_file.write(' }\n\n')
out_file.write(' // left foot contact points absolute position\n')
for i in range(0, nb_contacts):
for j in range(0, 3):
out_file.write(' in_out.r_Lfoot_cont[{}][{}] = {};\n'.format(i, j, x_l_cont[i][j]))
out_file.write('\n')
out_file.write(' // left foot contact points jacobian\n')
out_file.write(' if (flag_jacob)\n {\n')
flag_first = 0
for i in range(0, nb_contacts):
for j in range (1, nb_bodies):
flag_print = 0
for k in range(0, 3):
cur_jac = symbolic_jacob_der(Rj, xj, xgj, x_l_cont[i][k], 'q{}'.format(j+1))
if cur_jac != 0:
if not flag_first:
flag_first = 1
flag_print = 1
elif not flag_print:
flag_print = 1
out_file.write('\n')
out_file.write(' in_out.r_Lfoot_cont_der[{}][{}][{}] = {};\n'.format(i, get_string_enum(joint_id_names[j]), k, cur_jac))
out_file.write(' }\n\n')
out_file.write(' // feet absolute orientation\n')
for i in range(0, 9):
out_file.write(' in_out.Rfoot_or[{}] = {};\n'.format(i, R_r_foot[i]))
out_file.write('\n')
for i in range(0, 9):
out_file.write(' in_out.Lfoot_or[{}] = {};\n'.format(i, R_l_foot[i]))
out_file.write('\n')
out_file.write(' // right foot absolute orientation jacobian\n')
out_file.write(' if (flag_jacob)\n {\n')
flag_first = 0
for i in range (1, nb_bodies):
flag_print = 0
for j in range(0,9):
cur_jac = symbolic_jacob_der(Rj, xj, xgj, R_r_foot[j], 'q{}'.format(i+1))
if cur_jac != 0:
if not flag_first:
flag_first = 1
flag_print = 1
elif not flag_print:
flag_print = 1
out_file.write('\n')
out_file.write(' in_out.Rfoot_or_der[{}][{}] = {};\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac))
out_file.write(' }\n\n')
out_file.write(' // left foot absolute orientation jacobian\n')
out_file.write(' if (flag_jacob)\n {\n')
flag_first = 0
for i in range (1, nb_bodies):
flag_print = 0
for j in range(0,9):
cur_jac = symbolic_jacob_der(Rj, xj, xgj, R_l_foot[j], 'q{}'.format(i+1))
if cur_jac != 0:
if not flag_first:
flag_first = 1
flag_print = 1
elif not flag_print:
flag_print = 1
out_file.write('\n')
out_file.write(' in_out.Lfoot_or_der[{}][{}] = {};\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac))
out_file.write(' }\n\n')
out_file.write(' // right foot orientation matrix as angles [rad]\n')
yaw_pitch_roll_angles(out_file, 'theta_Rfoot', R[r_foot_id], 1)
out_file.write('\n')
out_file.write(' // left foot orientation matrix as angles [rad]\n')
yaw_pitch_roll_angles(out_file, 'theta_Lfoot', R[l_foot_id], 1)
out_file.write('\n')
out_file.write(' c_y_Rfoot = cos(in_out.theta_Rfoot[1]);\n')
out_file.write(' c_y_Lfoot = cos(in_out.theta_Lfoot[1]);\n')
out_file.write(' c_z_Rfoot = cos(in_out.theta_Rfoot[2]);\n')
out_file.write(' c_z_Lfoot = cos(in_out.theta_Lfoot[2]);\n\n')
out_file.write(' s_y_Rfoot = sin(in_out.theta_Rfoot[1]);\n')
out_file.write(' s_y_Lfoot = sin(in_out.theta_Lfoot[1]);\n')
out_file.write(' s_z_Rfoot = sin(in_out.theta_Rfoot[2]);\n')
out_file.write(' s_z_Lfoot = sin(in_out.theta_Lfoot[2]);\n\n')
out_file.write(' if ((!c_y_Rfoot) || (!c_y_Lfoot))\n {\n')
out_file.write(' return;\n }\n\n')
out_file.write(' inv_c_y_Rfoot = 1.0 / c_y_Rfoot;\n')
out_file.write(' inv_c_y_Lfoot = 1.0 / c_y_Lfoot;\n\n')
out_file.write(' // right foot orientation angle derivatives [rad/s]\n')
theta_dot_compute(out_file, om[r_foot_id], 'omega_Rfoot', 'Rfoot')
out_file.write('\n')
out_file.write(' // left foot orientation angle derivatives [rad/s]\n')
theta_dot_compute(out_file, om[l_foot_id], 'omega_Lfoot', 'Lfoot')
out_file.write('\n')
# compute the wrist positions, velocities and orientations
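# r_elb_id / l_elb_id are the body indices of the two elbows; (r_wrist_x, r_wrist_y,
# r_wrist_z) is the wrist offset expressed in the right elbow frame (the y component
# is mirrored for the left arm, see Dpt_l_wrist below)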
def wrists_compute(out_file, joint_id_names, R, x, xp, om, Rj, xj, xgj, r_elb_id, l_elb_id, r_wrist_x, r_wrist_y, r_wrist_z):
# symbolic variables declarations
x_r_elb = x[r_elb_id]
x_l_elb = x[l_elb_id]
xp_r_elb = xp[r_elb_id]
xp_l_elb = xp[l_elb_id]
om_r_elb = om[r_elb_id]
om_l_elb = om[l_elb_id]
R_r_elb = R[r_elb_id]
R_l_elb = R[l_elb_id]
Dpt_r_wrist = sp.zeros(3, 1)
Dpt_l_wrist = sp.zeros(3, 1)
Dpt_r_wrist[0] = r_wrist_x
Dpt_r_wrist[1] = r_wrist_y
Dpt_r_wrist[2] = r_wrist_z
Dpt_l_wrist[0] = r_wrist_x
Dpt_l_wrist[1] = -r_wrist_y
Dpt_l_wrist[2] = r_wrist_z
# computation
om_tilde_r_elb = get_tilde(om_r_elb)
om_tilde_l_elb = get_tilde(om_l_elb)
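    # point kinematics (get_tilde builds the skew-symmetric cross-product matrix):
    #   x  = x_elb  + R_elb^T * Dpt            (absolute position)
    #   xp = xp_elb + omega x (R_elb^T * Dpt)  (absolute velocity)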
x_r = x_r_elb + R_r_elb.T * Dpt_r_wrist
x_l = x_l_elb + R_l_elb.T * Dpt_l_wrist
xp_r = xp_r_elb + om_tilde_r_elb * (R_r_elb.T * Dpt_r_wrist)
xp_l = xp_l_elb + om_tilde_l_elb * (R_l_elb.T * Dpt_l_wrist)
# writing outputs
out_file.write(' // right wrist absolute position\n')
for i in range(0,3):
out_file.write(' in_out.r_Rwrist[{}] = {};\n'.format(i, x_r[i]))
out_file.write('\n')
out_file.write(' // right wrist absolute velocity\n')
for i in range(0,3):
out_file.write(' in_out.rp_Rwrist[{}] = {};\n'.format(i, xp_r[i]))
out_file.write('\n')
out_file.write(' // right wrist jacobian\n')
out_file.write(' if (flag_jacob)\n {\n')
flag_first = 0
    for i in range(1, nb_bodies):
flag_print = 0
for j in range(0, 3):
cur_jac = symbolic_jacob_der(Rj, xj, xgj, x_r[j], 'q{}'.format(i+1))
if cur_jac != 0:
if not flag_first:
flag_first = 1
flag_print = 1
elif not flag_print:
flag_print = 1
out_file.write('\n')
out_file.write(' in_out.r_Rwrist_der[{}][{}] = {};\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac))
out_file.write(' }\n\n')
out_file.write(' // left wrist absolute position\n')
for i in range(0,3):
out_file.write(' in_out.r_Lwrist[{}] = {};\n'.format(i, x_l[i]))
out_file.write('\n')
out_file.write(' // left wrist absolute velocity\n')
for i in range(0,3):
out_file.write(' in_out.rp_Lwrist[{}] = {};\n'.format(i, xp_l[i]))
out_file.write('\n')
out_file.write(' // left wrist jacobian\n')
out_file.write(' if (flag_jacob)\n {\n')
flag_first = 0
    for i in range(1, nb_bodies):
flag_print = 0
for j in range(0, 3):
cur_jac = symbolic_jacob_der(Rj, xj, xgj, x_l[j], 'q{}'.format(i+1))
if cur_jac != 0:
if not flag_first:
flag_first = 1
flag_print = 1
elif not flag_print:
flag_print = 1
out_file.write('\n')
out_file.write(' in_out.r_Lwrist_der[{}][{}] = {};\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac))
out_file.write(' }\n\n')
out_file.write(' // wrists absolute orientation\n')
for i in range(0, 9):
out_file.write(' in_out.Rwrist_or[{}] = {};\n'.format(i, R_r_elb[i]))
out_file.write('\n')
for i in range(0, 9):
out_file.write(' in_out.Lwrist_or[{}] = {};\n'.format(i, R_l_elb[i]))
out_file.write('\n')
out_file.write(' // right wrist absolute orientation jacobian\n')
out_file.write(' if (flag_jacob)\n {\n')
flag_first = 0
    for i in range(1, nb_bodies):
flag_print = 0
for j in range(0,9):
cur_jac = symbolic_jacob_der(Rj, xj, xgj, R_r_elb[j], 'q{}'.format(i+1))
if cur_jac != 0:
if not flag_first:
flag_first = 1
flag_print = 1
elif not flag_print:
flag_print = 1
out_file.write('\n')
out_file.write(' in_out.Rwrist_or_der[{}][{}] = {};\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac))
out_file.write(' }\n\n')
out_file.write(' // left wrist absolute orientation jacobian\n')
out_file.write(' if (flag_jacob)\n {\n')
flag_first = 0
    for i in range(1, nb_bodies):
flag_print = 0
for j in range(0,9):
cur_jac = symbolic_jacob_der(Rj, xj, xgj, R_l_elb[j], 'q{}'.format(i+1))
if cur_jac != 0:
if not flag_first:
flag_first = 1
flag_print = 1
elif not flag_print:
flag_print = 1
out_file.write('\n')
out_file.write(' in_out.Lwrist_or_der[{}][{}] = {};\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac))
out_file.write(' }\n\n')
# get a string for the enumeration of joints
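# e.g. 'RightHipPitch_id' -> 'RIGHT_HIP_PITCH': drop the trailing '_id' part,
# split on capital letters, then join the pieces with '_' and upper-case them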
def get_string_enum(cur_string):
cur_split = cur_string.split('_')
if len(cur_split) >= 2:
new_string = cur_split[0]
for i in range(1, len(cur_split)-1):
new_string = '{}{}'.format(new_string, cur_split[i])
else:
new_string = cur_string
    cur_split = list(filter(None, re.split("([A-Z][^A-Z]*)", new_string)))
new_string = cur_split[0].upper()
for i in range(1, len(cur_split)):
new_string = '{}_{}'.format(new_string, cur_split[i].upper())
return new_string
# write the end of the file
def write_file_end(out_file):
out_file.write('}\n')
# print matrix components declaration
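# e.g. prefix 'R1_' declares the nine doubles R1_11, R1_12, ..., R1_33 on one line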
def write_matrix_declaration(out_file, prefix):
out_file.write(' double ')
for i in range(0,3):
for j in range(0,3):
out_file.write('{}{}{}'.format(prefix, i+1, j+1))
if i == 2 and j == 2:
out_file.write(';\n')
else:
out_file.write(', ')
# print variables declaration
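# e.g. write_variables_declaration(f, 'q', 1, 3) declares "double q1, q2, q3;"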
def write_variables_declaration(out_file, prefix, min_index, max_index):
out_file.write(' double ')
    for i in range(min_index, max_index+1):
out_file.write('{}{}'.format(prefix, i))
        if i == max_index:
out_file.write(';\n')
else:
out_file.write(', ')
# variables initialization
def write_initialization(out_file, nb_bodies, joint_id_names):
out_file.write(' // -- variables initialization -- //\n')
out_file.write('\n // IMU - rotation matrices\n')
for i in range(0, 3):
for j in range(0, 3):
out_file.write(' IMU{}{} = in_out.IMU_Orientation[{}];\n'.format(i+1, j+1, 3*i+j))
out_file.write('\n // IMU - angles velocity\n')
for i in range(0, 3):
out_file.write(' omega_{} = in_out.IMU_Angular_Rate[{}];\n'.format(i+1, i))
out_file.write('\n // joint cosines\n')
for i in range(1, nb_bodies):
out_file.write(' c{} = cos(in_out.q_mot[{}]);\n'.format(i+1, joint_id_names[i]))
out_file.write('\n // joint sines\n')
for i in range(1, nb_bodies):
out_file.write(' s{} = sin(in_out.q_mot[{}]);\n'.format(i+1, joint_id_names[i]))
out_file.write('\n // joint relative velocities\n')
for i in range(1, nb_bodies):
out_file.write(' Om{} = in_out.qd_mot[{}];\n'.format(i+1, joint_id_names[i]))
# write symbolic vector and replace symbolic variable by its name
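# (manual common-subexpression elimination: each nontrivial component is emitted as a
# named C variable, e.g. 'om2_1', and replaced by sp.Symbol of that name, so that all
# downstream symbolic expressions stay short)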
def write_symb_vector(out_file, vector, start_name, end_name):
new_vector = sp.zeros(3, 1)
flag_print = 0
for i in range(0,3):
if vector[i] == 0 or vector[i] == 1:
new_vector[i] = vector[i]
else:
flag_print = 1
elem_name = '{}{}{}'.format(start_name, i+1, end_name)
out_file.write(' {} = {};\n'.format(elem_name, vector[i]).replace('1.0*',''))
new_vector[i] = sp.Symbol(elem_name)
if flag_print:
out_file.write('\n')
return new_vector
# write symbolic matrix and replace symbolic variable by its name
def write_symb_matrix(out_file, matrix, start_name, end_name):
new_matrix = sp.zeros(3, 3)
flag_print = 0
for i in range(0,3):
for j in range(0,3):
if matrix[i,j] == 0 or matrix[i,j] == 1:
new_matrix[i,j] = matrix[i,j]
else:
flag_print = 1
elem_name = '{}{}{}{}'.format(start_name, i+1, j+1, end_name)
out_file.write(' {} = {};\n'.format(elem_name, matrix[i,j]).replace('1.0*',''))
new_matrix[i,j] = sp.Symbol(elem_name)
if flag_print:
out_file.write('\n')
return new_matrix
# save the symbolic vector for print
def print_save_symb_vector(vector, start_name, end_name):
new_vector = sp.zeros(3, 1)
save_vector = 3 * [None]
for i in range(0,3):
if vector[i] == 0 or vector[i] == 1:
new_vector[i] = vector[i]
save_vector[i] = None
else:
elem_name = '{}{}{}'.format(start_name, i+1, end_name)
save_vector[i] = ' {} = {};\n'.format(elem_name, vector[i]).replace('1.0*','')
new_vector[i] = sp.Symbol(elem_name)
return new_vector, save_vector
# save the symbolic matrix for print
def print_save_symb_matrix(matrix, start_name, end_name):
new_matrix = sp.zeros(3, 3)
save_matrix = 9 * [None]
for i in range(0,3):
for j in range(0,3):
if matrix[i,j] == 0 or matrix[i,j] == 1:
new_matrix[i,j] = matrix[i,j]
save_matrix[3*i+j] = None
else:
elem_name = '{}{}{}{}'.format(start_name, i+1, j+1, end_name)
save_matrix[3*i+j] = ' {} = {};\n'.format(elem_name, matrix[i,j]).replace('1.0*','')
new_matrix[i,j] = sp.Symbol(elem_name)
return new_matrix, save_matrix
# write symbolic jacobian of a rotation matrix
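# Rj[index-1][i] receives dR_index/dq_{i+1}; its nontrivial entries are saved for
# printing under names like 'R1_23_d4' (matrix R1, row 2, column 3, derived w.r.t. q4)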
def write_symb_Rj(nb_bodies, Rj, xj, xgj, Rj_print, R_matrix, index):
# loop on all the joints
    for i in range(1, nb_bodies):
new_matrix = sp.zeros(3, 3)
# loop on all the matrix elements
for j in range(0, 9):
new_matrix[j] = symbolic_jacob_der(Rj, xj, xgj, R_matrix[j], 'q{}'.format(i+1))
[Rj[index-1][i], Rj_print[index-1][i]] = print_save_symb_matrix(new_matrix, 'R{}_'.format(index), '_d{}'.format(i+1))
# write symbolic jacobian of an anchor point
def write_symb_xj(nb_bodies, Rj, xj, xgj, xj_print, x_vector, index):
# loop on all the joints
    for i in range(1, nb_bodies):
new_vector = sp.zeros(3, 1)
# loop on all the vector elements
for j in range(0, 3):
new_vector[j] = symbolic_jacob_der(Rj, xj, xgj, x_vector[j], 'q{}'.format(i+1))
[xj[index-1][i], xj_print[index-1][i]] = print_save_symb_vector(new_vector, 'x{}_'.format(index), '_d{}'.format(i+1))
# write symbolic jacobian of a com point
def write_symb_xgj(nb_bodies, Rj, xj, xgj, xgj_print, x_vector, index):
# loop on all the joints
    for i in range(1, nb_bodies):
new_vector = sp.zeros(3, 1)
# loop on all the vector elements
for j in range(0, 3):
new_vector[j] = symbolic_jacob_der(Rj, xj, xgj, x_vector[j], 'q{}'.format(i+1))
[xgj[index-1][i], xgj_print[index-1][i]] = print_save_symb_vector(new_vector, 'xg{}_'.format(index), '_d{}'.format(i+1))
# symbolic computation
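# the kinematic chain is built recursively from the IMU frame (body 0):
#   R[i]  = Rd[i] * R[parent]                 (orientation, Rd = local joint rotation)
#   om[i] = om[parent] + Rt[parent] * Om[i]   (angular velocity)
#   x[i]  = x[parent]  + Rt[parent] * Dpt[i]  (anchor point position)
# writing every intermediate out as named C variables to keep the generated code short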
def symbolic_computation(out_file, nb_bodies, joint_id_names, rot_axis, parent_body_index, Dpt, Dg, M):
out_file.write('\n\n // -- symbolic computation -- //\n')
    # Rj, xj and xgj (jacobians of the rotation matrices, anchor points and COM positions)
Rj = nb_bodies*[None]
xj = nb_bodies*[None]
xgj = nb_bodies*[None]
Rj_print = nb_bodies*[None]
xj_print = nb_bodies*[None]
xgj_print = nb_bodies*[None]
for i in range(0, nb_bodies):
Rj[i] = nb_bodies*[None]
xj[i] = nb_bodies*[None]
xgj[i] = nb_bodies*[None]
Rj_print[i] = nb_bodies*[None]
xj_print[i] = nb_bodies*[None]
xgj_print[i] = nb_bodies*[None]
for j in range(0, nb_bodies-1):
Rj[i][j] = sp.zeros(3, 3)
xj[i][j] = sp.zeros(3, 1)
xgj[i][j] = sp.zeros(3, 1)
Rj_print[i][j] = 9 * [None]
xj_print[i][j] = 3 * [None]
xgj_print[i][j] = 3 * [None]
# rotation matrices
out_file.write('\n // rotation matrices\n')
R = nb_bodies*[None]
Rt = nb_bodies*[None]
Rd = nb_bodies*[None]
Rd[0] = sp.zeros(3, 3)
R[0] = sp.zeros(3, 3)
for i in range(0, 3):
for j in range(0, 3):
R[0][i,j] = sp.Symbol('IMU{}{}'.format(i+1, j+1))
write_symb_Rj(nb_bodies, Rj, xj, xgj, Rj_print, R[0], 1)
R[0] = write_symb_matrix(out_file, R[0], 'R1_', '')
Rt[0] = R[0].T
for i in range(1, nb_bodies):
Rd[i] = get_rotation_matrix(rot_axis[i], 1, sp.Symbol('c{}'.format(i+1)), sp.Symbol('s{}'.format(i+1)))
R[i] = Rd[i] * R[parent_body_index[i]]
write_symb_Rj(nb_bodies, Rj, xj, xgj, Rj_print, R[i], i+1)
R[i] = write_symb_matrix(out_file, R[i], 'R{}_'.format(i+1), '')
Rt[i] = R[i].T
# jacobian rotation matrices
out_file.write('\n // jacobian rotation matrices\n')
out_file.write(' if (flag_jacob)\n {\n')
flag_first = 0
for i in range(0, nb_bodies):
for j in range(1, nb_bodies):
flag_print = 0
for k in range(0, 9):
                if Rj_print[i][j][k] is not None:
if not flag_first:
flag_first = 1
flag_print = 1
elif not flag_print:
flag_print = 1
out_file.write('\n')
out_file.write('{}'.format(Rj_print[i][j][k]))
out_file.write(' }\n')
# omega
out_file.write('\n // joint absolute velocities\n')
Om = nb_bodies*[None]
om = nb_bodies*[None]
om_tilde = nb_bodies*[None]
Om[0] = sp.zeros(3, 1)
om[0] = sp.zeros(3, 1)
for i in range(0,3):
om[0][i] = sp.Symbol('omega_{}'.format(i+1))
om[0] = write_symb_vector(out_file, om[0], 'om1_', '')
om_tilde[0] = get_tilde(om[0])
for i in range(1, nb_bodies):
parent_id = parent_body_index[i]
Om[i] = get_vector_axis(rot_axis[i], 1, sp.Symbol('Om{}'.format(i+1)))
om[i] = om[parent_id] + Rt[parent_id] * Om[i]
om[i] = write_symb_vector(out_file, om[i], 'om{}_'.format(i+1), '')
om_tilde[i] = get_tilde(om[i])
# x & xp
out_file.write('\n // anchor point absolute positions and velocities\n')
x = nb_bodies*[None]
xp = nb_bodies*[None]
x[0] = Rt[0] * Dpt[0]
xp[0] = om_tilde[0] * (Rt[0] * Dpt[0])
write_symb_xj(nb_bodies, Rj, xj, xgj, xj_print, x[0], 1)
x[0] = write_symb_vector(out_file, x[0], 'x1_', '')
xp[0] = write_symb_vector(out_file, xp[0], 'xp1_', '')
for i in range(1, nb_bodies):
parent_id = parent_body_index[i]
x[i] = x[parent_id] + Rt[parent_id] * Dpt[i]
xp[i] = xp[parent_id] + om_tilde[parent_id] * (Rt[parent_id] * Dpt[i])
write_symb_xj(nb_bodies, Rj, xj, xgj, xj_print, x[i], i+1)
x[i] = write_symb_vector(out_file, x[i], 'x{}_'.format(i+1), '')
xp[i] = write_symb_vector(out_file, xp[i], 'xp{}_'.format(i+1), '')
# jacobian x
out_file.write('\n // jacobian anchor point positions\n')
out_file.write(' if (flag_jacob)\n {\n')
flag_first = 0
for i in range(0, nb_bodies):
for j in range(1, nb_bodies):
flag_print = 0
for k in range(0, 3):
                if xj_print[i][j][k] is not None:
if not flag_first:
flag_first = 1
flag_print = 1
elif not flag_print:
flag_print = 1
out_file.write('\n')
out_file.write('{}'.format(xj_print[i][j][k]))
out_file.write(' }\n')
# xg & xgp
out_file.write('\n // com absolute positions and velocities\n')
xg = nb_bodies*[None]
xgp = nb_bodies*[None]
for i in range(0, nb_bodies):
xg[i] = x[i] + Rt[i] * Dg[i]
xgp[i] = xp[i] + om_tilde[i] * (Rt[i] * Dg[i])
write_symb_xgj(nb_bodies, Rj, xj, xgj, xgj_print, xg[i], i+1)
xg[i] = write_symb_vector(out_file, xg[i], 'xg{}_'.format(i+1), '')
xgp[i] = write_symb_vector(out_file, xgp[i], 'xgp{}_'.format(i+1), '')
# jacobian xg
out_file.write('\n // jacobian com absolute positions\n')
out_file.write(' if (flag_jacob)\n {\n')
flag_first = 0
for i in range(0, nb_bodies):
for j in range(1, nb_bodies):
flag_print = 0
for k in range(0, 3):
                if xgj_print[i][j][k] is not None:
if not flag_first:
flag_first = 1
flag_print = 1
elif not flag_print:
flag_print = 1
out_file.write('\n')
out_file.write('{}'.format(xgj_print[i][j][k]))
out_file.write(' }\n')
# results
out_file.write('\n // -- Collecting results -- //\n\n')
com_compute(out_file, nb_bodies, joint_id_names, M, xg, xgp, xgj)
feet_compute(out_file, joint_id_names, R, x, xp, om, Rj, xj, xgj, 6, 12, -0.06, 0.08, -0.045, 0.045)
wrists_compute(out_file, joint_id_names, R, x, xp, om, Rj, xj, xgj, 19, 23, -0.02, -0.005, -0.225)
torso_waist_angles(out_file, R, om, 0, 15)
# generate the symbolic output file
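# two-pass generation: the computation is first written to a temporary file, then copied
# into the final file with the variable declarations (collected by print_all_declaration)
# inserted at the '// -- variables initialization -- //' marker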
def gen_symbolic_out(out_file_name, nb_bodies, rot_axis, parent_body_index, joint_id_names, Dpt, Dg, M):
# temporary file
in_temp = './{}_temp.cc'.format(out_file_name)
file_temp = open(in_temp, 'w')
# beginning of the file
write_file_beginning(file_temp, joint_id_names)
# variables initialization
    write_initialization(file_temp, nb_bodies, joint_id_names)
# symbolic computation
symbolic_computation(file_temp, nb_bodies, joint_id_names, rot_axis, parent_body_index, Dpt, Dg, M)
# end of the file
write_file_end(file_temp)
file_temp.close()
# output file
out_file = open('./{}.cc'.format(out_file_name), 'w')
with open(in_temp, 'r') as f:
# loop on all the lines
for line in f:
# declaration
            if '// -- variables initialization -- //' in line:
out_file.write(' // -- variables declaration -- //\n\n')
print_all_declaration(in_temp, out_file, 100)
out_file.write('\n\n')
# copy temporary file
out_file.write(line)
out_file.close()
# remove temporary file
os.remove(in_temp)
# main script
# rotation axis for each joint before body i (1:x, 2:y, 3:z)
rot_axis = np.array([0, # waist
2, 1, 3, 2, 1, 2, # right leg
2, 1, 3, 2, 1, 2, # left leg
1, 2, 3, # trunk
2, 1, 3, 2, # right arm
2, 1, 3, 2 # left arm
])
# parent index
parent_body_index = np.array([ -1, # waist
0, 1, 2, 3, 4, 5, # right leg
0, 7, 8, 9, 10, 11, # left leg
0, 13, 14, # trunk
15, 16, 17, 18, # right arm
15, 20, 21, 22 # left arm
])
nb_bodies = len(parent_body_index)
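# 24 bodies in total: waist + 2 x 6 (legs) + 3 (trunk) + 2 x 4 (arms)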
## anchor point positions
Dpt = nb_bodies*[None]
# waist
Dpt[0] = sp.Matrix([0.0, 0.0, 0.0])
# right leg
Dpt[1] = sp.Matrix([0.0, sp.Symbol('DPT_2_2'), 0.0])
Dpt[2] = sp.Matrix([0.0, sp.Symbol('DPT_2_6'), 0.0])
Dpt[3] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_8')])
Dpt[4] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_10')])
Dpt[5] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_12')])
Dpt[6] = sp.Matrix([0.0, 0.0, 0.0])
# left leg
Dpt[7] = sp.Matrix([0.0, sp.Symbol('DPT_2_3'), 0.0])
Dpt[8] = sp.Matrix([0.0, sp.Symbol('DPT_2_18'), 0.0])
Dpt[9] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_20')])
Dpt[10] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_22')])
Dpt[11] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_24')])
Dpt[12] = sp.Matrix([0.0, 0.0, 0.0])
# trunk
Dpt[13] = sp.Matrix([sp.Symbol('DPT_1_4'), 0.0, sp.Symbol('DPT_3_4')])
Dpt[14] = sp.Matrix([0.0, 0.0, 0.0])
Dpt[15] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_32')])
# right arm
Dpt[16] = sp.Matrix([sp.Symbol('DPT_1_36'), sp.Symbol('DPT_2_36'), sp.Symbol('DPT_3_36')])
Dpt[17] = sp.Matrix([0.0, sp.Symbol('DPT_2_39'), 0.0])
Dpt[18] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_41')])
Dpt[19] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_43')])
# left arm
Dpt[20] = sp.Matrix([sp.Symbol('DPT_1_37'), sp.Symbol('DPT_2_37'), sp.Symbol('DPT_3_37')])
Dpt[21] = sp.Matrix([0.0, sp.Symbol('DPT_2_46'), 0.0])
Dpt[22] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_48')])
Dpt[23] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_50')])
## COM positions
Dg = nb_bodies*[None]
# waist
Dg[0] = sp.Matrix([sp.Symbol('L_1_6'), sp.Symbol('L_2_6'), sp.Symbol('L_3_6')])
# right leg
Dg[1] = sp.Matrix([sp.Symbol('L_1_7') , sp.Symbol('L_2_7') , sp.Symbol('L_3_7')])
Dg[2] = sp.Matrix([sp.Symbol('L_1_8') , sp.Symbol('L_2_8') , sp.Symbol('L_3_8')])
Dg[3] = sp.Matrix([sp.Symbol('L_1_9') , sp.Symbol('L_2_9') , sp.Symbol('L_3_9')])
Dg[4] = sp.Matrix([sp.Symbol('L_1_10'), sp.Symbol('L_2_10'), sp.Symbol('L_3_10')])
Dg[5] = sp.Matrix([sp.Symbol('L_1_11'), sp.Symbol('L_2_11'), sp.Symbol('L_3_11')])
Dg[6] = sp.Matrix([sp.Symbol('L_1_12'), 0.0 , sp.Symbol('L_3_12')])
# left leg
Dg[7] = sp.Matrix([sp.Symbol('L_1_13'), sp.Symbol('L_2_13'), sp.Symbol('L_3_13')])
Dg[8] = sp.Matrix([sp.Symbol('L_1_14'), sp.Symbol('L_2_14'), sp.Symbol('L_3_14')])
Dg[9] = sp.Matrix([sp.Symbol('L_1_15'), sp.Symbol('L_2_15'), sp.Symbol('L_3_15')])
Dg[10] = sp.Matrix([sp.Symbol('L_1_16'), sp.Symbol('L_2_16'), sp.Symbol('L_3_16')])
Dg[11] = sp.Matrix([sp.Symbol('L_1_17'), sp.Symbol('L_2_17'), sp.Symbol('L_3_17')])
Dg[12] = sp.Matrix([sp.Symbol('L_1_18'), 0.0 , sp.Symbol('L_3_18')])
# trunk
Dg[13] = sp.Matrix([sp.Symbol('L_1_19'), sp.Symbol('L_2_19'), sp.Symbol('L_3_19')])
Dg[14] = sp.Matrix([sp.Symbol('L_1_20'), sp.Symbol('L_2_20'), sp.Symbol('L_3_20')])
Dg[15] = sp.Matrix([sp.Symbol('L_1_21'), sp.Symbol('L_2_21'), sp.Symbol('L_3_21')])
# right arm
Dg[16] = sp.Matrix([sp.Symbol('L_1_22'), sp.Symbol('L_2_22'), sp.Symbol('L_3_22')])
Dg[17] = sp.Matrix([sp.Symbol('L_1_23'), sp.Symbol('L_2_23'), sp.Symbol('L_3_23')])
Dg[18] = sp.Matrix([sp.Symbol('L_1_24'), sp.Symbol('L_2_24'), sp.Symbol('L_3_24')])
Dg[19] = sp.Matrix([sp.Symbol('L_1_25'), sp.Symbol('L_2_25'), sp.Symbol('L_3_25')])
# left arm
Dg[20] = sp.Matrix([sp.Symbol('L_1_26'), sp.Symbol('L_2_26'), sp.Symbol('L_3_26')])
Dg[21] = sp.Matrix([sp.Symbol('L_1_27'), sp.Symbol('L_2_27'), sp.Symbol('L_3_27')])
Dg[22] = sp.Matrix([sp.Symbol('L_1_28'), sp.Symbol('L_2_28'), sp.Symbol('L_3_28')])
Dg[23] = sp.Matrix([sp.Symbol('L_1_29'), sp.Symbol('L_2_29'), sp.Symbol('L_3_29')])
# masses
M = np.array([ 'M_6', # waist
'M_7' , 'M_8' , 'M_9' , 'M_10', 'M_11', 'M_12', # right leg
'M_13', 'M_14', 'M_15', 'M_16', 'M_17', 'M_18', # left leg
'M_19', 'M_20', 'M_21', # trunk
'M_22', 'M_23', 'M_24', 'M_25', # right arm
'M_26', 'M_27', 'M_28', 'M_29' # left arm
])
# joint names
joint_id_names = np.array(['0', # waist
'RightHipPitch_id', 'RightHipRoll_id', 'RightHipYaw_id', 'RightKneePitch_id', 'RightFootRoll_id', 'RightFootPitch_id', # right leg
'LeftHipPitch_id' , 'LeftHipRoll_id' , 'LeftHipYaw_id' , 'LeftKneePitch_id' , 'LeftFootRoll_id' , 'LeftFootPitch_id' , # left leg
'TorsoRoll_id' , 'TorsoPitch_id' , 'TorsoYaw_id' , # trunk
'RightShPitch_id' , 'RightShRoll_id' , 'RightShYaw_id' , 'RightElbPitch_id', # right arm
'LeftShPitch_id' , 'LeftShRoll_id' , 'LeftShYaw_id' , 'LeftElbPitch_id' # left arm
])
out_file_name = 'forward_kinematics'
gen_symbolic_out(out_file_name, nb_bodies, rot_axis, parent_body_index, joint_id_names, Dpt, Dg, M)
| 2.515625 | 3 |
examples/last.py | 0xiso/PyMISP | 5 | 2697 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pymisp import PyMISP
from keys import misp_url, misp_key, misp_verifycert
import argparse
import os
import json
# Usage for pipe masters: ./last.py -l 5h | jq .
def init(url, key):
return PyMISP(url, key, misp_verifycert, 'json')
def download_last(m, last, out=None):
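    # 'last' is a relative time window understood by MISP, e.g. '5d', '12h' or '30m'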
result = m.download_last(last)
if out is None:
if 'response' in result:
print(json.dumps(result['response']))
else:
print('No results for that time period')
exit(0)
else:
with open(out, 'w') as f:
f.write(json.dumps(result['response']))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Download latest events from a MISP instance.')
parser.add_argument("-l", "--last", required=True, help="can be defined in days, hours, minutes (for example 5d or 12h or 30m).")
parser.add_argument("-o", "--output", help="Output file")
args = parser.parse_args()
if args.output is not None and os.path.exists(args.output):
        print('Output file already exists, aborting.')
exit(0)
misp = init(misp_url, misp_key)
download_last(misp, args.last, args.output)
| 2.609375 | 3 |
saleor/dashboard/urls.py | Chaoslecion123/Diver | 0 | 2698 | from django.conf.urls import include, url
from django.views.generic.base import TemplateView
from . import views as core_views
from .category.urls import urlpatterns as category_urls
from .collection.urls import urlpatterns as collection_urls
from .customer.urls import urlpatterns as customer_urls
from .discount.urls import urlpatterns as discount_urls
from .menu.urls import urlpatterns as menu_urls
from .order.urls import urlpatterns as order_urls
from .page.urls import urlpatterns as page_urls
from .product.urls import urlpatterns as product_urls
from .search.urls import urlpatterns as search_urls
from .shipping.urls import urlpatterns as shipping_urls
from .sites.urls import urlpatterns as site_urls
from .staff.urls import urlpatterns as staff_urls
from .taxes.urls import urlpatterns as taxes_urls
# BEGIN :: SoftButterfly Extensions --------------------------------------------
from .brand.urls import urlpatterns as brand_urls
from .widget.slider.urls import urlpatterns as slider_urls
from .widget.banner.urls import urlpatterns as banner_urls
from .widget.scene.urls import urlpatterns as scene_urls
from .widget.benefit.urls import urlpatterns as benefit_urls
from .store.physical_store.urls import urlpatterns as store_urls
from .store.social_network.urls import urlpatterns as social_network_urls
from .store.special_page.urls import urlpatterns as special_page_urls
from .store.bank_account.urls import urlpatterns as bank_account_urls
from .store.footer_item.urls import urlpatterns as footer_item_urls
# END :: SoftButterfly Extensions ----------------------------------------------
urlpatterns = [
url(r'^$', core_views.index, name='index'),
url(r'^categories/', include(category_urls)),
url(r'^collections/', include(collection_urls)),
url(r'^orders/', include(order_urls)),
url(r'^page/', include(page_urls)),
url(r'^products/', include(product_urls)),
url(r'^customers/', include(customer_urls)),
url(r'^staff/', include(staff_urls)),
url(r'^discounts/', include(discount_urls)),
url(r'^settings/', include(
site_urls + social_network_urls
+ special_page_urls + bank_account_urls + footer_item_urls)), # Extensions
url(r'^menu/', include(menu_urls)),
url(r'^shipping/', include(shipping_urls)),
url(r'^style-guide/', core_views.styleguide, name='styleguide'),
url(r'^search/', include(search_urls)),
url(r'^taxes/', include(taxes_urls)),
url(r'^next/', TemplateView.as_view(template_name='dashboard/next.html')),
# BEGIN :: SoftButterfly Extensions ----------------------------------------
url(r'^brand/', include(brand_urls)),
url(r'^slider/', include(slider_urls)),
url(r'^banner/', include(banner_urls)),
url(r'^scene/', include(scene_urls)),
url(r'^store/', include(store_urls)),
url(r'^benefit/', include(benefit_urls)),
# END :: SoftButterfly Extensions ------------------------------------------
]
| 1.601563 | 2 |
experiments/CUB_fewshot_raw/FRN/ResNet-12/train.py | Jf-Chen/FRN-main | 43 | 2699 | import os
import sys
import torch
import yaml
from functools import partial
sys.path.append('../../../../')
from trainers import trainer, frn_train
from datasets import dataloaders
from models.FRN import FRN
args = trainer.train_parser()
with open('../../../../config.yml', 'r') as f:
temp = yaml.safe_load(f)
data_path = os.path.abspath(temp['data_path'])
fewshot_path = os.path.join(data_path,'CUB_fewshot_raw')
pm = trainer.Path_Manager(fewshot_path=fewshot_path,args=args)
train_way = args.train_way
shots = [args.train_shot, args.train_query_shot]
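# each meta-training episode samples train_way classes, with train_shot support and
# train_query_shot query images per class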
train_loader = dataloaders.meta_train_dataloader(data_path=pm.train,
way=train_way,
shots=shots,
transform_type=args.train_transform_type)
model = FRN(way=train_way,
            shots=shots,
resnet=args.resnet)
train_func = partial(frn_train.default_train,train_loader=train_loader)
tm = trainer.Train_Manager(args,path_manager=pm,train_func=train_func)
tm.train(model)
tm.evaluate(model) | 2.109375 | 2 |