max_stars_repo_path (string) | max_stars_repo_name (string) | max_stars_count (int64) | id (string) | content (string) | score (float64) | int_score (int64)
---|---|---|---|---|---|---|
tests/test_hrepr.py | fabaff/hrepr | 0 | 3200 | from dataclasses import dataclass
from hrepr import H
from hrepr import hrepr as real_hrepr
from hrepr.h import styledir
from .common import one_test_per_assert
css_hrepr = open(f"{styledir}/hrepr.css", encoding="utf-8").read()
hrepr = real_hrepr.variant(fill_resources=False)
@dataclass
class Point:
x: int
y: int
class Opaque:
pass
def hshort(x, **kw):
return hrepr(x, max_depth=0, **kw)
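# hshort() renders the "short" form exercised below: max_depth=0 collapses nested
# structures to "..." and long strings are truncated according to string_cutoff.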
@one_test_per_assert
def test_singletons():
assert hrepr(True) == H.span["hreprv-True"]("True")
assert hrepr(False) == H.span["hreprv-False"]("False")
assert hrepr(None) == H.span["hreprv-None"]("None")
@one_test_per_assert
def test_numbers():
assert hrepr(123) == H.span["hreprt-int"]("123")
assert hrepr(1.25) == H.span["hreprt-float"]("1.25")
@one_test_per_assert
def test_string():
assert hshort("hello") == H.span["hreprt-str"]("hello")
assert hrepr("3 spaces") == H.span["hreprt-str"]("3 spaces")
assert hrepr("hello this is a bit long") == H.span["hreprt-str"](
"hello this is a bit long"
)
assert hshort("hello this is a bit long") == H.span["hreprt-str"](
"hello this is a b..."
)
assert hshort("hello this is a bit long", string_cutoff=10) == H.span[
"hreprt-str"
]("hello t...")
assert hshort("hello this is a bit long", string_cutoff=5) == H.span[
"hreprt-str"
]("he...")
assert hshort("hello this is a bit long", string_cutoff=10000) == H.span[
"hreprt-str"
]("hello this is a bit long")
@one_test_per_assert
def test_bytes():
assert hrepr(b"hello") == H.span["hreprt-bytes"]("68656c6c6f")
assert hshort(b"hello") == H.span["hreprt-bytes"]("68656c6c6f")
assert hrepr(b"hello this is a bit long") == H.span["hreprt-bytes"](
"68656c6c6f2074686973206973206120626974206c6f6e67"
)
assert hshort(b"hello this is a bit long") == H.span["hreprt-bytes"](
"68656c6c6f2074686..."
)
def test_function():
assert hrepr(Opaque) == H.span["hreprk-class"](
H.span["hrepr-defn-key"]("class"),
" ",
H.span["hrepr-defn-name"]("Opaque"),
)
def test_structures():
for typ, o, c in (
(tuple, "(", ")"),
(list, "[", "]"),
(set, "{", "}"),
(frozenset, "{", "}"),
):
clsname = typ.__name__
assert hrepr(typ((1, 2))) == H.div[
f"hreprt-{clsname}", "hrepr-bracketed"
](
H.div["hrepr-open"](o),
H.div["hreprl-h", "hrepr-body"](
H.div(H.span["hreprt-int"]("1")),
H.div(H.span["hreprt-int"]("2")),
),
H.div["hrepr-close"](c),
)
def test_short_structures():
for val, o, c in (
((1, 2), "(", ")"),
([1, 2], "[", "]"),
({1, 2}, "{", "}"),
(frozenset({1, 2}), "{", "}"),
({"x": 1, "y": 2}, "{", "}"),
):
clsname = type(val).__name__
assert hrepr(val, max_depth=0) == H.div[
f"hreprt-{clsname}", "hrepr-bracketed"
](
H.div["hrepr-open"](o),
H.div["hreprl-s", "hrepr-body"](H.div("...")),
H.div["hrepr-close"](c),
)
def test_dict():
pt = {"x": 1, "y": 2}
assert hrepr(pt) == H.div["hreprt-dict", "hrepr-bracketed"](
H.div["hrepr-open"]("{"),
H.table["hrepr-body"](
H.tr(
H.td(H.span["hreprt-str"]("x")),
H.td["hrepr-delim"](": "),
H.td(H.span["hreprt-int"]("1")),
),
H.tr(
H.td(H.span["hreprt-str"]("y")),
H.td["hrepr-delim"](": "),
H.td(H.span["hreprt-int"]("2")),
),
),
H.div["hrepr-close"]("}"),
)
def test_dataclass():
pt = Point(1, 2)
assert hrepr(pt) == H.div["hreprt-Point", "hrepr-instance", "hreprl-v"](
H.div["hrepr-title"]("Point"),
H.table["hrepr-body"](
H.tr(
H.td(H.span["hreprt-symbol"]("x")),
H.td["hrepr-delim"]("="),
H.td(H.span["hreprt-int"]("1")),
),
H.tr(
H.td(H.span["hreprt-symbol"]("y")),
H.td["hrepr-delim"]("="),
H.td(H.span["hreprt-int"]("2")),
),
),
)
assert hrepr(pt, max_depth=0) == H.div[
"hreprt-Point", "hrepr-instance", "hreprl-s"
](
H.div["hrepr-title"]("Point"),
H.div["hreprl-s", "hrepr-body"](H.div("...")),
)
def test_tag():
tg = H.span["hello"](1, 2, H.b("there"))
assert hrepr(tg) == tg
def test_multiref():
li = [1, 2]
lili = [li, li]
assert hrepr(lili) == H.div["hreprt-list", "hrepr-bracketed"](
H.div["hrepr-open"]("["),
H.div["hreprl-h", "hrepr-body"](
H.div(
H.div["hrepr-refbox"](
H.span["hrepr-ref"]("#", 1, "="),
H.div["hreprt-list", "hrepr-bracketed"](
H.div["hrepr-open"]("["),
H.div["hreprl-h", "hrepr-body"](
H.div(H.span["hreprt-int"]("1")),
H.div(H.span["hreprt-int"]("2")),
),
H.div["hrepr-close"]("]"),
),
)
),
H.div(
H.div["hrepr-refbox"](
H.span["hrepr-ref"]("#", 1, "="),
H.div["hreprt-list", "hrepr-bracketed"](
H.div["hrepr-open"]("["),
H.div["hreprl-s", "hrepr-body"](H.div("..."),),
H.div["hrepr-close"]("]"),
),
)
),
),
H.div["hrepr-close"]("]"),
)
assert hrepr(lili, shortrefs=True) == H.div[
"hreprt-list", "hrepr-bracketed"
](
H.div["hrepr-open"]("["),
H.div["hreprl-h", "hrepr-body"](
H.div(
H.div["hrepr-refbox"](
H.span["hrepr-ref"]("#", 1, "="),
H.div["hreprt-list", "hrepr-bracketed"](
H.div["hrepr-open"]("["),
H.div["hreprl-h", "hrepr-body"](
H.div(H.span["hreprt-int"]("1")),
H.div(H.span["hreprt-int"]("2")),
),
H.div["hrepr-close"]("]"),
),
)
),
H.div(H.span["hrepr-ref"]("#", 1)),
),
H.div["hrepr-close"]("]"),
)
def test_recursive():
li = [1]
li.append(li)
assert hrepr(li) == H.div["hrepr-refbox"](
H.span["hrepr-ref"]("#", 1, "="),
H.div["hreprt-list", "hrepr-bracketed"](
H.div["hrepr-open"]("["),
H.div["hreprl-h", "hrepr-body"](
H.div(H.span["hreprt-int"]("1")),
H.div(
H.div["hrepr-refbox"](
H.span["hrepr-ref"]("⟳", 1, "="),
H.div["hreprt-list", "hrepr-bracketed"](
H.div["hrepr-open"]("["),
H.div["hreprl-s", "hrepr-body"](H.div("..."),),
H.div["hrepr-close"]("]"),
),
)
),
),
H.div["hrepr-close"]("]"),
),
)
assert hrepr(li, shortrefs=True) == H.div["hrepr-refbox"](
H.span["hrepr-ref"]("#", 1, "="),
H.div["hreprt-list", "hrepr-bracketed"](
H.div["hrepr-open"]("["),
H.div["hreprl-h", "hrepr-body"](
H.div(H.span["hreprt-int"]("1")),
H.div(H.span["hrepr-ref"]("⟳", 1)),
),
H.div["hrepr-close"]("]"),
),
)
def test_unsupported():
assert hshort(Opaque()) == H.span["hreprt-Opaque"](
"<", "tests.test_hrepr.Opaque", ">"
)
def test_as_page():
utf8 = H.meta(
{"http-equiv": "Content-type"}, content="text/html", charset="UTF-8"
)
assert real_hrepr.page(1) == H.inline(
H.raw("<!DOCTYPE html>"),
H.html(H.head(utf8, H.style(css_hrepr)), H.body(real_hrepr(1)),),
)
def test_hrepr_multiarg():
assert hrepr(1, 2) == H.inline(
H.span["hreprt-int"]("1"), H.span["hreprt-int"]("2"),
)
def test_preprocess():
assert hrepr(1, preprocess=lambda x, hrepr: x + 1) == H.span["hreprt-int"](
"2"
)
def test_postprocess():
assert hrepr(1, postprocess=lambda x, obj, hrepr: x["newclass"]) == H.span[
"newclass", "hreprt-int"
]("1")
| 2.46875 | 2 |
sympy/assumptions/assume.py | shivangdubey/sympy | 2 | 3201 | import inspect
from sympy.core.cache import cacheit
from sympy.core.singleton import S
from sympy.core.sympify import _sympify
from sympy.logic.boolalg import Boolean
from sympy.utilities.source import get_class
from contextlib import contextmanager
class AssumptionsContext(set):
"""Set representing assumptions.
This is used to represent global assumptions, but you can also use this
    class to create your own local assumptions contexts. It is basically a thin
    wrapper around Python's set, so see its documentation for advanced usage.
Examples
========
>>> from sympy import Q
>>> from sympy.assumptions.assume import global_assumptions
>>> global_assumptions
AssumptionsContext()
>>> from sympy.abc import x
>>> global_assumptions.add(Q.real(x))
>>> global_assumptions
AssumptionsContext({Q.real(x)})
>>> global_assumptions.remove(Q.real(x))
>>> global_assumptions
AssumptionsContext()
>>> global_assumptions.clear()
"""
def add(self, *assumptions):
"""Add an assumption."""
for a in assumptions:
super().add(a)
def _sympystr(self, printer):
if not self:
return "%s()" % self.__class__.__name__
return "{}({})".format(self.__class__.__name__, printer._print_set(self))
global_assumptions = AssumptionsContext()
class AppliedPredicate(Boolean):
"""The class of expressions resulting from applying a Predicate.
Examples
========
>>> from sympy import Q, Symbol
>>> x = Symbol('x')
>>> Q.integer(x)
Q.integer(x)
>>> type(Q.integer(x))
<class 'sympy.assumptions.assume.AppliedPredicate'>
"""
__slots__ = ()
def __new__(cls, predicate, arg):
arg = _sympify(arg)
return Boolean.__new__(cls, predicate, arg)
is_Atom = True # do not attempt to decompose this
@property
def arg(self):
"""
Return the expression used by this assumption.
Examples
========
>>> from sympy import Q, Symbol
>>> x = Symbol('x')
>>> a = Q.integer(x + 1)
>>> a.arg
x + 1
"""
return self._args[1]
@property
def args(self):
return self._args[1:]
@property
def func(self):
return self._args[0]
@cacheit
def sort_key(self, order=None):
return (self.class_key(), (2, (self.func.name, self.arg.sort_key())),
S.One.sort_key(), S.One)
def __eq__(self, other):
if type(other) is AppliedPredicate:
return self._args == other._args
return False
def __hash__(self):
return super().__hash__()
def _eval_ask(self, assumptions):
return self.func.eval(self.arg, assumptions)
@property
def binary_symbols(self):
from sympy.core.relational import Eq, Ne
if self.func.name in ['is_true', 'is_false']:
i = self.arg
if i.is_Boolean or i.is_Symbol or isinstance(i, (Eq, Ne)):
return i.binary_symbols
return set()
class Predicate(Boolean):
"""A predicate is a function that returns a boolean value.
Predicates merely wrap their argument and remain unevaluated:
>>> from sympy import Q, ask
>>> type(Q.prime)
<class 'sympy.assumptions.assume.Predicate'>
>>> Q.prime.name
'prime'
>>> Q.prime(7)
Q.prime(7)
>>> _.func.name
'prime'
To obtain the truth value of an expression containing predicates, use
the function ``ask``:
>>> ask(Q.prime(7))
True
The tautological predicate ``Q.is_true`` can be used to wrap other objects:
>>> from sympy.abc import x
>>> Q.is_true(x > 1)
Q.is_true(x > 1)
"""
is_Atom = True
def __new__(cls, name, handlers=None):
obj = Boolean.__new__(cls)
obj.name = name
obj.handlers = handlers or []
return obj
def _hashable_content(self):
return (self.name,)
def __getnewargs__(self):
return (self.name,)
def __call__(self, expr):
return AppliedPredicate(self, expr)
def add_handler(self, handler):
self.handlers.append(handler)
def remove_handler(self, handler):
self.handlers.remove(handler)
@cacheit
def sort_key(self, order=None):
return self.class_key(), (1, (self.name,)), S.One.sort_key(), S.One
def eval(self, expr, assumptions=True):
"""
Evaluate self(expr) under the given assumptions.
This uses only direct resolution methods, not logical inference.
"""
res, _res = None, None
mro = inspect.getmro(type(expr))
for handler in self.handlers:
cls = get_class(handler)
for subclass in mro:
eval_ = getattr(cls, subclass.__name__, None)
if eval_ is None:
continue
res = eval_(expr, assumptions)
# Do not stop if value returned is None
# Try to check for higher classes
if res is None:
continue
if _res is None:
_res = res
elif res is None:
# since first resolutor was conclusive, we keep that value
res = _res
else:
# only check consistency if both resolutors have concluded
if _res != res:
raise ValueError('incompatible resolutors')
break
return res
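# Illustrative sketch (editorial addition, not part of the original module): ``eval``
# walks the MRO of the expression's type and, on every registered handler, looks up a
# resolver method named after each class.  A hypothetical handler could therefore be:
#
#     class ShinyHandler:
#         @staticmethod
#         def Symbol(expr, assumptions):
#             return True
#
#     shiny = Predicate('shiny', handlers=[ShinyHandler])
#     shiny.eval(Symbol('x'))  # -> True, resolved via ShinyHandler.Symbol
#
# Real handlers are normally registered later with ``add_handler``, often as dotted
# import-path strings that ``get_class`` resolves to the handler class.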
@contextmanager
def assuming(*assumptions):
""" Context manager for assumptions
Examples
========
>>> from sympy.assumptions import assuming, Q, ask
>>> from sympy.abc import x, y
>>> print(ask(Q.integer(x + y)))
None
>>> with assuming(Q.integer(x), Q.integer(y)):
... print(ask(Q.integer(x + y)))
True
"""
old_global_assumptions = global_assumptions.copy()
global_assumptions.update(assumptions)
try:
yield
finally:
global_assumptions.clear()
global_assumptions.update(old_global_assumptions)
| 2.65625 | 3 |
distancematrix/tests/consumer/test_distance_matrix.py | IDLabResearch/seriesdistancematrix | 12 | 3202 | import numpy as np
from unittest import TestCase
import numpy.testing as npt
from distancematrix.util import diag_indices_of
from distancematrix.consumer.distance_matrix import DistanceMatrix
class TestContextualMatrixProfile(TestCase):
def setUp(self):
self.dist_matrix = np.array([
[8.67, 1.10, 1.77, 1.26, 1.91, 4.29, 6.32, 4.24, 4.64, 5.06, 6.41, 4.07, 4.67, 9.32, 5.09],
[4.33, 4.99, 0.14, 2.79, 2.10, 6.26, 9.40, 4.14, 5.53, 4.26, 8.21, 5.91, 6.83, 9.26, 6.19],
[0.16, 9.05, 1.35, 4.78, 7.01, 4.36, 5.24, 8.81, 7.90, 5.84, 8.90, 7.88, 3.37, 4.70, 6.94],
[0.94, 8.70, 3.87, 6.29, 0.32, 1.79, 5.80, 2.61, 1.43, 6.32, 1.62, 0.20, 2.28, 7.11, 2.15],
[9.90, 4.51, 2.11, 2.83, 5.52, 8.55, 6.90, 0.24, 1.58, 4.26, 8.75, 3.71, 9.93, 8.33, 0.38],
[7.30, 5.84, 9.63, 1.95, 3.76, 3.61, 9.42, 5.56, 5.09, 7.07, 1.90, 4.78, 1.06, 0.69, 3.67],
[2.17, 8.37, 3.99, 4.28, 4.37, 2.86, 8.61, 3.39, 8.37, 6.95, 6.57, 1.79, 7.40, 4.41, 7.64],
[6.26, 0.29, 6.44, 8.84, 1.24, 2.52, 6.25, 3.07, 5.55, 3.19, 8.16, 5.32, 9.01, 0.39, 9.],
[4.67, 8.88, 3.05, 3.06, 2.36, 8.34, 4.91, 5.46, 9.25, 9.78, 0.03, 5.64, 5.10, 3.58, 6.92],
[1.01, 0.91, 6.28, 7.79, 0.68, 5.50, 6.72, 5.11, 0.80, 9.30, 9.77, 4.71, 3.26, 7.29, 6.26]])
def mock_initialise(self, dm):
dm.initialise(1, self.dist_matrix.shape[0], self.dist_matrix.shape[1])
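    # mock_initialise() configures the consumer for a single dimension with the full
    # self.dist_matrix shape: rows act as the query axis and columns as the series
    # axis, as the column/diagonal tests below exercise.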
def test_process_diagonal(self):
dm = DistanceMatrix()
self.mock_initialise(dm)
for diag in range(-self.dist_matrix.shape[0] + 1, self.dist_matrix.shape[1]):
diag_ind = diag_indices_of(self.dist_matrix, diag)
dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind]))
npt.assert_equal(dm.distance_matrix, self.dist_matrix)
def test_process_diagonal_partial_calculation(self):
dm = DistanceMatrix()
self.mock_initialise(dm)
correct = np.full_like(self.dist_matrix, np.nan, dtype=float)
for diag in range(-8, self.dist_matrix.shape[1], 3):
diag_ind = diag_indices_of(self.dist_matrix, diag)
dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind]))
correct[diag_ind] = self.dist_matrix[diag_ind]
npt.assert_equal(dm.distance_matrix, correct)
def test_process_column(self):
dm = DistanceMatrix()
self.mock_initialise(dm)
for column in range(0, self.dist_matrix.shape[1]):
dm.process_column(column, np.atleast_2d(self.dist_matrix[:, column]))
npt.assert_equal(dm.distance_matrix, self.dist_matrix)
def test_process_column_partial_calculation(self):
dm = DistanceMatrix()
self.mock_initialise(dm)
correct = np.full_like(self.dist_matrix, np.nan, dtype=float)
for column in [2, 3, 4, 5, 10, 11, 12]:
dm.process_column(column, np.atleast_2d(self.dist_matrix[:, column]))
correct[:, column] = self.dist_matrix[:, column]
npt.assert_equal(dm.distance_matrix, correct)
def test_streaming_process_column(self):
dm = DistanceMatrix()
dm.initialise(1, 5, 5)
dm.process_column(0, np.atleast_2d(self.dist_matrix[0, 0]))
dm.process_column(1, np.atleast_2d(self.dist_matrix[:2, 1]))
expected = np.full((5, 5), np.nan)
expected[0, 0] = self.dist_matrix[0, 0]
expected[:2, 1] = self.dist_matrix[:2, 1]
npt.assert_equal(dm.distance_matrix, expected)
for column in range(0, 5):
dm.process_column(column, np.atleast_2d(self.dist_matrix[:5, :5][:, column]))
npt.assert_equal(dm.distance_matrix, self.dist_matrix[:5, :5])
dm.shift_query(1)
dm.shift_series(3)
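        # Shifting slides the visible 5x5 window along the query (rows) and series
        # (columns) axes; cells that fall outside the new window are dropped and read
        # back as NaN, which is what the expectation below encodes.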
correct = np.full((5, 5), np.nan)
correct[0:4, 0:2] = self.dist_matrix[1:5, 3:5]
npt.assert_equal(dm.distance_matrix, correct)
for column in range(0, 5):
dm.process_column(column, np.atleast_2d(self.dist_matrix[1:6, 3:8][:, column]))
npt.assert_equal(dm.distance_matrix, self.dist_matrix[1:6, 3:8])
dm.shift_query(2)
dm.shift_series(1)
dm.process_column(4, np.atleast_2d(self.dist_matrix[3:8, 8]))
correct = np.full((5, 5), np.nan)
correct[0:3, 0:4] = self.dist_matrix[3:6, 4:8]
correct[:, 4] = self.dist_matrix[3:8, 8]
npt.assert_equal(dm.distance_matrix, correct)
def test_streaming_process_diagonal(self):
dm = DistanceMatrix()
dm.initialise(1, 5, 5)
dm.process_diagonal(0, np.atleast_2d(self.dist_matrix[0, 0]))
diag_ind = diag_indices_of(self.dist_matrix[:3, :3], 1)
dm.process_diagonal(1, np.atleast_2d(np.atleast_2d(self.dist_matrix[diag_ind])))
expected = np.full((5, 5), np.nan)
expected[0, 0] = self.dist_matrix[0, 0]
expected[0, 1] = self.dist_matrix[0, 1]
expected[1, 2] = self.dist_matrix[1, 2]
npt.assert_equal(dm.distance_matrix, expected)
for diag in range(-4,5):
diag_ind = diag_indices_of(self.dist_matrix[:5, :5], diag)
dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind]))
npt.assert_equal(dm.distance_matrix, self.dist_matrix[:5, :5])
dm.shift_query(2)
dm.shift_series(1)
expected = self.dist_matrix[2:7, 1:6].copy()
expected[-2:, :] = np.nan
expected[:, -1:] = np.nan
npt.assert_equal(dm.distance_matrix, expected)
for diag in range(-4,5):
diag_ind = diag_indices_of(self.dist_matrix[:5, :5], diag)
dm.process_diagonal(diag, np.atleast_2d(self.dist_matrix[diag_ind]))
npt.assert_equal(dm.distance_matrix, self.dist_matrix[:5, :5])
| 2.421875 | 2 |
supervisor/const.py | peddamat/home-assistant-supervisor-test | 0 | 3203 | """Constants file for Supervisor."""
from enum import Enum
from ipaddress import ip_network
from pathlib import Path
SUPERVISOR_VERSION = "DEV"
URL_HASSIO_ADDONS = "https://github.com/home-assistant/addons"
URL_HASSIO_APPARMOR = "https://version.home-assistant.io/apparmor.txt"
URL_HASSIO_VERSION = "https://version.home-assistant.io/{channel}.json"
SUPERVISOR_DATA = Path("/data")
FILE_HASSIO_ADDONS = Path(SUPERVISOR_DATA, "addons.json")
FILE_HASSIO_AUTH = Path(SUPERVISOR_DATA, "auth.json")
FILE_HASSIO_CONFIG = Path(SUPERVISOR_DATA, "config.json")
FILE_HASSIO_DISCOVERY = Path(SUPERVISOR_DATA, "discovery.json")
FILE_HASSIO_DOCKER = Path(SUPERVISOR_DATA, "docker.json")
FILE_HASSIO_HOMEASSISTANT = Path(SUPERVISOR_DATA, "homeassistant.json")
FILE_HASSIO_INGRESS = Path(SUPERVISOR_DATA, "ingress.json")
FILE_HASSIO_SERVICES = Path(SUPERVISOR_DATA, "services.json")
FILE_HASSIO_UPDATER = Path(SUPERVISOR_DATA, "updater.json")
FILE_SUFFIX_CONFIGURATION = [".yaml", ".yml", ".json"]
MACHINE_ID = Path("/etc/machine-id")
SOCKET_DBUS = Path("/run/dbus/system_bus_socket")
SOCKET_DOCKER = Path("/run/docker.sock")
RUN_SUPERVISOR_STATE = Path("/run/supervisor")
SYSTEMD_JOURNAL_PERSISTENT = Path("/var/log/journal")
SYSTEMD_JOURNAL_VOLATILE = Path("/run/log/journal")
DOCKER_NETWORK = "hassio"
DOCKER_NETWORK_MASK = ip_network("172.30.32.0/23")
DOCKER_NETWORK_RANGE = ip_network("172.30.33.0/24")
# This needs to match the dockerd --cpu-rt-runtime= argument.
DOCKER_CPU_RUNTIME_TOTAL = 950_000
# The rt runtimes are guarantees, hence we cannot allocate more
# time than available! Support up to 5 containers with equal time
# allocated.
# Note that the time is multiplied by CPU count. This means that
# a single container can schedule up to 950/5*4 = 760ms in RT priority
# on a quad core system.
DOCKER_CPU_RUNTIME_ALLOCATION = int(DOCKER_CPU_RUNTIME_TOTAL / 5)
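# Both values above are in microseconds, matching the unit of dockerd's
# --cpu-rt-runtime flag: each realtime container is granted roughly 190 ms of the
# 950 ms total RT budget per period (scaled by the CPU count, as noted above).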
DNS_SUFFIX = "local.hass.io"
LABEL_ARCH = "io.hass.arch"
LABEL_MACHINE = "io.hass.machine"
LABEL_TYPE = "io.hass.type"
LABEL_VERSION = "io.hass.version"
META_ADDON = "addon"
META_HOMEASSISTANT = "homeassistant"
META_SUPERVISOR = "supervisor"
JSON_DATA = "data"
JSON_MESSAGE = "message"
JSON_RESULT = "result"
RESULT_ERROR = "error"
RESULT_OK = "ok"
CONTENT_TYPE_BINARY = "application/octet-stream"
CONTENT_TYPE_JSON = "application/json"
CONTENT_TYPE_PNG = "image/png"
CONTENT_TYPE_TAR = "application/tar"
CONTENT_TYPE_TEXT = "text/plain"
CONTENT_TYPE_URL = "application/x-www-form-urlencoded"
COOKIE_INGRESS = "ingress_session"
HEADER_TOKEN = "X-Supervisor-Token"
HEADER_TOKEN_OLD = "X-Hassio-Key"
ENV_TIME = "TZ"
ENV_TOKEN = "SUPERVISOR_TOKEN"
ENV_TOKEN_HASSIO = "HASSIO_TOKEN"
ENV_HOMEASSISTANT_REPOSITORY = "HOMEASSISTANT_REPOSITORY"
ENV_SUPERVISOR_DEV = "SUPERVISOR_DEV"
ENV_SUPERVISOR_MACHINE = "SUPERVISOR_MACHINE"
ENV_SUPERVISOR_NAME = "SUPERVISOR_NAME"
ENV_SUPERVISOR_SHARE = "SUPERVISOR_SHARE"
ENV_SUPERVISOR_CPU_RT = "SUPERVISOR_CPU_RT"
REQUEST_FROM = "HASSIO_FROM"
ATTR_ACCESS_TOKEN = "access_token"
ATTR_ACCESSPOINTS = "accesspoints"
ATTR_ACTIVE = "active"
ATTR_ADDON = "addon"
ATTR_ADDONS = "addons"
ATTR_ADDONS_CUSTOM_LIST = "addons_custom_list"
ATTR_ADDONS_REPOSITORIES = "addons_repositories"
ATTR_ADDRESS = "address"
ATTR_ADDRESS_DATA = "address-data"
ATTR_ADMIN = "admin"
ATTR_ADVANCED = "advanced"
ATTR_APPARMOR = "apparmor"
ATTR_APPLICATION = "application"
ATTR_ARCH = "arch"
ATTR_ARGS = "args"
ATTR_LABELS = "labels"
ATTR_AUDIO = "audio"
ATTR_AUDIO_INPUT = "audio_input"
ATTR_AUDIO_OUTPUT = "audio_output"
ATTR_AUTH = "auth"
ATTR_AUTH_API = "auth_api"
ATTR_AUTO_UPDATE = "auto_update"
ATTR_AVAILABLE = "available"
ATTR_BLK_READ = "blk_read"
ATTR_BLK_WRITE = "blk_write"
ATTR_BOARD = "board"
ATTR_BOOT = "boot"
ATTR_BRANCH = "branch"
ATTR_BUILD = "build"
ATTR_BUILD_FROM = "build_from"
ATTR_CARD = "card"
ATTR_CHANGELOG = "changelog"
ATTR_CHANNEL = "channel"
ATTR_CHASSIS = "chassis"
ATTR_CHECKS = "checks"
ATTR_CLI = "cli"
ATTR_CONFIG = "config"
ATTR_CONFIGURATION = "configuration"
ATTR_CONNECTED = "connected"
ATTR_CONNECTIONS = "connections"
ATTR_CONTAINERS = "containers"
ATTR_CPE = "cpe"
ATTR_CPU_PERCENT = "cpu_percent"
ATTR_CRYPTO = "crypto"
ATTR_DATA = "data"
ATTR_DATE = "date"
ATTR_DEBUG = "debug"
ATTR_DEBUG_BLOCK = "debug_block"
ATTR_DEFAULT = "default"
ATTR_DEPLOYMENT = "deployment"
ATTR_DESCRIPTON = "description"
ATTR_DETACHED = "detached"
ATTR_DEVICES = "devices"
ATTR_DEVICETREE = "devicetree"
ATTR_DIAGNOSTICS = "diagnostics"
ATTR_DISCOVERY = "discovery"
ATTR_DISK = "disk"
ATTR_DISK_FREE = "disk_free"
ATTR_DISK_LIFE_TIME = "disk_life_time"
ATTR_DISK_TOTAL = "disk_total"
ATTR_DISK_USED = "disk_used"
ATTR_DNS = "dns"
ATTR_DOCKER = "docker"
ATTR_DOCKER_API = "docker_api"
ATTR_DOCUMENTATION = "documentation"
ATTR_DOMAINS = "domains"
ATTR_ENABLE = "enable"
ATTR_ENABLED = "enabled"
ATTR_ENVIRONMENT = "environment"
ATTR_EVENT = "event"
ATTR_FEATURES = "features"
ATTR_FILENAME = "filename"
ATTR_FLAGS = "flags"
ATTR_FOLDERS = "folders"
ATTR_FREQUENCY = "frequency"
ATTR_FULL_ACCESS = "full_access"
ATTR_GATEWAY = "gateway"
ATTR_GPIO = "gpio"
ATTR_HASSIO_API = "hassio_api"
ATTR_HASSIO_ROLE = "hassio_role"
ATTR_HASSOS = "hassos"
ATTR_HEALTHY = "healthy"
ATTR_HOMEASSISTANT = "homeassistant"
ATTR_HOMEASSISTANT_API = "homeassistant_api"
ATTR_HOST = "host"
ATTR_HOST_DBUS = "host_dbus"
ATTR_HOST_INTERNET = "host_internet"
ATTR_HOST_IPC = "host_ipc"
ATTR_HOST_NETWORK = "host_network"
ATTR_HOST_PID = "host_pid"
ATTR_HOSTNAME = "hostname"
ATTR_ICON = "icon"
ATTR_ID = "id"
ATTR_IMAGE = "image"
ATTR_IMAGES = "images"
ATTR_INDEX = "index"
ATTR_INGRESS = "ingress"
ATTR_INGRESS_ENTRY = "ingress_entry"
ATTR_INGRESS_PANEL = "ingress_panel"
ATTR_INGRESS_PORT = "ingress_port"
ATTR_INGRESS_TOKEN = "ingress_token"
ATTR_INGRESS_URL = "ingress_url"
ATTR_INIT = "init"
ATTR_INITIALIZE = "initialize"
ATTR_INPUT = "input"
ATTR_INSTALLED = "installed"
ATTR_INTERFACE = "interface"
ATTR_INTERFACES = "interfaces"
ATTR_IP_ADDRESS = "ip_address"
ATTR_IPV4 = "ipv4"
ATTR_IPV6 = "ipv6"
ATTR_ISSUES = "issues"
ATTR_KERNEL = "kernel"
ATTR_KERNEL_MODULES = "kernel_modules"
ATTR_LAST_BOOT = "last_boot"
ATTR_LEGACY = "legacy"
ATTR_LOCALS = "locals"
ATTR_LOCATON = "location"
ATTR_LOGGING = "logging"
ATTR_LOGO = "logo"
ATTR_LONG_DESCRIPTION = "long_description"
ATTR_MAC = "mac"
ATTR_MACHINE = "machine"
ATTR_MAINTAINER = "maintainer"
ATTR_MAP = "map"
ATTR_MEMORY_LIMIT = "memory_limit"
ATTR_MEMORY_PERCENT = "memory_percent"
ATTR_MEMORY_USAGE = "memory_usage"
ATTR_MESSAGE = "message"
ATTR_METHOD = "method"
ATTR_MODE = "mode"
ATTR_MULTICAST = "multicast"
ATTR_NAME = "name"
ATTR_NAMESERVERS = "nameservers"
ATTR_NETWORK = "network"
ATTR_NETWORK_DESCRIPTION = "network_description"
ATTR_NETWORK_RX = "network_rx"
ATTR_NETWORK_TX = "network_tx"
ATTR_OBSERVER = "observer"
ATTR_OPERATING_SYSTEM = "operating_system"
ATTR_OPTIONS = "options"
ATTR_OTA = "ota"
ATTR_OUTPUT = "output"
ATTR_PANEL_ADMIN = "panel_admin"
ATTR_PANEL_ICON = "panel_icon"
ATTR_PANEL_TITLE = "panel_title"
ATTR_PANELS = "panels"
ATTR_PARENT = "parent"
ATTR_PASSWORD = "password"
ATTR_PORT = "port"
ATTR_PORTS = "ports"
ATTR_PORTS_DESCRIPTION = "ports_description"
ATTR_PREFIX = "prefix"
ATTR_PRIMARY = "primary"
ATTR_PRIORITY = "priority"
ATTR_PRIVILEGED = "privileged"
ATTR_PROTECTED = "protected"
ATTR_PROVIDERS = "providers"
ATTR_PSK = "psk"
ATTR_RATING = "rating"
ATTR_REALTIME = "realtime"
ATTR_REFRESH_TOKEN = "refresh_token"
ATTR_REGISTRIES = "registries"
ATTR_REGISTRY = "registry"
ATTR_REPOSITORIES = "repositories"
ATTR_REPOSITORY = "repository"
ATTR_SCHEMA = "schema"
ATTR_SECURITY = "security"
ATTR_SERIAL = "serial"
ATTR_SERVERS = "servers"
ATTR_SERVICE = "service"
ATTR_SERVICES = "services"
ATTR_SESSION = "session"
ATTR_SIGNAL = "signal"
ATTR_SIZE = "size"
ATTR_SLUG = "slug"
ATTR_SNAPSHOT_EXCLUDE = "snapshot_exclude"
ATTR_SNAPSHOTS = "snapshots"
ATTR_SOURCE = "source"
ATTR_SQUASH = "squash"
ATTR_SSD = "ssid"
ATTR_SSID = "ssid"
ATTR_SSL = "ssl"
ATTR_STAGE = "stage"
ATTR_STARTUP = "startup"
ATTR_STATE = "state"
ATTR_STATIC = "static"
ATTR_STDIN = "stdin"
ATTR_STORAGE = "storage"
ATTR_SUGGESTIONS = "suggestions"
ATTR_SUPERVISOR = "supervisor"
ATTR_SUPERVISOR_INTERNET = "supervisor_internet"
ATTR_SUPPORTED = "supported"
ATTR_SUPPORTED_ARCH = "supported_arch"
ATTR_SYSTEM = "system"
ATTR_JOURNALD = "journald"
ATTR_TIMEOUT = "timeout"
ATTR_TIMEZONE = "timezone"
ATTR_TITLE = "title"
ATTR_TMPFS = "tmpfs"
ATTR_TOTP = "totp"
ATTR_TRANSLATIONS = "translations"
ATTR_TYPE = "type"
ATTR_UART = "uart"
ATTR_UDEV = "udev"
ATTR_UNHEALTHY = "unhealthy"
ATTR_UNSAVED = "unsaved"
ATTR_UNSUPPORTED = "unsupported"
ATTR_UPDATE_AVAILABLE = "update_available"
ATTR_UPDATE_KEY = "update_key"
ATTR_URL = "url"
ATTR_USB = "usb"
ATTR_USER = "user"
ATTR_USERNAME = "username"
ATTR_UUID = "uuid"
ATTR_VALID = "valid"
ATTR_VALUE = "value"
ATTR_VERSION = "version"
ATTR_VERSION_LATEST = "version_latest"
ATTR_VIDEO = "video"
ATTR_VLAN = "vlan"
ATTR_VOLUME = "volume"
ATTR_VPN = "vpn"
ATTR_WAIT_BOOT = "wait_boot"
ATTR_WATCHDOG = "watchdog"
ATTR_WEBUI = "webui"
ATTR_WIFI = "wifi"
ATTR_CONTENT_TRUST = "content_trust"
ATTR_FORCE_SECURITY = "force_security"
PROVIDE_SERVICE = "provide"
NEED_SERVICE = "need"
WANT_SERVICE = "want"
MAP_CONFIG = "config"
MAP_SSL = "ssl"
MAP_ADDONS = "addons"
MAP_BACKUP = "backup"
MAP_SHARE = "share"
MAP_MEDIA = "media"
ARCH_ARMHF = "armhf"
ARCH_ARMV7 = "armv7"
ARCH_AARCH64 = "aarch64"
ARCH_AMD64 = "amd64"
ARCH_I386 = "i386"
ARCH_ALL = [ARCH_ARMHF, ARCH_ARMV7, ARCH_AARCH64, ARCH_AMD64, ARCH_I386]
REPOSITORY_CORE = "core"
REPOSITORY_LOCAL = "local"
FOLDER_HOMEASSISTANT = "homeassistant"
FOLDER_SHARE = "share"
FOLDER_ADDONS = "addons/local"
FOLDER_SSL = "ssl"
FOLDER_MEDIA = "media"
SNAPSHOT_FULL = "full"
SNAPSHOT_PARTIAL = "partial"
CRYPTO_AES128 = "aes128"
SECURITY_PROFILE = "profile"
SECURITY_DEFAULT = "default"
SECURITY_DISABLE = "disable"
ROLE_DEFAULT = "default"
ROLE_HOMEASSISTANT = "homeassistant"
ROLE_BACKUP = "backup"
ROLE_MANAGER = "manager"
ROLE_ADMIN = "admin"
ROLE_ALL = [ROLE_DEFAULT, ROLE_HOMEASSISTANT, ROLE_BACKUP, ROLE_MANAGER, ROLE_ADMIN]
class AddonBoot(str, Enum):
"""Boot mode for the add-on."""
AUTO = "auto"
MANUAL = "manual"
class AddonStartup(str, Enum):
"""Startup types of Add-on."""
INITIALIZE = "initialize"
SYSTEM = "system"
SERVICES = "services"
APPLICATION = "application"
ONCE = "once"
class AddonStage(str, Enum):
"""Stage types of add-on."""
STABLE = "stable"
EXPERIMENTAL = "experimental"
DEPRECATED = "deprecated"
class AddonState(str, Enum):
"""State of add-on."""
STARTED = "started"
STOPPED = "stopped"
UNKNOWN = "unknown"
ERROR = "error"
class UpdateChannel(str, Enum):
"""Core supported update channels."""
STABLE = "stable"
BETA = "beta"
DEV = "dev"
class CoreState(str, Enum):
"""Represent current loading state."""
INITIALIZE = "initialize"
SETUP = "setup"
STARTUP = "startup"
RUNNING = "running"
FREEZE = "freeze"
SHUTDOWN = "shutdown"
STOPPING = "stopping"
CLOSE = "close"
class LogLevel(str, Enum):
"""Logging level of system."""
DEBUG = "debug"
INFO = "info"
WARNING = "warning"
ERROR = "error"
CRITICAL = "critical"
class HostFeature(str, Enum):
"""Host feature."""
HASSOS = "hassos"
HOSTNAME = "hostname"
NETWORK = "network"
REBOOT = "reboot"
SERVICES = "services"
SHUTDOWN = "shutdown"
| 2.125 | 2 |
quaesit/agent.py | jgregoriods/quaesit | 0 | 3204 | import inspect
from math import hypot, sin, asin, cos, radians, degrees
from abc import ABCMeta, abstractmethod
from random import randint, choice
from typing import Dict, List, Tuple, Union
class Agent(metaclass=ABCMeta):
"""
Class to represent an agent in an agent-based model.
"""
_id = 0
colors = ['blue', 'brown', 'cyan', 'gray', 'green', 'magenta', 'orange',
'pink', 'purple', 'red', 'yellow']
def __init__(self, world, coords: Tuple = None):
self._id = Agent._id
Agent._id += 1
self.world = world
self.coords = coords or (randint(0, self.world.width - 1),
randint(0, self.world.height - 1))
self.direction = 90
self.breed = self.__class__.__name__.lower()
self.icon = '.'
self.color = choice(self.colors)
self.world.add_agent(self)
def die(self):
"""
Remove the agent from the world.
"""
del self.world.agents[self._id]
self.world.grid[self.coords]['agents'].remove(self)
del self
def hatch(self):
"""
Creates an agent and initializes it with the same parameters as
oneself.
"""
sig = inspect.signature(self.__init__)
filter_keys = [param.name for param in sig.parameters.values()
if param.kind == param.POSITIONAL_OR_KEYWORD]
filtered_dict = {filter_key: self.__dict__[filter_key]
for filter_key in filter_keys}
return self.__class__(**filtered_dict)
def move_to(self, coords: Tuple):
"""
Places the agent in a different cell of the world grid.
"""
self.world.remove_from_grid(self)
self.coords = coords
self.world.place_on_grid(self)
def cell_here(self, layer = None):
"""
Returns the value of a layer in the model's grid for the cell
where the agent is. If no layer is specified, the values of all
layers are returned.
"""
if layer is not None:
return self.world.grid[self.coords][layer]
else:
return self.world.grid[self.coords]
def get_distance(self, coords: Tuple) -> int:
"""
Returns the distance (in cells) from the agent to a pair of
coordinates.
"""
x, y = coords
return round(hypot((x - self.coords[0]), (y - self.coords[1])))
def cells_in_radius(self, radius: int) -> Dict:
"""
Returns all cells and respective attributes within a distance
of the agent.
"""
if self.world.torus:
neighborhood = {self.world.to_torus((x, y)):
self.world.grid[self.world.to_torus((x, y))]
for x in range(self.coords[0] - radius,
self.coords[0] + radius + 1)
for y in range(self.coords[1] - radius,
self.coords[1] + radius + 1)
if self.get_distance((x, y)) <= radius}
else:
neighborhood = {(x, y): self.world.grid[(x, y)]
for x in range(self.coords[0] - radius,
self.coords[0] + radius + 1)
for y in range(self.coords[1] - radius,
self.coords[1] + radius + 1)
if (self.get_distance((x, y)) <= radius and
(x, y) in self.world.grid)}
return neighborhood
def empty_cells_in_radius(self, radius: int) -> Dict:
"""
Returns all empty cells (with no agents on them) and respective
attributes within a distance of the agent.
"""
if self.world.torus:
neighborhood = {self.world.to_torus((x, y)):
self.world.grid[self.world.to_torus((x, y))]
for x in range(self.coords[0] - radius,
self.coords[0] + radius + 1)
for y in range(self.coords[1] - radius,
self.coords[1] + radius + 1)
if (self.get_distance((x, y)) <= radius and not
self.world.grid[self.world.to_torus((x, y))]
['agents'])}
else:
neighborhood = {(x, y): self.world.grid[(x, y)]
for x in range(self.coords[0] - radius,
self.coords[0] + radius + 1)
for y in range(self.coords[1] - radius,
self.coords[1] + radius + 1)
if (self.get_distance((x, y)) <= radius and
(x, y) in self.world.grid and not
self.world.grid[(x, y)]['agents'])}
return neighborhood
def nearest_cell(self, cells: Union[List, Dict]) -> Tuple:
"""
Given a list or dictionary of cells, returns the coordinates of
the cell that is nearest to the agent.
"""
dists = {cell: self.get_distance(cell) for cell in cells}
return min(dists, key=dists.get)
def agents_in_radius(self, radius: int):
"""
Returns all agents within a distance of oneself.
"""
neighborhood = self.cells_in_radius(radius)
neighbors = [agent for coords in neighborhood
for agent in self.world.grid[coords]['agents']
if agent is not self]
return neighbors
def agents_here(self) -> List:
"""
Returns all agents located on the same cell as oneself.
"""
return [agent for agent in self.world.grid[self.coords]['agents']
if agent is not self]
def nearest_agent(self, agents: List = None):
"""
Given a list of agents, returns the agent that is nearest to
oneself. If no list is provided, all agents are evaluated.
"""
if agents is None:
agents = [self.world.agents[_id] for _id in self.world.agents]
dists = {agent: self.get_distance(agent.coords)
for agent in agents if agent is not self}
return min(dists, key=dists.get)
def turn_right(self, angle: int = 90):
"""
Rotates the agent's direction a number of degrees to the right.
"""
self.direction = round((self.direction - angle) % 360)
def turn_left(self, angle: int = 90):
"""
Rotates the agent's direction a number of degrees to the left.
"""
self.direction = round((self.direction + angle) % 360)
def forward(self, n_steps: int = 1):
"""
Moves the agent a number of cells forward in the direction it
is currently facing.
"""
x = round(self.coords[0] + cos(radians(self.direction)) * n_steps)
y = round(self.coords[1] + sin(radians(self.direction)) * n_steps)
if self.world.torus:
self.move_to(self.world.to_torus((x, y)))
elif (x, y) in self.world.grid:
self.move_to((x, y))
def face_towards(self, coords: Tuple):
"""
Turns the agent's direction towards a given pair of coordinates.
"""
if coords != self.coords:
xdif = coords[0] - self.coords[0]
ydif = coords[1] - self.coords[1]
dist = hypot(xdif, ydif)
angle = degrees(asin(ydif / dist))
if xdif < 0:
self.direction = round(180 - angle)
else:
self.direction = round((360 + angle) % 360)
def random_walk(self, n_steps: int = 1):
"""
        Moves the agent one cell forward in a random direction, repeating
        this for the given number of steps.
"""
for i in range(n_steps):
self.turn_right(randint(0, 360))
self.forward()
@abstractmethod
def step(self):
"""
Methods to be performed by the agent at each step of the
simulation.
"""
raise NotImplementedError
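
# Usage sketch (illustrative addition, not part of the original module): a concrete
# agent only needs to implement step(); everything else is inherited.  It assumes a
# compatible ``world`` object (exposing width, height, torus, grid and add_agent) is
# supplied by the caller, exactly as Agent.__init__ expects.
class RandomWalker(Agent):
    def step(self):
        # Perform one random-walk move per simulation tick.
        self.random_walk()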
| 3.171875 | 3 |
models/LRF_COCO_300.py | vaesl/LRF-Net | 180 | 3205 | import torch
import torch.nn as nn
import os
import torch.nn.functional as F
class LDS(nn.Module):
def __init__(self,):
super(LDS, self).__init__()
self.pool1 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0)
self.pool2 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0)
self.pool3 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=1)
def forward(self, x):
x_pool1 = self.pool1(x)
x_pool2 = self.pool2(x_pool1)
x_pool3 = self.pool3(x_pool2)
return x_pool3
class ConvBlock(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True, bn=True, bias=False):
super(ConvBlock, self).__init__()
self.out_channels = out_planes
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)
self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True) if bn else None
self.relu = nn.ReLU(inplace=False) if relu else None
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
if self.relu is not None:
x = self.relu(x)
return x
class LSN_init(nn.Module):
def __init__(self, in_planes, out_planes, stride=1):
super(LSN_init, self).__init__()
self.out_channels = out_planes
inter_planes = out_planes // 4
self.part_a = nn.Sequential(
ConvBlock(in_planes, inter_planes, kernel_size=(3, 3), stride=stride, padding=1),
ConvBlock(inter_planes, inter_planes, kernel_size=1, stride=1),
ConvBlock(inter_planes, inter_planes, kernel_size=(3, 3), stride=stride, padding=1)
)
self.part_b = ConvBlock(inter_planes, out_planes, kernel_size=1, stride=1, relu=False)
def forward(self, x):
out1 = self.part_a(x)
out2 = self.part_b(out1)
return out1, out2
class LSN_later(nn.Module):
def __init__(self, in_planes, out_planes, stride=1):
super(LSN_later, self).__init__()
self.out_channels = out_planes
inter_planes = out_planes // 4
self.part_a = ConvBlock(in_planes, inter_planes, kernel_size=(3, 3), stride=stride, padding=1)
self.part_b = ConvBlock(inter_planes, out_planes, kernel_size=1, stride=1, relu=False)
def forward(self, x):
out1 = self.part_a(x)
out2 = self.part_b(out1)
return out1, out2
class IBN(nn.Module):
def __init__(self, out_planes, bn=True):
super(IBN, self).__init__()
self.out_channels = out_planes
self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True) if bn else None
def forward(self, x):
if self.bn is not None:
x = self.bn(x)
return x
class One_Three_Conv(nn.Module):
def __init__(self, in_planes, out_planes, stride=1):
super(One_Three_Conv, self).__init__()
self.out_channels = out_planes
inter_planes = in_planes // 4
self.single_branch = nn.Sequential(
ConvBlock(in_planes, inter_planes, kernel_size=1, stride=1),
ConvBlock(inter_planes, out_planes, kernel_size=(3, 3), stride=stride, padding=1, relu=False)
)
def forward(self, x):
out = self.single_branch(x)
return out
class Relu_Conv(nn.Module):
def __init__(self, in_planes, out_planes, stride=1):
super(Relu_Conv, self).__init__()
self.out_channels = out_planes
self.relu = nn.ReLU(inplace=False)
self.single_branch = nn.Sequential(
ConvBlock(in_planes, out_planes, kernel_size=(3, 3), stride=stride, padding=1)
)
def forward(self, x):
x = self.relu(x)
out = self.single_branch(x)
return out
class Ds_Conv(nn.Module):
def __init__(self, in_planes, out_planes, stride=1, padding=(1, 1)):
super(Ds_Conv, self).__init__()
self.out_channels = out_planes
self.single_branch = nn.Sequential(
ConvBlock(in_planes, out_planes, kernel_size=(3, 3), stride=stride, padding=padding, relu=False)
)
def forward(self, x):
out = self.single_branch(x)
return out
class LRFNet(nn.Module):
"""LRFNet for object detection
The network is based on the SSD architecture.
Each multibox layer branches into
1) conv2d for class conf scores
2) conv2d for localization predictions
3) associated priorbox layer to produce default bounding
boxes specific to the layer's feature map size.
Args:
phase: (string) Can be "test" or "train"
base: VGG16 layers for input, size of either 300 or 512
extras: extra layers that feed to multibox loc and conf layers
head: "multibox head" consists of loc and conf conv layers
"""
def __init__(self, phase, size, base, extras, head, num_classes):
super(LRFNet, self).__init__()
self.phase = phase
self.num_classes = num_classes
self.size = size
# vgg network
self.base = nn.ModuleList(base)
self.lds = LDS()
# convs for merging the lsn and ssd features
self.Norm1 = Relu_Conv(512, 512, stride=1)
self.Norm2 = Relu_Conv(1024, 1024, stride=1)
self.Norm3 = Relu_Conv(512, 512, stride=1)
self.Norm4 = Relu_Conv(256, 256, stride=1)
# convs for generate the lsn features
self.icn1 = LSN_init(3, 512, stride=1)
self.icn2 = LSN_later(128, 1024, stride=2)
self.icn3 = LSN_later(256, 512, stride=2)
# convs with s=2 to downsample the features
self.dsc1 = Ds_Conv(512, 1024, stride=2, padding=(1, 1))
self.dsc2 = Ds_Conv(1024, 512, stride=2, padding=(1, 1))
self.dsc3 = Ds_Conv(512, 256, stride=2, padding=(1, 1))
# convs to reduce the feature dimensions of current level
self.agent1 = ConvBlock(512, 256, kernel_size=1, stride=1)
self.agent2 = ConvBlock(1024, 512, kernel_size=1, stride=1)
self.agent3 = ConvBlock(512, 256, kernel_size=1, stride=1)
# convs to reduce the feature dimensions of other levels
self.proj1 = ConvBlock(1024, 128, kernel_size=1, stride=1)
self.proj2 = ConvBlock(512, 128, kernel_size=1, stride=1)
self.proj3 = ConvBlock(256, 128, kernel_size=1, stride=1)
# convs to reduce the feature dimensions of other levels
self.convert1 = ConvBlock(384, 256, kernel_size=1)
self.convert2 = ConvBlock(256, 512, kernel_size=1)
self.convert3 = ConvBlock(128, 256, kernel_size=1)
# convs to merge the features of the current and higher level features
self.merge1 = ConvBlock(512, 512, kernel_size=3, stride=1, padding=1)
self.merge2 = ConvBlock(1024, 1024, kernel_size=3, stride=1, padding=1)
self.merge3 = ConvBlock(512, 512, kernel_size=3, stride=1, padding=1)
self.ibn1 = IBN(512, bn=True)
self.ibn2 = IBN(1024, bn=True)
self.relu = nn.ReLU(inplace=False)
self.extras = nn.ModuleList(extras)
self.loc = nn.ModuleList(head[0])
self.conf = nn.ModuleList(head[1])
if self.phase == 'test':
self.softmax = nn.Softmax()
def forward(self, x):
"""Applies network layers and ops on input image(s) x.
Args:
x: input image or batch of images. Shape: [batch,3,300,300].
Return:
Depending on phase:
test:
list of concat outputs from:
1: softmax layers, Shape: [batch*num_priors,num_classes]
2: localization layers, Shape: [batch,num_priors*4]
3: priorbox layers, Shape: [2,num_priors*4]
train:
list of concat outputs from:
1: confidence layers, Shape: [batch*num_priors,num_classes]
2: localization layers, Shape: [batch,num_priors*4]
3: priorbox layers, Shape: [2,num_priors*4]
"""
sources = list()
loc = list()
conf = list()
new_sources = list()
# apply lds to the initial image
x_pool = self.lds(x)
# apply vgg up to conv4_3
for k in range(22):
x = self.base[k](x)
conv4_3_bn = self.ibn1(x)
x_pool1_skip, x_pool1_icn = self.icn1(x_pool)
s = self.Norm1(conv4_3_bn * x_pool1_icn)
# apply vgg up to fc7
for k in range(22, 34):
x = self.base[k](x)
conv7_bn = self.ibn2(x)
x_pool2_skip, x_pool2_icn = self.icn2(x_pool1_skip)
p = self.Norm2(self.dsc1(s) + conv7_bn * x_pool2_icn)
x = self.base[34](x)
# apply extra layers and cache source layer outputs
for k, v in enumerate(self.extras):
x = v(x)
if k == 0:
x_pool3_skip, x_pool3_icn = self.icn3(x_pool2_skip)
w = self.Norm3(self.dsc2(p) + x * x_pool3_icn)
elif k == 2:
q = self.Norm4(self.dsc3(w) + x)
sources.append(q)
elif k == 5 or k == 7:
sources.append(x)
else:
pass
# project the forward features into lower dimension.
tmp1 = self.proj1(p)
tmp2 = self.proj2(w)
tmp3 = self.proj3(q)
# The conv4_3 level
proj1 = F.upsample(tmp1, size=(38, 38), mode='bilinear')
proj2 = F.upsample(tmp2, size=(38, 38), mode='bilinear')
proj3 = F.upsample(tmp3, size=(38, 38), mode='bilinear')
proj = torch.cat([proj1, proj2, proj3], dim=1)
agent1 = self.agent1(s)
convert1 = self.convert1(proj)
pred1 = torch.cat([agent1, convert1], dim=1)
pred1 = self.merge1(pred1)
new_sources.append(pred1)
# The fc_7 level
proj2 = F.upsample(tmp2, size=(19, 19), mode='bilinear')
proj3 = F.upsample(tmp3, size=(19, 19), mode='bilinear')
proj = torch.cat([proj2, proj3], dim=1)
agent2 = self.agent2(p)
convert2 = self.convert2(proj)
pred2 = torch.cat([agent2, convert2], dim=1)
pred2 = self.merge2(pred2)
new_sources.append(pred2)
# The conv8 level
proj3 = F.upsample(tmp3, size=(10, 10), mode='bilinear')
proj = proj3
agent3 = self.agent3(w)
convert3 = self.convert3(proj)
pred3 = torch.cat([agent3, convert3], dim=1)
pred3 = self.merge3(pred3)
new_sources.append(pred3)
for prediction in sources:
new_sources.append(prediction)
# apply multibox head to source layers
for (x, l, c) in zip(new_sources, self.loc, self.conf):
loc.append(l(x).permute(0, 2, 3, 1).contiguous())
conf.append(c(x).permute(0, 2, 3, 1).contiguous())
loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)
if self.phase == "test":
output = (
loc.view(loc.size(0), -1, 4), # loc preds
self.softmax(conf.view(-1, self.num_classes)), # conf preds
)
else:
output = (
loc.view(loc.size(0), -1, 4),
conf.view(conf.size(0), -1, self.num_classes),
)
return output
def load_weights(self, base_file):
other, ext = os.path.splitext(base_file)
if ext == '.pkl' or '.pth':
print('Loading weights into state dict...')
self.load_state_dict(torch.load(base_file))
print('Finished!')
else:
print('Sorry only .pth and .pkl files supported.')
def vgg(cfg, i, batch_norm=False):
layers = []
in_channels = i
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
elif v == 'C':
layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=False)]
else:
layers += [conv2d, nn.ReLU(inplace=False)]
in_channels = v
pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)
conv7 = nn.Conv2d(1024, 1024, kernel_size=1)
layers += [pool5, conv6,
nn.ReLU(inplace=False), conv7, nn.ReLU(inplace=False)]
return layers
base = {
'300': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M',
512, 512, 512]}
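# In the cfg list above, integers are convolution output channel counts consumed by
# vgg(); 'M' inserts a 2x2 max-pool and 'C' a ceil-mode 2x2 max-pool (see vgg() above).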
def add_extras(size, cfg, i, batch_norm=False):
# Extra layers added to VGG for feature scaling
layers = []
in_channels = i
flag = False
for k, v in enumerate(cfg):
if in_channels != 'S':
if v == 'S':
if in_channels == 256 and size == 512:
layers += [One_Three_Conv(in_channels, cfg[k+1], stride=2), nn.ReLU(inplace=False)]
else:
layers += [One_Three_Conv(in_channels, cfg[k+1], stride=2), nn.ReLU(inplace=False)]
in_channels = v
layers += [ConvBlock(256, 128, kernel_size=1,stride=1)]
layers += [ConvBlock(128, 256, kernel_size=3,stride=1)]
layers += [ConvBlock(256, 128, kernel_size=1,stride=1)]
layers += [ConvBlock(128, 256, kernel_size=3,stride=1)]
return layers
extras = {
'300': [1024, 'S', 512, 'S', 256]}
def multibox(size, vgg, extra_layers, cfg, num_classes):
loc_layers = []
conf_layers = []
vgg_source = [1, -2]
for k, v in enumerate(vgg_source):
if k == 0:
loc_layers += [nn.Conv2d(512,
cfg[k] * 4, kernel_size=3, padding=1)]
conf_layers +=[nn.Conv2d(512,
cfg[k] * num_classes, kernel_size=3, padding=1)]
else:
loc_layers += [nn.Conv2d(vgg[v].out_channels,
cfg[k] * 4, kernel_size=3, padding=1)]
conf_layers += [nn.Conv2d(vgg[v].out_channels,
cfg[k] * num_classes, kernel_size=3, padding=1)]
i = 2
indicator = 3
for k, v in enumerate(extra_layers):
if (k < indicator+1 and k % 2 == 0) or (k > indicator+1 and k % 2 != 0):
loc_layers += [nn.Conv2d(v.out_channels, cfg[i]
* 4, kernel_size=3, padding=1)]
conf_layers += [nn.Conv2d(v.out_channels, cfg[i]
* num_classes, kernel_size=3, padding=1)]
i += 1
return vgg, extra_layers, (loc_layers, conf_layers)
mbox = {
'300': [6, 6, 6, 6, 4, 4]}
def build_net(phase, size=300, num_classes=81):
if size != 300:
print("Error: The input image size is not supported!")
return
return LRFNet(phase, size, *multibox(size, vgg(base[str(size)], 3),
add_extras(size, extras[str(size)], 1024),
mbox[str(size)], num_classes), num_classes)
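# Usage sketch (illustrative addition, not part of the original file): build the
# 300x300 COCO detector and run a dummy batch.  In the 'train' phase the network
# returns raw localization and confidence tensors; in 'test' the class scores are
# passed through softmax instead.
#
#     net = build_net('train', size=300, num_classes=81)
#     loc, conf = net(torch.randn(1, 3, 300, 300))
#     # loc:  (batch, num_priors, 4)   bounding-box offsets
#     # conf: (batch, num_priors, 81)  per-class scores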
| 2.59375 | 3 |
tests/test.py | chromia/wandplus | 0 | 3206 | #!/usr/bin/env python
from wand.image import Image
from wand.drawing import Drawing
from wand.color import Color
import wandplus.image as wpi
from wandplus.textutil import calcSuitableFontsize, calcSuitableImagesize
import os
import unittest
tmpdir = '_tmp/'
def save(img, function, channel=False, ext='.png'):
if channel:
path = tmpdir + function.__name__ + "_ch" + ext
else:
path = tmpdir + function.__name__ + ext
# print(path)
img.save(filename=path)
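# save() writes each result to _tmp/<test function name><ext>, adding a "_ch" suffix
# for the per-channel variants, so every test leaves an inspectable output image.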
class CheckImage(unittest.TestCase):
@classmethod
def setUpClass(self):
os.mkdir(tmpdir)
self.rose = Image(filename='rose:')
self.grad = Image(filename='gradient:', width=400, height=400)
self.logo = Image(filename='logo:')
self.text = Image(filename='label:Confirm', width=200, height=60)
self.text_a = Image(width=70, height=60)
with Drawing() as draw:
draw.font = 'Arial'
draw.font_size = 50
draw.gravity = 'center'
draw.fill_color = Color('white')
draw.stroke_color = Color('black')
draw.text(0, 0, 'A')
draw(self.text_a)
self.rose.save(filename=tmpdir + 'rose.png')
self.grad.save(filename=tmpdir + 'grad.png')
self.logo.save(filename=tmpdir + 'logo.png')
self.text.save(filename=tmpdir + 'text.png')
self.text_a.save(filename=tmpdir + 'a.png')
@classmethod
def tearDownClass(self):
self.rose.destroy()
self.grad.destroy()
self.logo.destroy()
self.text.destroy()
self.text_a.destroy()
def test_adaptiveblur(self):
f = wpi.adaptiveblur
with self.rose.clone() as t:
f(t, 5.0, 3.0)
save(t, f)
with self.rose.clone() as t:
f(t, 5.0, 3.0, channel='red')
save(t, f, True)
def test_adaptiveresize(self):
f = wpi.adaptiveresize
with self.rose.clone() as t:
f(t, int(t.width*1.5), int(t.height*2.0))
save(t, f)
def test_adaptivesharpen(self):
f = wpi.adaptivesharpen
with self.rose.clone() as t:
f(t, 5, 5)
save(t, f)
with self.rose.clone() as t:
f(t, 5, 5, channel='red')
save(t, f, True)
def test_adaptivethreshold(self):
f = wpi.adaptivethreshold
with self.logo.clone() as t:
f(t, 20, 20, int(0.1*t.quantum_range))
save(t, f)
def test_addnoise(self):
f = wpi.addnoise
with self.grad.clone() as t:
f(t, 'gaussian')
save(t, f)
with self.grad.clone() as t:
f(t, 'gaussian', channel='red')
save(t, f, True)
def test_affinetransform(self):
f = wpi.affinetransform
with self.rose.clone() as t:
with Drawing() as d:
d.affine([2.0, 0.0, 0.0, 2.0, 0.0, 0.0])
                f(t, d)  # does not work correctly (ImageMagick < 6.9.9-36)
save(t, f)
def test_autogamma(self):
f = wpi.autogamma
with self.rose.clone() as t:
f(t)
save(t, f)
with self.rose.clone() as t:
f(t, channel='red')
save(t, f, True)
def test_autolevel(self):
f = wpi.autolevel
with self.rose.clone() as t:
f(t)
save(t, f)
with self.rose.clone() as t:
f(t, channel='red')
save(t, f, True)
def test_blackthreshold(self):
f = wpi.blackthreshold
with self.grad.clone() as t:
f(t, Color('gray(50%)'))
save(t, f)
def test_blueshift(self):
f = wpi.blueshift
with self.logo.clone() as t:
f(t, 0.5)
save(t, f)
def test_brightnesscontrast(self):
f = wpi.brightnesscontrast
with self.rose.clone() as t:
f(t, -30, 0)
save(t, f)
with self.rose.clone() as t:
f(t, -30, 0, channel='red')
save(t, f, True)
def test_blur(self):
f = wpi.blur
with self.rose.clone() as t:
f(t, 0, 3)
save(t, f)
with self.rose.clone() as t:
f(t, 0, 3, channel='red')
save(t, f, True)
def test_charcoal(self):
f = wpi.charcoal
with self.rose.clone() as t:
f(t, 5, 1)
save(t, f)
def test_chop(self):
f = wpi.chop
with self.grad.clone() as t:
t.gravity = 'north_west'
            f(t, 0, 0, 200, 200)
save(t, f)
def test_clamp(self):
f = wpi.clamp # TODO: more useful code
with self.rose.clone() as t:
f(t)
save(t, f)
with self.rose.clone() as t:
f(t, channel='red')
save(t, f, True)
    def test_clip(self):  # NOTE: this test always fails;
        f = wpi.clip  # no sample image with a clipping path is available for testing
with self.rose.clone() as t:
f(t)
save(t, f)
    def test_clippath(self):  # NOTE: this test always fails (no clipping-path sample image)
f = wpi.clippath
with self.rose.clone() as t:
f(t, '#1', True)
save(t, f)
def test_clut(self):
f = wpi.clut
with Image(filename='gradient:red-blue', width=1, height=100) as p:
p.rotate(90)
with self.grad.clone() as t:
f(t, p)
save(t, f)
with self.grad.clone() as t:
f(t, p, channel='green')
save(t, f, True)
def test_coalesce(self): # TODO: input optimized .gif file.
f = wpi.coalesce
with Image() as t:
with self.rose.clone() as p:
for i in range(5):
wpi.blur(p, 0, 1)
wpi.add(t, p)
with f(t) as p:
save(p, f)
def test_colordecisionlist(self):
xml = """
<ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.2">
<ColorCorrection id="cc03345">
<SOPNode>
<Slope> 0.9 1.2 0.5 </Slope>
<Offset> 0.4 -0.5 0.6 </Offset>
<Power> 1.0 0.8 1.5 </Power>
</SOPNode>
<SATNode>
<Saturation> 0.85 </Saturation>
</SATNode>
</ColorCorrection>
</ColorCorrectionCollection>
"""
f = wpi.colordecisionlist
with self.rose.clone() as t:
f(t, xml)
save(t, f)
def test_colorize(self):
f = wpi.colorize
with self.grad.clone() as t:
f(t, Color('red'), Color('gray(25%)'))
save(t, f)
def test_colormatrix(self):
f = wpi.colormatrix
with self.logo.clone() as t:
kernel = [
0.5, 0.0, 0.0, 0.0, 0.0,
0.0, 1.5, 0.0, 0.0, 0.0,
0.0, 0.0, 0.5, 0.0, 0.0,
0.0, 0.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 0.0, 1.0
]
f(t, 5, 5, kernel)
save(t, f)
def test_combine(self):
f = wpi.combine
with Image() as t:
w = 100
h = 100
black = Color('black')
white = Color('white')
with Image(width=w, height=w, background=black) as b:
with Image(width=h, height=h, background=white) as w:
wpi.add(t, b) # add image for red channel
wpi.add(t, b) # add image for green channel
wpi.add(t, w) # add image for blue channel
wpi.setfirstiterator(t) # rewind the index pointer
channel = 1 + 2 + 4 # R + G + B
with f(t, channel) as q:
save(q, f)
def test_comment(self):
f = wpi.comment
with self.grad.clone() as t:
f(t, 'hello')
save(t, f)
def test_compare(self):
f = wpi.compare
with self.rose.clone() as t:
with t.clone() as p:
(c, d) = f(t, p, metric='absolute')
save(c, f)
c.destroy()
with self.rose.clone() as t:
with t.clone() as p:
(c, d) = f(t, p, metric='absolute', channel='red')
save(c, f, True)
c.destroy()
def test_comparelayer(self):
f = wpi.comparelayer
with Image() as t:
with Image(width=50, height=50, background=Color('red')) as p:
wpi.add(t, p)
with Image(width=25, height=25, background=Color('green1')) as q:
for i in range(4):
with q.clone() as qq:
wpi.resetpage(qq, 5*(i+1), 5*(i+1))
wpi.add(t, qq)
with f(t, 'compareany') as r:
save(r, f, ext='.gif')
def test_constitute(self):
f = wpi.constitute
with Image() as t:
w = 2
h = 2
b = [0, 0, 0,
255, 255, 255,
255, 0, 0,
0, 255, 0]
f(t, w, h, 'RGB', 'char', b)
save(t, f)
def test_contrast(self):
f = wpi.contrast
with self.rose.clone() as t:
f(t, False)
save(t, f)
def test_convolve(self):
f = wpi.convolve
kernel = [1/16, 2/16, 1/16,
2/16, 4/16, 2/16,
1/16, 2/16, 1/16]
with self.rose.clone() as t:
f(t, 3, kernel)
save(t, f)
with self.rose.clone() as t:
f(t, 3, kernel, channel='red')
save(t, f, True)
def test_cyclecolormap(self):
f = wpi.cyclecolormap
with self.logo.clone() as t:
f(t, 5)
save(t, f)
def test_cipher(self):
f = wpi.encipher
with self.rose.clone() as t:
f(t, 'password')
save(t, f)
f = wpi.decipher
f(t, 'password')
save(t, f)
def test_deskew(self):
f = wpi.deskew
with Image(width=80, height=40, background=Color('black')) as t:
f(t, 0.5*t.quantum_range) # TODO: find an skewed image as sample
save(t, f)
def test_despeckle(self):
f = wpi.despeckle
with self.rose.clone() as t:
# TODO: add speckle noise
f(t)
save(t, f)
def test_edge(self):
f = wpi.edge
with self.logo.clone() as t:
f(t, 3)
save(t, f)
def test_emboss(self):
f = wpi.emboss
with self.logo.clone() as t:
f(t, 0, 3)
save(t, f)
def test_enhance(self):
f = wpi.enhance
with Image(filename='plasma:', width=100, height=100) as t:
f(t)
save(t, f)
def test_equalize(self):
f = wpi.equalize
with self.rose.clone() as t:
f(t)
save(t, f)
with self.rose.clone() as t:
f(t, channel='red')
save(t, f, True)
def test_exportpixels(self):
w = 1
h = 1
channels = 'RGB'
with Image(width=w, height=h, background=Color('red')) as t:
r = wpi.exportpixels(t, 0, 0, w, h, channels, 'double')
self.assertEqual(r[0], 1.0)
self.assertEqual(r[1], 0.0)
self.assertEqual(r[2], 0.0)
def test_extent(self):
f = wpi.extent
with self.rose.clone() as t:
t.gravity = 'center'
t.background_color = Color('blue')
f(t, -10, -10, t.width+20, t.height+20)
save(t, f)
def test_filterimage(self):
f = wpi.filterimage
kernel = [ # Sobel filter
-1.0, 0.0, 1.0,
-2.0, 0.0, 2.0,
-1.0, 0.0, 1.0,
]
with self.rose.clone() as t:
f(t, 3, 3, kernel)
save(t, f)
with self.rose.clone() as t:
f(t, 3, 3, kernel, channel='red')
save(t, f, True)
def test_floodfillpaint(self):
f = wpi.floodfillpaint
with self.logo.clone() as t:
f(t, Color('green'), 0.10*t.quantum_range, Color('white'), 0, 0)
save(t, f)
def test_fft(self):
        f = wpi.forwardfouriertransform  # requires the IM build option '--with-fftw'
        with self.logo.clone() as t:  # (the author could not build this option on Windows)
f(t, True)
            save(t, f)  # the result includes two images (magnitude & phase)
f = wpi.inversefouriertransform
with t.sequence[0].clone() as mag:
with t.sequence[1].clone() as phase:
wpi.blur(mag, 0, 0.5) # as degradation
t2 = mag
f(t2, phase, True)
save(t2, f)
def test_haldclut(self):
f = wpi.haldclut # TODO: more useful code
with Image(filename='hald:12') as p:
with self.rose.clone() as t:
f(t, p)
save(t, f)
with self.rose.clone() as t:
f(t, p, channel='red')
save(t, f, True)
def test_implode(self):
f = wpi.implode
with self.rose.clone() as t:
f(t, 1.0)
save(t, f)
def test_importpixels(self):
f = wpi.importpixels
with Image(width=4, height=4, background=Color('red')) as t:
w = 2
h = 2
b = [0, 0, 0,
255, 255, 255,
255, 0, 0,
0, 255, 0]
f(t, 1, 1, w, h, 'RGB', 'char', b)
save(t, f)
def test_label(self):
f = wpi.label
with self.rose.clone() as t:
f(t, 'hello')
save(t, f)
def test_localcontrast(self):
f = wpi.localcontrast
with self.logo.clone() as t:
f(t, 5, 30)
save(t, f)
def test_magnify(self):
f = wpi.magnify
with self.rose.clone() as t:
f(t)
save(t, f)
def test_minify(self):
f = wpi.minify
with self.rose.clone() as t:
f(t)
save(t, f)
def test_montage(self):
f = wpi.montage
with self.rose.clone() as base:
with Image() as dst:
rows = 2
columns = 3
for i in range(rows * columns):
wpi.add(dst, base)
tile = "{0}x{1}+0+0".format(columns, rows)
thumb = "80x50+4+3"
frame = "15x15+3+3"
mode = "frame"
with Drawing() as d:
with f(dst, d, tile, thumb, mode, frame) as result:
save(result, f)
def test_morph(self):
f = wpi.morph
color = Color('white')
with self.rose.clone() as t:
with Image(width=t.width, height=t.height, background=color) as p:
wpi.add(t, p)
wpi.setfirstiterator(t)
wpi.setdelay(t, 60)
with f(t, 5) as q:
save(q, f, ext='.gif')
def test_morphology(self):
f = wpi.morphology
with self.logo.clone() as t:
f(t, 'dilate', 1, 'Diamond')
save(t, f)
with self.logo.clone() as t:
f(t, 'dilate', 1, 'Diamond', channel='red')
save(t, f, True)
def test_motionblur(self):
f = wpi.motionblur
with self.logo.clone() as t:
f(t, 30, 10, 45)
save(t, f)
with self.logo.clone() as t:
f(t, 30, 10, 45, channel='red')
save(t, f, True)
def test_oilpaint(self):
f = wpi.oilpaint
with self.rose.clone() as t:
f(t, 2.0)
save(t, f)
def test_opaquepaint(self):
f = wpi.opaquepaint
with self.logo.clone() as t:
f(t, Color('red'), Color('blue'), 1.0, False)
save(t, f)
with self.logo.clone() as t:
f(t, Color('red'), Color('blue'), 1.0, False, channel='blue')
save(t, f, True)
def test_orderedposterize(self):
f = wpi.orderedposterize
with self.grad.clone() as t:
f(t, 'o4x4,3,3')
save(t, f)
with self.grad.clone() as t:
f(t, 'o4x4,3,3', channel='red')
save(t, f, True)
def test_polaroid(self):
f = wpi.polaroid
with self.logo.clone() as t:
with Drawing() as d:
f(t, d, 1.0)
save(t, f)
def test_posterize(self):
f = wpi.posterize
with self.rose.clone() as t:
f(t, 3, True)
save(t, f)
def test_raiseimage(self):
f = wpi.raiseimage
with self.rose.clone() as t:
f(t, 10, 10, 10, 10, True)
save(t, f)
def test_randomthreshold(self):
f = wpi.randomthreshold
with self.text_a.clone() as t:
rng = t.quantum_range
f(t, int(rng * 0.05), int(rng * 0.95))
save(t, f)
with self.text_a.clone() as t:
rng = t.quantum_range
f(t, int(rng * 0.05), int(rng * 0.95), channel='red')
save(t, f, True)
def test_remap(self):
f = wpi.remap
with self.logo.clone() as t:
with self.rose.clone() as p:
f(t, p, 'nodither')
save(t, f)
def test_resample(self):
f = wpi.resample
with self.rose.clone() as t:
dpi = 72 * 2
f(t, dpi, dpi, 'lanczos', 1.0)
save(t, f)
def test_roll(self):
f = wpi.roll
with self.rose.clone() as t:
f(t, 10, 10)
save(t, f)
def test_rotationalblur(self):
f = wpi.rotationalblur
with self.rose.clone() as t:
f(t, 45)
save(t, f)
with self.rose.clone() as t:
f(t, 45, channel='red')
save(t, f, True)
def test_scale(self):
f = wpi.scale
with self.rose.clone() as t:
f(t, t.width*2, t.height*2)
save(t, f)
def test_segment(self):
f = wpi.segment
with self.logo.clone() as t:
f(t, 'rgb', False, 5, 20)
save(t, f)
def test_selectiveblur(self):
f = wpi.selectiveblur
with self.logo.clone() as t:
f(t, 20, 20, 0.5*t.quantum_range)
save(t, f)
with self.logo.clone() as t:
f(t, 20, 20, 0.5*t.quantum_range, channel='red')
save(t, f, True)
def test_separate_channel(self):
f = wpi.separate_channel
with self.rose.clone() as t:
f(t, 'red')
save(t, f)
def test_sepiatone(self):
f = wpi.sepiatone
with self.rose.clone() as t:
f(t, 0.5*t.quantum_range)
save(t, f)
def test_shade(self):
f = wpi.shade
with self.logo.clone() as t:
f(t, True, 45, 135)
save(t, f)
def test_shadow(self):
f = wpi.shadow
with self.text.clone() as t:
with self.text.clone() as p:
p.negate()
f(p, 100, 2, 10, 10)
t.composite_channel('default_channels', p, 'overlay')
save(t, f)
def test_sharpen(self):
f = wpi.sharpen
with self.rose.clone() as t:
f(t, 3, 3)
save(t, f)
with self.rose.clone() as t:
f(t, 3, 3, channel='red')
save(t, f, True)
def test_shave(self):
f = wpi.shave
with self.logo.clone() as t:
f(t, 100, 100)
save(t, f)
def test_shear(self):
f = wpi.shear
with self.grad.clone() as t:
f(t, Color('red'), 0, 10)
save(t, f)
def test_sigmoidalcontrast(self):
f = wpi.sigmoidalcontrast
with self.rose.clone() as t:
f(t, True, 3, 3)
save(t, f)
with self.rose.clone() as t:
f(t, True, 3, 3, channel='red')
save(t, f, True)
def test_sketch(self):
f = wpi.sketch
with self.logo.clone() as t:
f(t, 10, 10, 45)
save(t, f)
def test_smush(self):
f = wpi.smush
def makeletter(letter, w, h):
img = Image(width=w, height=h)
with Drawing() as d:
d.font = 'Arial'
d.font_size = 24
d.gravity = 'center'
d.text(0, 0, letter)
d(img)
return img
with Image() as t:
with makeletter('A', 50, 30) as a:
with makeletter('B', 50, 30) as b:
wpi.add(t, a)
wpi.add(t, b)
wpi.setfirstiterator(t)
with f(t, False, -3) as p:
save(p, f)
def test_solarize(self):
f = wpi.solarize
with self.rose.clone() as t:
f(t, 0.4*t.quantum_range)
save(t, f)
with self.rose.clone() as t:
f(t, 0.4*t.quantum_range, channel='red')
save(t, f, True)
def test_splice(self):
f = wpi.splice
with self.rose.clone() as t:
t.gravity = 'center'
f(t, t.width//2, t.height//2, 20, 20)
save(t, f)
def test_sparsecolor(self):
f = wpi.sparsecolor
with Image(width=100, height=100, background=Color('black')) as t:
f(t, 'default_channels', 'bilinear',
[0, 0, 1.0, 0.0, 0.0, 1.0,
100, 100, 0.0, 1.0, 1.0, 1.0])
save(t, f)
def test_spread(self):
f = wpi.spread
with self.logo.clone() as t:
f(t, 20)
save(t, f)
def test_statistic(self):
f = wpi.statistic
with self.rose.clone() as t:
f(t, 'gradient', 4, 4)
save(t, f)
with self.rose.clone() as t:
f(t, 'gradient', 4, 4, channel='red')
save(t, f, True)
def test_stegano(self):
f = wpi.stegano
with self.rose.clone() as t:
w = 50
h = 40
offset = 15
tmpfile = 'tmp.png'
with Image(width=w, height=h, background=Color('white')) as p:
with Drawing() as d:
d.gravity = 'center'
d.fill_color = Color('black')
d.text(0, 0, 'Watch\nthe\nPidgeon')
d(p)
with f(t, p, offset) as q:
q.save(filename=tmpfile)
try:
with Image() as q:
wpi.setsizeoffset(q, w, h, offset)
q.read(filename='stegano:' + tmpfile)
save(q, f)
except Exception:
raise
finally:
os.remove(tmpfile)
def test_stereo(self):
f = wpi.stereo
with self.rose.clone() as t:
with self.rose.clone() as p:
p.negate()
with f(t, p) as q:
save(q, f)
def test_swirl(self):
f = wpi.swirl
with self.rose.clone() as t:
f(t, 180)
save(t, f)
def test_texture(self):
f = wpi.texture
with Image(width=300, height=200) as t:
with self.rose.clone() as p:
with f(t, p) as q:
save(q, f)
def test_thumbnail(self):
f = wpi.thumbnail
with self.logo.clone() as t:
f(t, 100, 100)
save(t, f)
def test_tint(self):
f = wpi.tint
with self.rose.clone() as t:
f(t, Color('rgb'), Color('gray(25%)'))
save(t, f)
def test_vignette(self):
f = wpi.vignette
with self.logo.clone() as t:
wpi.minify(t)
t.background_color = Color('black')
f(t, 0, 10, 20, 20)
save(t, f)
def test_wave(self):
f = wpi.wave
with self.grad.clone() as t:
f(t, 40, 200)
save(t, f)
def test_whitethreshold(self):
f = wpi.whitethreshold
with self.grad.clone() as t:
f(t, Color('gray(50%)'))
save(t, f)
class CheckTextUtil(unittest.TestCase):
def test_imagesize(self):
with Drawing() as d:
text = 'check'
d.font = 'Arial'
d.font_size = 36
size = calcSuitableImagesize(d, text)
print('calcSuitableImagesize: ', size)
self.assertTrue(size[0] > 0 and size[1] > 0)
def test_fontsize(self):
w = 100
h = 100
with Drawing() as d:
text = 'check'
d.font = 'Arial'
fontsize = calcSuitableFontsize(d, text, width=w)
            print('calcSuitableFontsize[W]: ', fontsize)
self.assertTrue(fontsize > 0)
fontsize = calcSuitableFontsize(d, text, height=h)
            print('calcSuitableFontsize[H]: ', fontsize)
self.assertTrue(fontsize > 0)
if __name__ == '__main__':
unittest.main()
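# For reference, a single case can also be run on its own (assuming this file is saved
# as, e.g., test_wpi.py -- the actual module name is not shown here):
#   python -m unittest test_wpi.CheckTextUtil.test_fontsize -v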
| 2.421875 | 2 |
src/librender/tests/test_mesh.py | tizian/layer-laboratory | 7 | 3207 |
import mitsuba
import pytest
import enoki as ek
from enoki.dynamic import Float32 as Float
from mitsuba.python.test.util import fresolver_append_path
from mitsuba.python.util import traverse
def test01_create_mesh(variant_scalar_rgb):
from mitsuba.core import Struct, float_dtype
from mitsuba.render import Mesh
m = Mesh("MyMesh", 3, 2)
m.vertex_positions_buffer()[:] = [0.0, 0.0, 0.0, 1.0, 0.2, 0.0, 0.2, 1.0, 0.0]
m.faces_buffer()[:] = [0, 1, 2, 1, 2, 0]
m.parameters_changed()
assert str(m) == """Mesh[
name = "MyMesh",
bbox = BoundingBox3f[
min = [0, 0, 0],
max = [1, 1, 0]
],
vertex_count = 3,
vertices = [36 B of vertex data],
face_count = 2,
faces = [24 B of face data],
disable_vertex_normals = 0,
surface_area = 0.96
]"""
@fresolver_append_path
def test02_ply_triangle(variant_scalar_rgb):
from mitsuba.core import UInt32, Vector3f
from mitsuba.core.xml import load_string
m = load_string("""
<shape type="ply" version="0.5.0">
<string name="filename" value="data/triangle.ply"/>
<boolean name="face_normals" value="true"/>
</shape>
""")
positions = m.vertex_positions_buffer()
faces = m.faces_buffer()
assert not m.has_vertex_normals()
assert ek.slices(positions) == 9
assert ek.allclose(positions[0:3], [0, 0, 0])
assert ek.allclose(positions[3:6], [0, 0, 1])
assert ek.allclose(positions[6:9], [0, 1, 0])
assert ek.slices(faces) == 3
assert faces[0] == UInt32(0)
assert faces[1] == UInt32(1)
assert faces[2] == UInt32(2)
@fresolver_append_path
def test03_ply_computed_normals(variant_scalar_rgb):
    """Checks (automatic) vertex normal computation for a PLY file that
    doesn't have them."""
    from mitsuba.core import Vector3f
    from mitsuba.core.xml import load_string
shape = load_string("""
<shape type="ply" version="0.5.0">
<string name="filename" value="data/triangle.ply"/>
</shape>
""")
normals = shape.vertex_normals_buffer()
assert shape.has_vertex_normals()
# Normals are stored in half precision
assert ek.allclose(normals[0:3], [-1, 0, 0])
assert ek.allclose(normals[3:6], [-1, 0, 0])
assert ek.allclose(normals[6:9], [-1, 0, 0])
def test04_normal_weighting_scheme(variant_scalar_rgb):
    """Tests the weighting scheme that is used to compute surface normals."""
    from mitsuba.core import Struct, float_dtype, Vector3f
    from mitsuba.render import Mesh
    import numpy as np
m = Mesh("MyMesh", 5, 2, has_vertex_normals=True)
vertices = m.vertex_positions_buffer()
normals = m.vertex_normals_buffer()
a, b = 1.0, 0.5
vertices[:] = [0, 0, 0, -a, 1, 0, a, 1, 0, -b, 0, 1, b, 0, 1]
n0 = Vector3f(0.0, 0.0, -1.0)
n1 = Vector3f(0.0, 1.0, 0.0)
angle_0 = ek.pi / 2.0
angle_1 = ek.acos(3.0 / 5.0)
n2 = n0 * angle_0 + n1 * angle_1
n2 /= ek.norm(n2)
n = np.vstack([n2, n0, n0, n1, n1]).transpose()
m.faces_buffer()[:] = [0, 1, 2, 0, 3, 4]
m.recompute_vertex_normals()
for i in range(5):
assert ek.allclose(normals[i*3:(i+1)*3], n[:, i], 5e-4)
@fresolver_append_path
def test05_load_simple_mesh(variant_scalar_rgb):
    """Tests the OBJ and PLY loaders on a simple example."""
    from mitsuba.core.xml import load_string
for mesh_format in ["obj", "ply"]:
shape = load_string("""
<shape type="{0}" version="2.0.0">
<string name="filename" value="resources/data/tests/{0}/cbox_smallbox.{0}"/>
</shape>
""".format(mesh_format))
positions = shape.vertex_positions_buffer()
faces = shape.faces_buffer()
assert shape.has_vertex_normals()
assert ek.slices(positions) == 72
assert ek.slices(faces) == 36
assert ek.allclose(faces[6:9], [4, 5, 6])
assert ek.allclose(positions[:5], [130, 165, 65, 82, 165])
@pytest.mark.parametrize('mesh_format', ['obj', 'ply', 'serialized'])
@pytest.mark.parametrize('features', ['normals', 'uv', 'normals_uv'])
@pytest.mark.parametrize('face_normals', [True, False])
def test06_load_various_features(variant_scalar_rgb, mesh_format, features, face_normals):
"""Tests the OBJ & PLY loaders with combinations of vertex / face normals,
presence and absence of UVs, etc.
"""
from mitsuba.core.xml import load_string
def test():
shape = load_string("""
<shape type="{0}" version="2.0.0">
<string name="filename" value="resources/data/tests/{0}/rectangle_{1}.{0}" />
<boolean name="face_normals" value="{2}" />
</shape>
""".format(mesh_format, features, str(face_normals).lower()))
assert shape.has_vertex_normals() == (not face_normals)
positions = shape.vertex_positions_buffer()
normals = shape.vertex_normals_buffer()
texcoords = shape.vertex_texcoords_buffer()
faces = shape.faces_buffer()
(v0, v2, v3) = [positions[i*3:(i+1)*3] for i in [0, 2, 3]]
assert ek.allclose(v0, [-2.85, 0.0, -7.600000], atol=1e-3)
assert ek.allclose(v2, [ 2.85, 0.0, 0.599999], atol=1e-3)
assert ek.allclose(v3, [ 2.85, 0.0, -7.600000], atol=1e-3)
if 'uv' in features:
assert shape.has_vertex_texcoords()
(uv0, uv2, uv3) = [texcoords[i*2:(i+1)*2] for i in [0, 2, 3]]
# For OBJs (and .serialized generated from OBJ), UV.y is flipped.
if mesh_format in ['obj', 'serialized']:
assert ek.allclose(uv0, [0.950589, 1-0.988416], atol=1e-3)
assert ek.allclose(uv2, [0.025105, 1-0.689127], atol=1e-3)
assert ek.allclose(uv3, [0.950589, 1-0.689127], atol=1e-3)
else:
assert ek.allclose(uv0, [0.950589, 0.988416], atol=1e-3)
assert ek.allclose(uv2, [0.025105, 0.689127], atol=1e-3)
assert ek.allclose(uv3, [0.950589, 0.689127], atol=1e-3)
if shape.has_vertex_normals():
for n in [normals[i*3:(i+1)*3] for i in [0, 2, 3]]:
assert ek.allclose(n, [0.0, 1.0, 0.0])
return fresolver_append_path(test)()
@fresolver_append_path
def test07_ply_stored_attribute(variant_scalar_rgb):
from mitsuba.core import Vector3f
from mitsuba.core.xml import load_string
m = load_string("""
<shape type="ply" version="0.5.0">
<string name="filename" value="data/triangle_face_colors.ply"/>
</shape>
""")
assert str(m) == """PLYMesh[
name = "triangle_face_colors.ply",
bbox = BoundingBox3f[
min = [0, 0, 0],
max = [0, 1, 1]
],
vertex_count = 3,
vertices = [72 B of vertex data],
face_count = 1,
faces = [24 B of face data],
disable_vertex_normals = 0,
surface_area = 0,
mesh attributes = [
face_color: 3 floats
]
]"""
def test08_mesh_add_attribute(variant_scalar_rgb):
from mitsuba.core import Struct, float_dtype
from mitsuba.render import Mesh
m = Mesh("MyMesh", 3, 2)
m.vertex_positions_buffer()[:] = [0.0, 0.0, 0.0, 1.0, 0.2, 0.0, 0.2, 1.0, 0.0]
m.faces_buffer()[:] = [0, 1, 2, 1, 2, 0]
m.parameters_changed()
m.add_attribute("vertex_color", 3)[:] = [0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0]
assert str(m) == """Mesh[
name = "MyMesh",
bbox = BoundingBox3f[
min = [0, 0, 0],
max = [1, 1, 0]
],
vertex_count = 3,
vertices = [72 B of vertex data],
face_count = 2,
faces = [24 B of face data],
disable_vertex_normals = 0,
surface_area = 0.96,
mesh attributes = [
vertex_color: 3 floats
]
]""" | 1.859375 | 2 |
agsadmin/sharing_admin/community/groups/Group.py | christopherblanchfield/agsadmin | 2 | 3208 | from __future__ import (absolute_import, division, print_function, unicode_literals)
from builtins import (ascii, bytes, chr, dict, filter, hex, input, int, map, next, oct, open, pow, range, round, str,
super, zip)
from ...._utils import send_session_request
from ..._PortalEndpointBase import PortalEndpointBase
from .CreateUpdateGroupParams import CreateUpdateGroupParams
class Group(PortalEndpointBase):
@property
def id(self):
return self._pdata["id"]
@property
def _url_full(self):
return "{0}/{1}".format(self._url_base, self.id)
def __init__(self, requests_session, url_base, id):
super().__init__(requests_session, url_base)
self._pdata = {"id": id}
def get_properties(self):
"""
Gets the properties of the item.
"""
return self._get()
def update(self, update_group_params, clear_empty_fields=False):
"""
Updates the group properties.
"""
update_group_params = update_group_params._get_params() if isinstance(
update_group_params, CreateUpdateGroupParams) else update_group_params.copy()
if not "clearEmptyFields" in update_group_params:
update_group_params["clearEmptyFields"] = clear_empty_fields
r = self._create_operation_request(self, "update", method="POST", data=update_group_params)
return send_session_request(self._session, r).json() | 2.109375 | 2 |
amy/workshops/migrations/0191_auto_20190809_0936.py | code-review-doctor/amy | 53 | 3209 |
# Generated by Django 2.1.7 on 2019-08-09 09:36
from django.db import migrations, models
def migrate_public_event(apps, schema_editor):
"""Migrate options previously with no contents (displayed as "Other:")
to a new contents ("other").
The field containing these options is in CommonRequest abstract model,
implemented in WorkshopRequest, WorkshopInquiryRequest, and
SelfOrganizedSubmission models."""
WorkshopRequest = apps.get_model('workshops', 'WorkshopRequest')
WorkshopInquiryRequest = apps.get_model('extrequests',
'WorkshopInquiryRequest')
SelfOrganizedSubmission = apps.get_model('extrequests',
'SelfOrganizedSubmission')
WorkshopRequest.objects.filter(public_event="") \
.update(public_event="other")
WorkshopInquiryRequest.objects.filter(public_event="") \
.update(public_event="other")
SelfOrganizedSubmission.objects.filter(public_event="") \
.update(public_event="other")
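# Note: as written, the RunPython operation below has no reverse function, so the
# migration is irreversible. A sketch of how a no-op reverse could be added if needed:
#
#     migrations.RunPython(migrate_public_event, migrations.RunPython.noop)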
class Migration(migrations.Migration):
dependencies = [
('workshops', '0190_auto_20190728_1118'),
('extrequests', '0008_auto_20190809_1004'),
]
operations = [
migrations.AlterField(
model_name='workshoprequest',
name='host_responsibilities',
field=models.BooleanField(default=False, verbose_name='I understand <a href="https://docs.carpentries.org/topic_folders/hosts_instructors/hosts_instructors_checklist.html#host-checklist">the responsibilities of the workshop host</a>, including recruiting local helpers to support the workshop (1 helper for every 8-10 learners).'),
),
migrations.AlterField(
model_name='workshoprequest',
name='requested_workshop_types',
field=models.ManyToManyField(help_text='If your learners are new to programming and primarily interested in working with data, Data Carpentry is likely the best choice. If your learners are interested in learning more about programming, including version control and automation, Software Carpentry is likely the best match. If your learners are people working in library and information related roles interested in learning data and software skills, Library Carpentry is the best choice. Please visit the <a href="https://software-carpentry.org/lessons/">Software Carpentry lessons page</a>, <a href="http://www.datacarpentry.org/lessons/">Data Carpentry lessons page</a>, or the <a href="https://librarycarpentry.org/lessons/">Library Carpentry lessons page</a> for more information about any of our lessons.', limit_choices_to={'active': True}, to='workshops.Curriculum', verbose_name='Which Carpentries workshop are you requesting?'),
),
migrations.AlterField(
model_name='workshoprequest',
name='scholarship_circumstances',
field=models.TextField(blank=True, help_text='Required only if you request a scholarship.', verbose_name='Please explain the circumstances for your scholarship request and let us know what budget you have towards The Carpentries workshop fees.'),
),
migrations.AlterField(
model_name='workshoprequest',
name='public_event',
field=models.CharField(blank=True, choices=[('invite', 'This event is open to learners by invitation only.'), ('closed', 'This event is open to learners inside of my institution.'), ('public', 'This event is open to learners outside of my institution.'), ('other', 'Other:')], default='', help_text='Many of our workshops restrict registration to learners from the hosting institution. If your workshop will be open to registrants outside of your institution please let us know below.', max_length=20, verbose_name='Is this workshop open to the public?'),
),
migrations.RunPython(migrate_public_event),
]
| 1.765625 | 2 |
pix2pix/Discriminator.py | yubin1219/GAN | 0 | 3210 |
import torch
import torch.nn as nn
class Discriminator(nn.Module):
  def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d, use_sigmoid=False):
super(Discriminator, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(input_nc, ndf, kernel_size=4, stride=2, padding=1),
nn.LeakyReLU(0.2, True)
)
self.conv2 = nn.Sequential(
nn.Conv2d(ndf, ndf * 2, kernel_size=4, stride=2, padding=1),
norm_layer(ndf * 2),
nn.LeakyReLU(0.2, True)
)
self.conv3 = nn.Sequential(
nn.Conv2d(ndf * 2, ndf * 4, kernel_size=4, stride=2, padding=1),
norm_layer(ndf * 4),
nn.LeakyReLU(0.2, True)
)
self.conv4 = nn.Sequential(
nn.Conv2d(ndf * 4, ndf * 8, kernel_size=4, stride=2, padding=1),
norm_layer(ndf * 8),
nn.LeakyReLU(0.2, True)
)
if use_sigmoid:
self.conv5 = nn.Sequential(
nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=2, padding=1),
nn.Sigmoid()
)
else:
self.conv5 = nn.Sequential(
nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=2, padding=1)
)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.conv4(x)
x = self.conv5(x)
return x
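

if __name__ == "__main__":
    # Minimal smoke test (illustrative): for pix2pix the discriminator typically sees the
    # condition and target images concatenated along the channel axis, hence input_nc=6.
    # With five stride-2 convolutions, a 256x256 input is reduced to an 8x8 patch of logits.
    net = Discriminator(input_nc=6)
    out = net(torch.randn(1, 6, 256, 256))
    print(out.shape)  # expected: torch.Size([1, 1, 8, 8])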
| 2.328125 | 2 |
tests/slicebuilders/subpopulations/test_length.py | ANarayan/robustness-gym | 0 | 3211 | from unittest import TestCase
import numpy as np
from robustnessgym.cachedops.spacy import Spacy
from robustnessgym.slicebuilders.subpopulations.length import LengthSubpopulation
from tests.testbeds import MockTestBedv0
class TestLengthSubpopulation(TestCase):
def setUp(self):
self.testbed = MockTestBedv0()
self.testbed.dataset = Spacy()(self.testbed.dataset, columns=["text"])
def test_score(self):
# Create the length subpopulation
length = LengthSubpopulation(intervals=[(1, 3), (4, 5)])
# Compute scores
scores = length.score(self.testbed.dataset[:], columns=["text"])
self.assertTrue(np.allclose(scores, np.array([5, 5, 5, 5, 5, 5])))
print(self.testbed.dataset.column_names)
print(Spacy.retrieve(self.testbed.dataset[:], ["text"]))
# Apply the subpopulation
slices, slice_matrix = length(self.testbed.dataset, columns=["text"])
# Check that the slice membership lines up
self.assertTrue(np.allclose(slice_matrix, np.array([[0, 1]] * 6)))
| 2.671875 | 3 |
pulsar_spectra/catalogue_papers/Jankowski_2018_raw_to_yaml.py | NickSwainston/pulsar_spectra | 0 | 3212 | import json
from astroquery.vizier import Vizier
with open("Jankowski_2018_raw.txt", "r") as raw_file:
lines = raw_file.readlines()
print(lines)
pulsar_dict = {}
for row in lines[3:]:
row = row.split("|")
print(row)
pulsar = row[0].strip().replace("−", "-")
freqs = []
fluxs = []
flux_errs = []
    # If there is no error, the value is an upper limit, and we're not sure how to handle those
if row[1].strip() != "" and row[2].strip() != "":
freqs.append(728)
fluxs.append(float(row[1].strip()))
flux_errs.append(float(row[2].strip()))
if row[3].strip() != "" and row[4].strip() != "":
freqs.append(1382)
fluxs.append(float(row[3].strip()))
flux_errs.append(float(row[4].strip()))
if row[5].strip() != "" and row[6].strip() != "":
freqs.append(3100)
fluxs.append(float(row[5].strip()))
flux_errs.append(float(row[6].strip()))
pulsar_dict[pulsar] = {"Frequency MHz":freqs, "Flux Density mJy":fluxs, "Flux Density error mJy":flux_errs}
with open("Jankowski_2018.yaml", "w") as cat_file:
cat_file.write(json.dumps(pulsar_dict))
print(pulsar_dict) | 2.765625 | 3 |
integration-tests/run-intg-test.py | NishikaDeSilva/identity-test-integration | 4 | 3213 | # Copyright (c) 2018, WSO2 Inc. (http://wso2.com) All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# importing required modules
import sys
from xml.etree import ElementTree as ET
import toml
import subprocess
import wget
import logging
import inspect
import os
import shutil
import pymysql
import sqlparse
import re
from pathlib import Path
import urllib.request as urllib2
from xml.dom import minidom
import intg_test_manager as cm
from subprocess import Popen, PIPE
import os
from prod_test_constant import DB_META_DATA, DIST_POM_PATH, INTEGRATION_PATH, DISTRIBUTION_PATH, \
DATASOURCE_PATHS, LIB_PATH, WSO2SERVER, M2_PATH, ARTIFACT_REPORTS_PATHS, POM_FILE_PATHS
from intg_test_constant import NS, ZIP_FILE_EXTENSION, CARBON_NAME, VALUE_TAG, SURFACE_PLUGIN_ARTIFACT_ID, \
DEPLOYMENT_PROPERTY_FILE_NAME, LOG_FILE_NAME, PRODUCT_STORAGE_DIR_NAME, \
DEFAULT_DB_USERNAME, LOG_STORAGE, TEST_OUTPUT_DIR_NAME, DEFAULT_ORACLE_SID, MYSQL_DB_ENGINE, \
ORACLE_DB_ENGINE, PRODUCT_STORAGE_DIR_NAME, MSSQL_DB_ENGINE
database_names = []
db_engine = None
sql_driver_location = None
identity_db_url = None
identity_db_username = None
identity_db_password = None  # original value redacted in the source; None assumed, matching the other defaults
identity_db_driver = None
shared_db_url = None
shared_db_username = None
shared_db_password = None  # original value redacted in the source; None assumed, matching the other defaults
shared_db_driver = None
identity_db = "WSO2_IDENTITY_DB"
shared_db = "WSO2_SHARED_DB"
def get_db_meta_data(argument):
switcher = DB_META_DATA
return switcher.get(argument, False)
def add_environmental_variables():
if MYSQL_DB_ENGINE == cm.database_config['db_engine'].upper():
identity_url = cm.database_config[
'url'] + "/" + identity_db + "?useSSL=false&autoReconnect=true&requireSSL=false" \
"&verifyServerCertificate=false"
shared_url = cm.database_config[
'url'] + "/" + shared_db + \
"?useSSL=false&autoReconnect=true&requireSSL=false" \
"&verifyServerCertificate=false"
user = cm.database_config['user']
elif ORACLE_DB_ENGINE == cm.database_config['db_engine'].upper():
identity_url= cm.database_config['url'] + "/" + DEFAULT_ORACLE_SID
shared_url= cm.database_config['url'] + "/" + DEFAULT_ORACLE_SID
user = cm.database_config['user']
elif MSSQL_DB_ENGINE == cm.database_config['db_engine'].upper():
identity_url = cm.database_config['url'] + ";" + "databaseName=" + identity_db
shared_url = cm.database_config['url'] + ";" + "databaseName=" + shared_db
user = cm.database_config['user']
else:
shared_url = cm.database_config['url'] + "/" + shared_db
identity_url = cm.database_config['url'] + "/" + identity_db
user = cm.database_config['user']
password = cm.database_config['password']
driver_class_name = cm.database_config['driver_class_name']
os.environ["SHARED_DATABASE_URL"] = shared_url
os.environ["SHARED_DATABASE_USERNAME"] = user
os.environ["SHARED_DATABASE_PASSWORD"] = password
os.environ["SHARED_DATABASE_DRIVER"] = driver_class_name
os.environ["IDENTITY_DATABASE_URL"] = identity_url
os.environ["IDENTITY_DATABASE_USERNAME"] = user
os.environ["IDENTITY_DATABASE_PASSWORD"] = password
os.environ["IDENTITY_DATABASE_DRIVER"] = driver_class_name
logger.info("Added environmental variables for integration test")
def modify_datasources():
file_path = Path(storage_dist_abs_path / datasource_path)
if sys.platform.startswith('win'):
file_path = cm.winapi_path(file_path)
logger.info("Modifying datasource: " + str(file_path))
deployment_toml_config = toml.load(file_path)
logger.info("loading dep,loyment.toml file")
logger.info(deployment_toml_config)
for key in deployment_toml_config:
if key == 'database':
database_config = deployment_toml_config[key]
for key in database_config:
if key == 'identity_db':
identity_db_config = database_config['identity_db']
identity_db_config ['url'] = "$env{IDENTITY_DATABASE_URL}"
identity_db_config ['username'] = "$env{IDENTITY_DATABASE_USERNAME}"
                identity_db_config ['password'] = "$env{IDENTITY_DATABASE_PASSWORD}"  # reconstructed to match the adjacent $env placeholders
identity_db_config ['driver'] = "$env{IDENTITY_DATABASE_DRIVER}"
database_names.append(identity_db)
if key == 'shared_db':
shared_db_config = database_config['shared_db']
shared_db_config ['url'] = "$env{SHARED_DATABASE_URL}"
shared_db_config ['username'] = "$env{SHARED_DATABASE_USERNAME}"
                shared_db_config ['password'] = "$env{SHARED_DATABASE_PASSWORD}"  # reconstructed to match the adjacent $env placeholders
shared_db_config ['driver'] = "$env{SHARED_DATABASE_DRIVER}"
database_names.append(shared_db)
with open(file_path, 'w') as writer:
writer.write(toml.dumps(deployment_toml_config))
# Since we have added a method to clone a given git branch and check out the latest released tag, it is not
# necessary to modify pom files, so this method is not used in the current implementation.
# However, to enable it you can define the pom file paths in const_<prod>.py as a constant,
# import it into run-intg-test.py, assign it to the global variable pom_file_paths in the
# configure_product method, and then call the modify_pom_files method (see the sketch below).
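# A minimal sketch of that wiring (illustrative only; POM_FILE_PATHS is already imported
# from prod_test_constant at the top of this script):
#
#     def configure_product():
#         global pom_file_paths
#         pom_file_paths = POM_FILE_PATHS
#         ...
#         modify_pom_files()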
def modify_pom_files():
for pom in POM_FILE_PATHS:
file_path = Path(cm.workspace + "/" + cm.product_id + "/" + pom)
if sys.platform.startswith('win'):
file_path = cm.winapi_path(file_path)
logger.info("Modifying pom file: " + str(file_path))
ET.register_namespace('', NS['d'])
artifact_tree = ET.parse(file_path)
artifarct_root = artifact_tree.getroot()
data_sources = artifarct_root.find('d:build', NS)
plugins = data_sources.find('d:plugins', NS)
for plugin in plugins.findall('d:plugin', NS):
artifact_id = plugin.find('d:artifactId', NS)
if artifact_id is not None and artifact_id.text == SURFACE_PLUGIN_ARTIFACT_ID:
configuration = plugin.find('d:configuration', NS)
system_properties = configuration.find('d:systemProperties', NS)
for neighbor in system_properties.iter('{' + NS['d'] + '}' + CARBON_NAME):
neighbor.text = cm.modify_distribution_name(neighbor)
for prop in system_properties:
name = prop.find('d:name', NS)
if name is not None and name.text == CARBON_NAME:
for data in prop:
if data.tag == VALUE_TAG:
data.text = cm.modify_distribution_name(data)
break
artifact_tree.write(file_path)
# TODO: Improve the method in a generic way to support all products
def save_log_files():
log_storage = Path(cm.workspace + "/" + LOG_STORAGE)
if not Path.exists(log_storage):
Path(log_storage).mkdir(parents=True, exist_ok=True)
log_file_paths = ARTIFACT_REPORTS_PATHS
if log_file_paths:
for file in log_file_paths:
absolute_file_path = Path(cm.workspace + "/" + cm.product_id + "/" + file)
if Path.exists(absolute_file_path):
cm.copy_file(absolute_file_path, log_storage)
else:
logger.error("File doesn't contain in the given location: " + str(absolute_file_path))
# TODO: Improve the method in a generic way to support all products
def save_test_output():
report_folder = Path(cm.workspace + "/" + TEST_OUTPUT_DIR_NAME)
logger.info(str(report_folder))
if Path.exists(report_folder):
shutil.rmtree(report_folder)
logger.info(str(ARTIFACT_REPORTS_PATHS))
logger.info(str(type(ARTIFACT_REPORTS_PATHS)))
report_file_paths = ARTIFACT_REPORTS_PATHS
for key, value in report_file_paths.items():
for file in value:
absolute_file_path = Path(cm.workspace + "/" + cm.product_id + "/" + file)
if Path.exists(absolute_file_path):
report_storage = Path(cm.workspace + "/" + TEST_OUTPUT_DIR_NAME + "/" + key)
cm.copy_file(absolute_file_path, report_storage)
logger.info("Report successfully copied")
else:
logger.error("File doesn't contain in the given location: " + str(absolute_file_path))
# TODO: Improve the method in a generic way to support all products
# def set_custom_testng():
# if cm.use_custom_testng_file == "TRUE":
# testng_source = Path(cm.workspace + "/" + "testng.xml")
# testng_destination = Path(cm.workspace + "/" + cm.product_id + "/" + TESTNG_DIST_XML_PATHS)
# testng_server_mgt_source = Path(cm.workspace + "/" + "testng-server-mgt.xml")
# testng_server_mgt_destination = Path(cm.workspace + "/" + cm.product_id + "/" + TESTNG_SERVER_MGT_DIST)
# # replace testng source
# cm.replace_file(testng_source, testng_destination)
# # replace testng server mgt source
# cm.replace_file(testng_server_mgt_source, testng_server_mgt_destination)
def configure_product():
try:
global datasource_path
global target_dir_abs_path
global storage_dist_abs_path
global pom_file_paths
datasource_path = DATASOURCE_PATHS
zip_name = dist_name + ZIP_FILE_EXTENSION
storage_dir_abs_path = Path(cm.workspace + "/" + PRODUCT_STORAGE_DIR_NAME)
target_dir_abs_path = Path(cm.workspace + "/" + cm.product_id + "/" + DISTRIBUTION_PATH)
storage_dist_abs_path = Path(storage_dir_abs_path / dist_name)
storage_zip_abs_path = Path(storage_dir_abs_path / zip_name)
configured_dist_storing_loc = Path(target_dir_abs_path / dist_name)
script_name = Path(WSO2SERVER)
script_path = Path(storage_dist_abs_path / script_name)
cm.extract_product(storage_dir_abs_path, storage_zip_abs_path)
cm.attach_jolokia_agent(script_path)
cm.copy_jar_file(Path(cm.database_config['sql_driver_location']), Path(storage_dist_abs_path / LIB_PATH))
if datasource_path is not None:
modify_datasources()
else:
logger.info("Datasource paths are not defined in the config file")
os.remove(str(storage_zip_abs_path))
cm.compress_distribution(configured_dist_storing_loc, storage_dir_abs_path)
cm.add_distribution_to_m2(storage_dir_abs_path, M2_PATH)
shutil.rmtree(configured_dist_storing_loc, onerror=cm.on_rm_error)
return database_names
except FileNotFoundError as e:
logger.error("Error occurred while finding files", exc_info=True)
except IOError as e:
logger.error("Error occurred while accessing files", exc_info=True)
except Exception as e:
logger.error("Error occurred while configuring the product", exc_info=True)
def build_source_without_tests(source_path):
"""Build the product-source.
"""
logger.info('Building the source skipping tests')
if sys.platform.startswith('win'):
subprocess.call(['mvn', 'clean', 'install', '-B', '-e','-Dmaven.test.skip=true'], shell=True, cwd=source_path)
else:
subprocess.call(['mvn', 'clean', 'install', '-B', '-e', '-Dmaven.test.skip=true'], cwd=source_path)
logger.info('Module build is completed. Module: ' + str(source_path))
def main():
try:
global logger
global dist_name
logger = cm.function_logger(logging.DEBUG, logging.DEBUG)
if sys.version_info < (3, 6):
raise Exception(
"To run run-intg-test.py script you must have Python 3.6 or latest. Current version info: " + sys.version_info)
cm.read_property_files()
if not cm.validate_property_readings():
raise Exception(
"Property file doesn't have mandatory key-value pair. Please verify the content of the property file "
"and the format")
# get properties assigned to local variables
pom_path = DIST_POM_PATH
engine = cm.db_engine.upper()
db_meta_data = get_db_meta_data(engine)
distribution_path = DISTRIBUTION_PATH
# construct the database configurations
cm.construct_db_config(db_meta_data)
# clone the repository
cm.clone_repo()
if cm.test_mode == "RELEASE":
cm.checkout_to_tag()
# product name retrieve from product pom files
dist_name = cm.get_dist_name(pom_path)
            # build the product once without tests so that samples and required artifacts are available.
build_source_without_tests(cm.workspace + "/" + cm.product_id + "/")
cm.get_latest_released_dist()
elif cm.test_mode == "SNAPSHOT":
# product name retrieve from product pom files
dist_name = cm.get_dist_name(pom_path)
cm.build_snapshot_dist(distribution_path)
elif cm.test_mode == "WUM":
dist_name = cm.get_dist_name_wum()
# populate databases
db_names = configure_product()
if db_names is None or not db_names:
raise Exception("Failed the product configuring")
cm.setup_databases(db_names, db_meta_data)
# run integration tests
        # Build the Common module
add_environmental_variables()
module_path = Path(cm.workspace + "/" + cm.product_id + "/" + 'modules/integration/tests-common')
logger.info('Building common module. Build path: '+ str(module_path) + ' \n')
cm.build_module(module_path)
intg_module_path = Path(cm.workspace + "/" + cm.product_id + "/" + INTEGRATION_PATH)
logger.info('Building integration module. Build path: '+ str(intg_module_path) + ' \n')
cm.build_module(intg_module_path)
save_test_output()
cm.create_output_property_fle()
except Exception as e:
logger.error("Error occurred while running the run-intg-test.py script", exc_info=True)
except BaseException as e:
logger.error("Error occurred while doing the configuration", exc_info=True)
if __name__ == "__main__":
main()
| 1.34375 | 1 |
src/pytest_notification/sound.py | rhpvorderman/pytest-notification | 2 | 3214 | # Copyright (c) 2019 Leiden University Medical Center
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import subprocess
import sys
from pathlib import Path
SOUNDS_DIR = (Path(__file__).parent / Path("sounds")).absolute()
DEFAULT_SUCCESS_SOUND = SOUNDS_DIR / Path("applause")
DEFAULT_FAIL_SOUND = SOUNDS_DIR / Path("buzzer")
def play_sound(sound_file: Path):
if sys.platform == "linux":
# paplay comes from PulseAudio and should be installed by default on
# most systems.
_play_sound_unix(sound_file.with_suffix(".oga"), program="paplay")
elif sys.platform == "darwin":
# Afplay comes installed by default on Macintosh
_play_sound_unix(sound_file.with_suffix(".mp3"), program="afplay")
else:
# A windows implementation should be possible with the winsound
# implementation, but that does not play ogg audio.
raise NotImplementedError(
"Playing sounds not supported by pytest-notification on {}"
"".format(sys.platform))
def _play_sound_unix(sound_file: Path, program):
"""
    Play a sound file on a Unix-like system with the given program.
    :param sound_file: Path to the sound file.
    :param program: Which program to use.
    :return: None. The sound is played in a non-blocking subprocess.
"""
# Play the sound non blocking, use Popen.
subprocess.Popen([program, str(sound_file)])
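# Example (hypothetical) usage, assuming the package is importable as pytest_notification:
#   from pytest_notification.sound import play_sound, DEFAULT_FAIL_SOUND
#   play_sound(DEFAULT_FAIL_SOUND)   # plays the buzzer sound without blocking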
| 1.9375 | 2 |
7KYU/next_prime.py | yaznasivasai/python_codewars | 4 | 3215 |
from math import sqrt
def is_simple(n: int) -> bool:
if n % 2 == 0 and n != 2:
return False
for i in range (3, int(sqrt(n)) + 2, 2):
if n % i == 0 and n != i:
return False
return True
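# Quick sanity checks (expected values):
#   is_simple(7)   -> True
#   is_simple(9)   -> False
#   next_prime(10) -> 11   (next_prime is defined below)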
def next_prime(n: int) -> int:
n += 1
if n <= 2:
return 2
else:
if n % 2 == 0:
n += 1
while not is_simple(n):
n += 2
return n | 4.0625 | 4 |
cozmo_sdk_examples/if_this_then_that/ifttt_gmail.py | manxueitp/cozmo-test | 0 | 3216 | #!/usr/bin/env python3
# Copyright (c) 2016 Anki, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''"If This Then That" Gmail example
This example demonstrates how "If This Then That" (http://ifttt.com) can be used
make Cozmo respond when a Gmail account receives an email. Instructions below
will lead you through setting up an applet on the IFTTT website. When the applet
trigger is called (which sends a web request received by the web server started
in this example), Cozmo will play an animation, speak the email sender's name and
show a mailbox image on his face.
Please place Cozmo on the charger for this example. When necessary, he will be
rolled off and back on.
Follow these steps to set up and run the example:
1) Provide a static IP address, URL, or similar that can be reached from the If This
Then That server. One easy way to do this is with ngrok, which sets up
a secure tunnel to localhost running on your machine.
To set up ngrok:
a) Follow instructions here to download and install:
https://ngrok.com/download
b) Run this command to create a secure public URL for port 8080:
./ngrok http 8080
c) Note the HTTP forwarding address shown in the terminal (e.g., http://55e57164.ngrok.io).
You will use this address in your applet, below.
WARNING: Using ngrok exposes your local web server to the internet. See the ngrok
documentation for more information: https://ngrok.com/docs
2) Set up your applet on the "If This Then That" website.
a) Sign up and sign into https://ifttt.com
b) Create an applet: https://ifttt.com/create
c) Set up your trigger.
1. Click "this".
2. Select "Gmail" as your service. If prompted, click "Connect",
select your Gmail account, and click “Allow” to provide permissions
to IFTTT for your email account. Click "Done".
3. Under "Choose a Trigger", select “Any new email in inbox".
d) Set up your action.
1. Click “that".
2. Select “Maker" to set it as your action channel. Connect to the Maker channel if prompted.
3. Click “Make a web request" and fill out the fields as follows. Remember your publicly
accessible URL from above (e.g., http://55e57164.ngrok.io) and use it in the URL field,
followed by "/iftttGmail" as shown below:
URL: http://55e57164.ngrok.io/iftttGmail
Method: POST
Content Type: application/json
Body: {"FromAddress":"{{FromAddress}}"}
        4. Click “Create Action" then “Finish".
3) Test your applet.
a) Run this script at the command line: ./ifttt_gmail.py
b) On ifttt.com, on your applet page, click “Check now”. See that IFTTT confirms that the applet
was checked.
c) Send an email to the Gmail account in your recipe
d) On your IFTTT applet webpage, again click “Check now”. This should cause IFTTT to detect that
the email was received and send a web request to the ifttt_gmail.py script.
e) In response to the ifttt web request, Cozmo should roll off the charger, raise and lower
his lift, announce the email, and then show a mailbox image on his face.
'''
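# For local testing (a hypothetical shortcut, not part of the IFTTT flow), the web request
# that IFTTT would send can be simulated with curl once this script is running:
#
#   curl -X POST http://localhost:8080/iftttGmail \
#        -H "Content-Type: application/json" \
#        -d '{"FromAddress": "[email protected]"}'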
import asyncio
import re
import sys
try:
from aiohttp import web
except ImportError:
sys.exit("Cannot import from aiohttp. Do `pip3 install --user aiohttp` to install")
import cozmo
from common import IFTTTRobot
app = web.Application()
async def serve_gmail(request):
'''Define an HTTP POST handler for receiving requests from If This Then That.
You may modify this method to change how Cozmo reacts to the email
being received.
'''
json_object = await request.json()
# Extract the name of the email sender.
from_email_address = json_object["FromAddress"]
# Use a regular expression to break apart pieces of the email address
match_object = re.search(r'([\w.]+)@([\w.]+)', from_email_address)
email_local_part = match_object.group(1)
robot = request.app['robot']
async def read_name():
try:
async with robot.perform_off_charger():
                '''If necessary, move Cozmo's head and lift to make it easy to see Cozmo's face.'''
await robot.get_in_position()
# First, have Cozmo play animation "ID_pokedB", which tells
# Cozmo to raise and lower his lift. To change the animation,
# you may replace "ID_pokedB" with another animation. Run
# remote_control_cozmo.py to see a list of animations.
await robot.play_anim(name='ID_pokedB').wait_for_completed()
# Next, have Cozmo speak the name of the email sender.
await robot.say_text("Email from " + email_local_part).wait_for_completed()
# Last, have Cozmo display an email image on his face.
robot.display_image_file_on_face("../face_images/ifttt_gmail.png")
except cozmo.RobotBusy:
cozmo.logger.warning("Robot was busy so didn't read email address: "+ from_email_address)
# Perform Cozmo's task in the background so the HTTP server responds immediately.
asyncio.ensure_future(read_name())
return web.Response(text="OK")
# Attach the function as an HTTP handler.
app.router.add_post('/iftttGmail', serve_gmail)
if __name__ == '__main__':
cozmo.setup_basic_logging()
cozmo.robot.Robot.drive_off_charger_on_connect = False
# Use our custom robot class with extra helper methods
cozmo.conn.CozmoConnection.robot_factory = IFTTTRobot
try:
sdk_conn = cozmo.connect_on_loop(app.loop)
# Wait for the robot to become available and add it to the app object.
app['robot'] = app.loop.run_until_complete(sdk_conn.wait_for_robot())
except cozmo.ConnectionError as e:
sys.exit("A connection error occurred: %s" % e)
web.run_app(app)
| 3.15625 | 3 |
plotutils.py | parkus/mypy | 1 | 3217 | # -*- coding: utf-8 -*-
"""
Created on Fri May 30 17:15:27 2014
@author: Parke
"""
from __future__ import division, print_function, absolute_import
import numpy as np
import matplotlib as mplot
import matplotlib.pyplot as plt
import mypy.my_numpy as mnp
dpi = 100
fullwidth = 10.0
halfwidth = 5.0
# use these with line.set_dashes and iterate through more linestyles than come with matplotlib
# consider using a ::2 slice for fewer
dashes = [[],
[30, 10],
[20, 8],
[10, 5],
[3, 2],
[30, 5, 3, 5, 10, 5, 3, 5],
[15] + [5, 3]*3 + [5],
[15] + [5, 3]*2 + [5],
[15] + [5, 3] + [5]]
def click_coords(fig=None, timeout=600.):
if fig is None:
fig = plt.gcf()
xy = []
def onclick(event):
if not event.inaxes:
fig.canvas.stop_event_loop()
else:
xy.append([event.xdata, event.ydata])
print("Gathering coordinates of mouse clicks. Click outside of the axes " \
"when done.")
cid = fig.canvas.mpl_connect('button_press_event', onclick)
fig.canvas.start_event_loop(timeout=timeout)
fig.canvas.mpl_disconnect(cid)
return np.array(xy)
def common_axes(fig, pos=None):
if pos is None:
bigax = fig.add_subplot(111)
else:
bigax = fig.add_axes(pos)
[bigax.spines[s].set_visible(False) for s in ['top', 'bottom', 'left', 'right']]
bigax.tick_params(labelleft=False, labelbottom=False, left='off', bottom='off')
bigax.set_zorder(-10)
return bigax
def log_frac(x, frac):
l0, l1 = list(map(np.log10, x))
ld = l1 - l0
l = ld*frac + l0
return 10**l
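# e.g. log_frac([1, 100], 0.5) -> 10.0 (the point halfway between 1 and 100 in log space)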
def log2linear(x, errneg=None, errpos=None):
xl = 10**x
result = [xl]
if errneg is not None:
xn = xl - 10**(x - np.abs(errneg))
result.append(xn)
if errpos is not None:
xp = 10**(x + errpos) - xl
result.append(xp)
return result
def linear2log(x, errneg=None, errpos=None):
xl = np.log10(x)
result = [x]
if errneg is not None:
xn = xl - np.log10(x - np.abs(errneg))
result.append(xn)
if errpos is not None:
xp = np.log10(x + errpos) - xl
result.append(xp)
return result
def step(*args, **kwargs):
edges, values = args[0], args[1]
# deal with potentially gappy 2-column bin specifications
edges = np.asarray(edges)
if edges.ndim == 2:
if np.any(edges[1:,0] < edges[:-1,1]):
raise ValueError('Some bins overlap')
if np.any(edges[1:,0] < edges[:-1,0]):
raise ValueError('Bins must be in increasing order.')
gaps = edges[1:,0] > edges[:-1,1]
edges = np.unique(edges)
if np.any(gaps):
            values = np.insert(values, np.nonzero(gaps)[0] + 1, np.nan)  # the NaN gap bin sits right after bin i
edges = mnp.lace(edges[:-1], edges[1:])
values = mnp.lace(values, values)
args = list(args)
args[0], args[1] = edges, values
ax = kwargs.pop('ax', plt.gca())
return ax.plot(*args, **kwargs)
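# Illustrative usage: bins can be given as shared edges or as (start, stop) rows, in which
# case gaps between bins are rendered as NaN breaks in the line:
#   step([0, 1, 2, 3], [5.0, 7.0, 6.0])        # three contiguous bins
#   step([[0, 1], [2, 3]], [5.0, 6.0])         # gap between 1 and 2 -> NaN inserted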
def point_along_line(x, y, xfrac=None, xlbl=None, scale='linear'):
if scale == 'log':
        logxlbl = None if xlbl is None else np.log10(xlbl)
        lx, ly = point_along_line(np.log10(x), np.log10(y), xfrac, logxlbl, scale='linear')
return 10 ** lx, 10 ** ly
if xfrac is not None:
if xfrac == 0:
return x[0], y[0]
if xfrac == 1:
return x[-1], y[-1]
else:
d = np.cumsum(np.sqrt(np.diff(x)**2 + np.diff(y)**2))
d = np.insert(d, 0, 0)
f = d/d[-1]
xp, yp = [np.interp(xfrac, f, a) for a in [x,y]]
return xp, yp
if xlbl is not None:
return xlbl, np.interp(xlbl, x, y)
def textSize(ax_or_fig=None, coordinate='data'):
"""
Return x & y scale factors for converting text sizes in points to another coordinate. Useful for properly spacing
text labels and such when you need to know sizes before the text is made (otherwise you can use textBoxSize).
Coordinate can be 'data', 'axes', or 'figure'.
If data coordinates are requested and the data is plotted on a log scale, then the factor will be given in dex.
"""
if ax_or_fig is None:
fig = plt.gcf()
ax = fig.gca()
else:
if isinstance(ax_or_fig, plt.Figure):
fig = ax_or_fig
ax = fig.gca()
elif isinstance(ax_or_fig, plt.Axes):
ax = ax_or_fig
fig = ax.get_figure()
else:
raise TypeError('ax_or_fig must be a Figure or Axes instance, if given.')
w_fig_in, h_fig_in = ax.get_figure().get_size_inches()
    if coordinate in ('fig', 'figure'):
return 1.0/(w_fig_in*72), 1.0/(h_fig_in*72)
w_ax_norm, h_ax_norm = ax.get_position().size
w_ax_in = w_ax_norm * w_fig_in
h_ax_in = h_ax_norm * h_fig_in
w_ax_pts, h_ax_pts = w_ax_in*72, h_ax_in*72
if coordinate == 'axes':
return 1.0/w_ax_pts, 1.0/h_ax_pts
if coordinate == 'data':
xlim = ax.get_xlim()
ylim = ax.get_ylim()
if ax.get_xscale() == 'log': xlim = np.log10(xlim)
if ax.get_yscale() == 'log': ylim = np.log10(ylim)
w_ax_data = xlim[1] - xlim[0]
h_ax_data = ylim[1] - ylim[0]
return w_ax_data/w_ax_pts, h_ax_data/h_ax_pts
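# Illustrative usage: estimate the data-coordinate footprint of a 12 pt label before
# drawing it (the 0.6 average glyph-width factor is a rough assumption):
#   dx_per_pt, dy_per_pt = textSize(ax, coordinate='data')
#   est_width = 12 * 0.6 * len('my label') * dx_per_pt
#   est_height = 12 * dy_per_pt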
def tight_axis_limits(ax=None, xory='both', margin=0.05):
if ax is None: ax = plt.gca()
def newlim(oldlim):
delta = abs(oldlim[1] - oldlim[0])
pad = delta*margin
if oldlim[1] > oldlim[0]:
return (oldlim[0] - pad, oldlim[1] + pad)
else:
return (oldlim[0] + pad, oldlim[1] - pad)
def newlim_log(oldlim):
loglim = [np.log10(l) for l in oldlim]
newloglim = newlim(loglim)
return (10.0**newloglim[0], 10.0**newloglim[1])
def newlim_either(oldlim,axlim,scale):
if axlim[1] < axlim [0]: oldlim = oldlim[::-1]
if scale == 'linear':
return newlim(oldlim)
elif scale == 'log':
return newlim_log(oldlim)
elif scale == 'symlog':
            raise NotImplementedError('Past Parke to future Parke, you didn\'t write an implementation for symlog-'
                                      'scaled axes.')
if xory == 'x' or xory == 'both':
datalim = ax.dataLim.extents[[0,2]]
axlim = ax.get_xlim()
scale = ax.get_xscale()
ax.set_xlim(newlim_either(datalim,axlim,scale))
if xory == 'y' or xory == 'both':
datalim = ax.dataLim.extents[[1,3]]
axlim = ax.get_ylim()
scale = ax.get_yscale()
ax.set_ylim(newlim_either(datalim,axlim,scale))
#TODO: discard this function?
def standard_figure(app, slideAR=1.6, height=1.0):
"""Generate a figure of standard size for publishing.
implemented values for app (application) are:
'fullslide'
height is the fractional height of the figure relative to the "standard"
height. For slides the standard is the full height of a slide.
returns the figure object and default font size
"""
if app == 'fullslide':
fontsize = 20
figsize = [fullwidth, fullwidth/slideAR*height]
fig = mplot.pyplot.figure(figsize=figsize, dpi=dpi)
mplot.rcParams.update({'font.size': fontsize})
return fig, fontsize
def pcolor_reg(x, y, z, **kw):
"""
Similar to `pcolor`, but assume that the grid is uniform,
and do plotting with the (much faster) `imshow` function.
"""
x, y, z = np.asarray(x), np.asarray(y), np.asarray(z)
if x.ndim != 1 or y.ndim != 1:
raise ValueError("x and y should be 1-dimensional")
if z.ndim != 2 or z.shape != (y.size, x.size):
raise ValueError("z.shape should be (y.size, x.size)")
dx = np.diff(x)
dy = np.diff(y)
if not np.allclose(dx, dx[0], 1e-2) or not np.allclose(dy, dy[0], 1e-2):
raise ValueError("The grid must be uniform")
if np.issubdtype(z.dtype, np.complexfloating):
zp = np.zeros(z.shape, float)
zp[...] = z[...]
z = zp
plt.imshow(z, origin='lower',
extent=[x.min(), x.max(), y.min(), y.max()],
interpolation='nearest',
aspect='auto',
**kw)
plt.axis('tight')
def errorpoly(x, y, yerr, fmt=None, ecolor=None, ealpha=0.5, ax=None, **kw):
if ax is None: ax = plt.gca()
p = ax.plot(x, y, **kw) if fmt is None else ax.plot(x, y, fmt, **kw)
if len(yerr.shape) == 2:
ylo = y - yerr[0,:]
yhi = y + yerr[1,:]
else:
ylo, yhi = y - yerr, y + yerr
if ecolor is None: ecolor = p[0].get_color()
# deal with matplotlib sometimes not showing polygon when it extends beyond plot range
xlim = ax.get_xlim()
inrange = mnp.inranges(x, xlim)
if not np.all(inrange):
n = np.sum(inrange)
yends = np.interp(xlim, x, y)
yloends = np.interp(xlim, x, ylo)
yhiends = np.interp(xlim, x, yhi)
x = np.insert(x[inrange], [0, n], xlim)
y = np.insert(y[inrange], [0, n], yends)
ylo = np.insert(ylo[inrange], [0, n], yloends)
yhi = np.insert(yhi[inrange], [0, n], yhiends)
f = ax.fill_between(x,ylo,yhi,color=ecolor,alpha=ealpha)
return p[0],f
def onscreen_pres(mpl, screenwidth=1200):
"""
Set matplotlibrc values so that plots are readable as they are created
and maximized for an audience far from a screen.
Parameters
----------
mpl : module
Current matplotlib module. Use 'import matplotlib as mpl'.
    screenwidth : int
Width of the screen in question in pixels.
Returns
-------
None
"""
mpl.rcParams['lines.linewidth'] = 2
fontsize = round(14 / (800.0 / screenwidth))
mpl.rcParams['font.size'] = fontsize
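# Example:
#   import matplotlib as mpl
#   onscreen_pres(mpl, screenwidth=1600)   # larger fonts for a 1600 px wide screen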
def textBoxSize(txt, transformation=None, figure=None):
"""Get the width and height of a text object's bounding box transformed to the desired coordinates. Defaults to
figure coordinates if transformation is None."""
    fig = txt.get_figure() if figure is None else figure
if transformation is None:
transformation = fig.transFigure
coordConvert = transformation.inverted().transform
bboxDisp = txt.get_window_extent(fig.canvas.renderer)
bboxConv = coordConvert(bboxDisp)
w = bboxConv[1,0] - bboxConv[0,0]
h = bboxConv[1,1] - bboxConv[0,1]
return w, h
def stars3d(ra, dec, dist, T=5000.0, r=1.0, labels='', view=None, size=(800,800), txt_scale=1.0):
"""
Make a 3D diagram of stars positions relative to the Sun, with
semi-accurate colors and distances as desired. Coordinates must be in
degrees. Distance is assumed to be in pc (for axes labels).
Meant to be used with only a handful of stars.
"""
from mayavi import mlab
from color.maps import true_temp
n = len(ra)
dec, ra = dec*np.pi/180.0, ra*np.pi/180.0
makearr = lambda v: np.array([v] * n) if np.isscalar(v) else v
T, r, labels = list(map(makearr, (T, r, labels)))
# add the sun
ra, dec, dist = list(map(np.append, (ra, dec, dist), (0.0, 0.0, 0.0)))
r, T, labels = list(map(np.append, (r, T, labels), (1.0, 5780.0, 'Sun')))
# get xyz coordinates
z = dist * np.sin(dec)
h = dist * np.cos(dec)
x = h * np.cos(ra)
y = h * np.sin(ra)
# make figure
fig = mlab.figure(bgcolor=(0,0,0), fgcolor=(1,1,1), size=size)
# plot lines down to the dec=0 plane for all but the sun
lines = []
for x1, y1, z1 in list(zip(x, y, z))[:-1]:
xx, yy, zz = [x1, x1], [y1, y1], [0.0, z1]
line = mlab.plot3d(xx, yy, zz, color=(0.7,0.7,0.7), line_width=0.5,
figure=fig)
lines.append(line)
# plot spheres
r_factor = np.max(dist) / 30.0
pts = mlab.quiver3d(x, y, z, r, r, r, scalars=T, mode='sphere',
scale_factor=r_factor, figure=fig, resolution=100)
pts.glyph.color_mode = 'color_by_scalar'
# center the glyphs on the data point
pts.glyph.glyph_source.glyph_source.center = [0, 0, 0]
# set a temperature colormap
cmap = true_temp(T)
pts.module_manager.scalar_lut_manager.lut.table = cmap
# set the camera view
mlab.view(focalpoint=(0.0, 0.0, 0.0), figure=fig)
if view is not None:
mlab.view(*view, figure=fig)
## add labels
# unit vec to camera
view = mlab.view()
az, el = view[:2]
hc = np.sin(el * np.pi / 180.0)
xc = hc * np.cos(az * np.pi / 180.0)
yc = hc * np.sin(az * np.pi / 180.0)
zc = -np.cos(el * np.pi / 180.0)
# unit vec orthoganal to camera
if xc**2 + yc**2 == 0.0:
xoff = 1.0
yoff = 0.0
zoff = 0.0
else:
xoff = yc / np.sqrt(xc**2 + yc**2)
yoff = np.sqrt(1.0 - xoff**2)
zoff = 0.0
# xoff, yoff, zoff = xc, yc, zc
# scale orthogonal vec by sphere size
r_label = 1.0 * r_factor
xoff, yoff, zoff = [r_label * v for v in [xoff, yoff, zoff]]
# plot labels
size = r_factor * txt_scale * 0.75
for xx, yy, zz, label in zip(x, y, z, labels):
mlab.text3d(xx + xoff, yy + yoff, zz + zoff, label, figure=fig,
color=(1,1,1), scale=size)
## add translucent dec=0 surface
n = 101
t = np.linspace(0.0, 2*np.pi, n)
r = np.max(dist * np.cos(dec))
x, y = r*np.cos(t), r*np.sin(t)
z = np.zeros(n+1)
x, y = [np.insert(a, 0, 0.0) for a in [x,y]]
triangles = [(0, i, i + 1) for i in range(1, n)]
mlab.triangular_mesh(x, y, z, triangles, color=(1,1,1), opacity=0.3, figure=fig)
## add ra=0 line
line = mlab.plot3d([0, r], [0, 0], [0, 0], color=(1,1,1), line_width=1, figure=fig)
rtxt = '{:.1f} pc'.format(r)
orientation=np.array([180.0, 180.0, 0.0])
mlab.text3d(r, 0, 0, rtxt, figure=fig, scale=size*1.25, orient_to_camera=False, orientation=orientation)
if view is not None:
mlab.view(*view, figure=fig)
return fig
| 2.59375 | 3 |
marvel_world/views.py | xiaoranppp/si664-final | 0 | 3218 | from django.shortcuts import render,redirect
from django.http import HttpResponse,HttpResponseRedirect
from django.views import generic
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from .models import Character,Comic,Power,CharacterPower,CharacterComic
from django_filters.views import FilterView
from .filters import Marvel_worldFilter,Marvel_comicFilter
from .forms import CharacterForm,PowerForm,ComicForm
from django.urls import reverse,reverse_lazy
def index(request):
return HttpResponse("Hello, world. You're at the marvel world super hero")
class AboutPageView(generic.TemplateView):
template_name = 'marvel_world/about.html'
class HomePageView(generic.TemplateView):
template_name = 'marvel_world/home.html'
@method_decorator(login_required, name='dispatch')
class CharacterListView(generic.ListView):
model = Character
context_object_name = 'characters'
template_name = 'marvel_world/characters.html'
paginate_by = 50
def get_queryset(self):
return Character.objects.all().select_related('alignment','eye_color','skin_color','hair_color','race','gender','publisher').order_by('character_name')
@method_decorator(login_required, name='dispatch')
class CharacterDetailView(generic.DetailView):
model = Character
	context_object_name = 'character'
template_name = 'marvel_world/character_information.html'
@method_decorator(login_required, name='dispatch')
class ComicListView(generic.ListView):
model = Comic
context_object_name = 'comics'
template_name = 'marvel_world/comics.html'
paginate_by = 600
def get_queryset(self):
return Comic.objects.all().order_by('comic_name')
@method_decorator(login_required, name='dispatch')
class ComicDetailView(generic.DetailView):
model = Comic
	context_object_name = 'comic'
template_name = 'marvel_world/comic_information.html'
@method_decorator(login_required, name='dispatch')
class PowerListView(generic.ListView):
model = Power
context_object_name = 'powers'
template_name = 'marvel_world/super_power.html'
paginate_by = 50
def get_queryset(self):
return Power.objects.all().order_by('power_name')
@method_decorator(login_required, name='dispatch')
class PowerDetailView(generic.DetailView):
model = Power
	context_object_name = 'power'
template_name = 'marvel_world/super_power_information.html'
@method_decorator(login_required, name='dispatch')
class CharacterFilterView(FilterView):
filterset_class = Marvel_worldFilter
template_name = 'marvel_world/character_filter.html'
@method_decorator(login_required, name='dispatch')
class ComicFilterView(FilterView):
filterset_class = Marvel_comicFilter
template_name = 'marvel_world/comic_filter.html'
@method_decorator(login_required, name='dispatch')
class CharacterCreateView(generic.View):
model = Character
form_class = CharacterForm
success_message = "Character created successfully"
template_name = 'marvel_world/character_new.html'
# fields = '__all__' <-- superseded by form_class
# success_url = reverse_lazy('heritagesites/site_list')
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def post(self, request):
form = CharacterForm(request.POST)
if form.is_valid():
character = form.save(commit=False)
character.save()
for power in form.cleaned_data['super_power']:
CharacterPower.objects.create(character=character, power=power)
for comic in form.cleaned_data['comics']:
CharacterComic.objects.create(character=character, comic=comic)
return redirect(character) # shortcut to object's get_absolute_url()
# return HttpResponseRedirect(site.get_absolute_url())
return render(request, 'marvel_world/character_new.html', {'form': form})
def get(self, request):
form = CharacterForm()
return render(request, 'marvel_world/character_new.html', {'form': form})
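# Illustrative URLconf wiring for the create view above (a sketch; the route and
# name below are assumptions, not taken from this project's urls.py):
# path('character/new/', CharacterCreateView.as_view(), name='character_new'),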
@method_decorator(login_required, name='dispatch')
class PowerCreateView(generic.View):
model = Power
form_class = PowerForm
success_message = "Super power created successfully"
template_name = 'marvel_world/power_new.html'
# fields = '__all__' <-- superseded by form_class
# success_url = reverse_lazy('heritagesites/site_list')
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def post(self, request):
form = PowerForm(request.POST)
if form.is_valid():
power = form.save(commit=False)
power.save()
for character in form.cleaned_data['character']:
CharacterPower.objects.create(character=character, power=power)
return redirect(power) # shortcut to object's get_absolute_url()
# return HttpResponseRedirect(site.get_absolute_url())
return render(request, 'marvel_world/power_new.html', {'form': form})
def get(self, request):
form = PowerForm()
return render(request, 'marvel_world/power_new.html', {'form': form})
@method_decorator(login_required, name='dispatch')
class ComicCreateView(generic.View):
model = Comic
form_class = ComicForm
success_message = "Comic created successfully"
template_name = 'marvel_world/comic_new.html'
# fields = '__all__' <-- superseded by form_class
# success_url = reverse_lazy('heritagesites/site_list')
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def post(self, request):
form = ComicForm(request.POST)
if form.is_valid():
comic = form.save(commit=False)
comic.save()
for character in form.cleaned_data['character']:
CharacterComic.objects.create(character=character, comic=comic)
return redirect(comic) # shortcut to object's get_absolute_url()
# return HttpResponseRedirect(site.get_absolute_url())
return render(request, 'marvel_world/comic_new.html', {'form': form})
def get(self, request):
form = ComicForm()
return render(request, 'marvel_world/comic_new.html', {'form': form})
#class CharacterDetailView(generic.DetailView):model = Characters context_object_name= 'character'template_name='marvel_world/character_information.html'
@method_decorator(login_required, name='dispatch')
class CharacterUpdateView(generic.UpdateView):
model = Character
form_class = CharacterForm
# fields = '__all__' <-- superseded by form_class
context_object_name = 'character'
# pk_url_kwarg = 'site_pk'
success_message = "Character updated successfully"
template_name = 'marvel_world/character_update.html'
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def form_valid(self, form):
character = form.save(commit=False)
# site.updated_by = self.request.user
# site.date_updated = timezone.now()
character.save()
		# Current power ids linked to this character
old_ids = CharacterPower.objects\
.values_list('power_id', flat=True)\
.filter(character_id=character.character_id)
		# New powers selected in the form
new_powers = form.cleaned_data['super_power']
# TODO can these loops be refactored?
# New ids
new_ids = []
		# Insert CharacterPower rows for newly added powers
for power in new_powers:
new_id = power.power_id
new_ids.append(new_id)
if new_id in old_ids:
continue
else:
CharacterPower.objects \
.create(character=character, power=power)
		# Delete CharacterPower rows for powers that were removed
for old_id in old_ids:
if old_id in new_ids:
continue
else:
CharacterPower.objects \
.filter(character_id=character.character_id, power_id=old_id) \
.delete()
old_ids1 = CharacterComic.objects\
.values_list('comic_id', flat=True)\
.filter(character_id=character.character_id)
		# New comics selected in the form
new_comics = form.cleaned_data['comics']
# TODO can these loops be refactored?
# New ids
new_ids1 = []
		# Insert CharacterComic rows for newly added comics
for comic in new_comics:
new_id1 = comic.comic_id
new_ids1.append(new_id1)
if new_id1 in old_ids1:
continue
else:
CharacterComic.objects \
.create(character=character, comic=comic)
		# Delete CharacterComic rows for comics that were removed
for old_id1 in old_ids1:
if old_id1 in new_ids1:
continue
else:
CharacterComic.objects \
.filter(character_id=character.character_id, comic_id=old_id1) \
.delete()
return HttpResponseRedirect(character.get_absolute_url())
@method_decorator(login_required, name='dispatch')
class PowerUpdateView(generic.UpdateView):
model = Power
form_class = PowerForm
# fields = '__all__' <-- superseded by form_class
context_object_name = 'power'
# pk_url_kwarg = 'site_pk'
success_message = "Super power updated successfully"
template_name = 'marvel_world/power_update.html'
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def form_valid(self, form):
power = form.save(commit=False)
# site.updated_by = self.request.user
# site.date_updated = timezone.now()
power.save()
		# Current character ids linked to this power
old_ids = CharacterPower.objects\
.values_list('character_id', flat=True)\
.filter(power_id=power.power_id)
		# New characters selected in the form
new_chs = form.cleaned_data['character']
# TODO can these loops be refactored?
# New ids
new_ids = []
		# Insert CharacterPower rows for newly added characters
for character in new_chs:
new_id = character.character_id
new_ids.append(new_id)
if new_id in old_ids:
continue
else:
CharacterPower.objects \
.create(character=character, power=power)
		# Delete CharacterPower rows for characters that were removed
for old_id in old_ids:
if old_id in new_ids:
continue
else:
CharacterPower.objects \
.filter(character_id=old_id, power_id=power.power_id) \
.delete()
return HttpResponseRedirect(power.get_absolute_url())
# return redirect('heritagesites/site_detail', pk=site.pk)
@method_decorator(login_required, name='dispatch')
class ComicUpdateView(generic.UpdateView):
model = Comic
form_class = ComicForm
# fields = '__all__' <-- superseded by form_class
context_object_name = 'comic'
# pk_url_kwarg = 'site_pk'
success_message = "Comic updated successfully"
template_name = 'marvel_world/comic_update.html'
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def form_valid(self, form):
comic = form.save(commit=False)
# site.updated_by = self.request.user
# site.date_updated = timezone.now()
comic.save()
		# Current character ids linked to this comic
old_ids = CharacterComic.objects\
.values_list('character_id', flat=True)\
.filter(comic_id=comic.comic_id)
		# New characters selected in the form
new_chs = form.cleaned_data['character']
# TODO can these loops be refactored?
# New ids
new_ids = []
		# Insert CharacterComic rows for newly added characters
for character in new_chs:
new_id = character.character_id
new_ids.append(new_id)
if new_id in old_ids:
continue
else:
CharacterComic.objects \
.create(character=character, comic=comic)
		# Delete CharacterComic rows for characters that were removed
for old_id in old_ids:
if old_id in new_ids:
continue
else:
CharacterComic.objects \
.filter(character_id=old_id, comic_id=comic.comic_id) \
.delete()
return HttpResponseRedirect(comic.get_absolute_url())
@method_decorator(login_required, name='dispatch')
class CharacterDeleteView(generic.DeleteView):
	model = Character
success_message = "Character deleted successfully"
success_url = reverse_lazy('characters')
context_object_name = 'character'
template_name = 'marvel_world/character_delete.html'
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
		# Delete related CharacterPower and CharacterComic rows first
CharacterPower.objects \
.filter(character_id=self.object.character_id) \
.delete()
CharacterComic.objects \
.filter(character_id=self.object.character_id) \
.delete()
self.object.delete()
return HttpResponseRedirect(self.get_success_url())
@method_decorator(login_required, name='dispatch')
class PowerDeleteView(generic.DeleteView):
	model = Power
success_message = "Super power deleted successfully"
success_url = reverse_lazy('super_power')
context_object_name = 'power'
template_name = 'marvel_world/power_delete.html'
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
		# Delete related CharacterPower rows first
CharacterPower.objects \
.filter(power_id=self.object.power_id) \
.delete()
self.object.delete()
return HttpResponseRedirect(self.get_success_url())
@method_decorator(login_required, name='dispatch')
class ComicDeleteView(generic.DeleteView):
	model = Comic
success_message = "Comic deleted successfully"
success_url = reverse_lazy('comics')
context_object_name = 'comic'
template_name = 'marvel_world/comic_delete.html'
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
		# Delete related CharacterComic rows first
CharacterComic.objects \
.filter(comic_id=self.object.comic_id) \
.delete()
self.object.delete()
return HttpResponseRedirect(self.get_success_url()) | 2.15625 | 2 |
src/rpi/fwd.py | au-chrismor/selfdrive | 0 | 3219 | """Set up the GPIO pins and drive the right motor forward."""
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
# Right motor input A
GPIO.setup(18, GPIO.OUT)
# Right motor input B
GPIO.setup(23, GPIO.OUT)
GPIO.output(18, GPIO.HIGH)
GPIO.output(23, GPIO.LOW)
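# Illustrative follow-up, not part of the original script: run the motor briefly,
# then release the GPIO pins (assumes the same wiring as above).
# time.sleep(2)
# GPIO.output(18, GPIO.LOW)
# GPIO.cleanup()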
| 3.078125 | 3 |
util/get_from_db.py | Abel-Huang/simple-image-classifier | 4 | 3220 | import pymysql
# Connection settings
config = {
'host': '127.0.0.1',
'port': 3306,
'user': 'root',
'password': '',
'db': 'classdata',
'charset': 'utf8',
'cursorclass': pymysql.cursors.DictCursor,
}
def get_summary_db(unitag):
    # Create a connection
conn = pymysql.connect(**config)
cur = conn.cursor()
    # Execute the SQL statement
try:
        # Run the query
sql = 'SELECT * FROM summary where unitag= %s'
        cur.execute(sql, (unitag,))
        # Fetch all results
result = cur.fetchall()
return result
finally:
cur.close()
conn.close()
def get_result_db(unitag):
    # Create a connection
conn = pymysql.connect(**config)
cur = conn.cursor()
    # Execute the SQL statement
try:
        # Run the query
sql = 'SELECT * FROM result where unitag= %s'
        cur.execute(sql, (unitag,))
        # Fetch all results
result = cur.fetchall()
return result
finally:
cur.close()
conn.close()
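# Minimal usage sketch (assumes a reachable MySQL server with the credentials above
# and a populated `classdata` database; 'demo' is a placeholder unitag value):
if __name__ == '__main__':
    for row in get_summary_db('demo'):
        print(row)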
| 2.78125 | 3 |
registerapp/api.py | RajapandiR/django-register | 0 | 3221 | <gh_stars>0
from rest_framework import viewsets
from rest_framework.views import APIView
from registerapp import serializers
from registerapp import models
class RegisterViewSet(viewsets.ModelViewSet):
serializer_class = serializers.RegisterSerializer
queryset = models.RegisterPage.objects.all()
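# Illustrative router wiring (a sketch; the route prefix is an assumption):
# from rest_framework import routers
# router = routers.DefaultRouter()
# router.register('registers', RegisterViewSet)
# urlpatterns = router.urls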
| 1.554688 | 2 |
jduck/robot.py | luutp/jduck | 0 | 3222 | <reponame>luutp/jduck
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
jduck.py
Description:
Author: luutp
Contact: <EMAIL>
Created on: 2021/02/27
"""
# Utilities
# %%
# ================================IMPORT PACKAGES====================================
# Utilities
from traitlets.config.configurable import SingletonConfigurable
# Custom Packages
from jduck.DCMotor import DCMotor
# ================================================================================
class JDuck(SingletonConfigurable):
def __init__(self, *args, **kwargs):
self.left_motor = DCMotor(32, 36, 38, alpha=1.0)
self.right_motor = DCMotor(33, 35, 37, alpha=1.0)
self.left_motor.set_speed(50)
self.right_motor.set_speed(50)
def set_speeds(self, left_speed, right_speed):
self.left_motor.set_speed(left_speed)
self.right_motor.set_speed(right_speed)
def move_forward(self):
self.left_motor.rotate_forward()
self.right_motor.rotate_forward()
def move_backward(self):
self.left_motor.rotate_backward()
self.right_motor.rotate_backward()
def turn_left(self):
self.left_motor.rotate_backward()
self.right_motor.rotate_forward()
def turn_right(self):
self.left_motor.rotate_forward()
self.right_motor.rotate_backward()
def stop(self):
self.left_motor.stop()
self.right_motor.stop()
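# Illustrative usage sketch (assumes the motor pins configured above match the wiring):
if __name__ == "__main__":
    import time
    duck = JDuck()
    duck.set_speeds(60, 60)
    duck.move_forward()
    time.sleep(1.0)
    duck.stop()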
| 2.359375 | 2 |
src/nebulo/gql/alias.py | olirice/nebulo | 76 | 3223 | # pylint: disable=missing-class-docstring,invalid-name
import typing
from graphql.language import (
InputObjectTypeDefinitionNode,
InputObjectTypeExtensionNode,
ObjectTypeDefinitionNode,
ObjectTypeExtensionNode,
)
from graphql.type import (
GraphQLArgument,
GraphQLBoolean,
GraphQLEnumType,
GraphQLEnumValue,
GraphQLField,
GraphQLFieldMap,
GraphQLFloat,
GraphQLID,
GraphQLInputFieldMap,
GraphQLInputObjectType,
GraphQLInt,
GraphQLInterfaceType,
GraphQLIsTypeOfFn,
GraphQLList,
GraphQLNonNull,
GraphQLObjectType,
GraphQLResolveInfo,
GraphQLScalarType,
GraphQLSchema,
GraphQLString,
GraphQLType,
Thunk,
)
from graphql.type.definition import GraphQLInputFieldOutType
from nebulo.sql.composite import CompositeType as SQLACompositeType
# Handle name changes from graphql-core and graphql-core-next
try:
from graphql.type import GraphQLInputObjectField as GraphQLInputField
except ImportError:
from graphql.type import GraphQLInputField
Type = GraphQLType
List = GraphQLList
NonNull = GraphQLNonNull
Argument = GraphQLArgument
Boolean = GraphQLBoolean
String = GraphQLString
ScalarType = GraphQLScalarType
ID = GraphQLID
InterfaceType = GraphQLInterfaceType
Int = GraphQLInt
InputField = GraphQLInputField
ResolveInfo = GraphQLResolveInfo
EnumType = GraphQLEnumType
EnumValue = GraphQLEnumValue
Schema = GraphQLSchema
Field = GraphQLField
Float = GraphQLFloat
EnumType = GraphQLEnumType
class HasSQLAModel: # pylint: disable= too-few-public-methods
sqla_table = None
class HasSQLFunction: # pylint: disable= too-few-public-methods
sql_function = None
class HasSQLAComposite: # pylint: disable= too-few-public-methods
sqla_composite: SQLACompositeType
class ObjectType(GraphQLObjectType, HasSQLAModel):
def __init__(
self,
name: str,
fields: Thunk[GraphQLFieldMap],
interfaces: typing.Optional[Thunk[typing.Collection["GraphQLInterfaceType"]]] = None,
is_type_of: typing.Optional[GraphQLIsTypeOfFn] = None,
extensions: typing.Optional[typing.Dict[str, typing.Any]] = None,
description: typing.Optional[str] = None,
ast_node: typing.Optional[ObjectTypeDefinitionNode] = None,
extension_ast_nodes: typing.Optional[typing.Collection[ObjectTypeExtensionNode]] = None,
sqla_model=None,
) -> None:
super().__init__(
name=name,
fields=fields,
interfaces=interfaces,
is_type_of=is_type_of,
extensions=extensions,
description=description,
ast_node=ast_node,
extension_ast_nodes=extension_ast_nodes,
)
self.sqla_model = sqla_model
class ConnectionType(ObjectType):
pass
class EdgeType(ObjectType):
pass
class TableType(ObjectType):
pass
class CompositeType(ObjectType, HasSQLAComposite):
pass
class MutationPayloadType(ObjectType):
pass
class CreatePayloadType(MutationPayloadType):
pass
class UpdatePayloadType(MutationPayloadType):
pass
class DeletePayloadType(MutationPayloadType):
pass
class FunctionPayloadType(MutationPayloadType, HasSQLFunction):
pass
class InputObjectType(GraphQLInputObjectType, HasSQLAModel):
def __init__(
self,
name: str,
fields: Thunk[GraphQLInputFieldMap],
description: typing.Optional[str] = None,
out_type: typing.Optional[GraphQLInputFieldOutType] = None,
extensions: typing.Optional[typing.Dict[str, typing.Any]] = None,
ast_node: typing.Optional[InputObjectTypeDefinitionNode] = None,
extension_ast_nodes: typing.Optional[typing.Collection[InputObjectTypeExtensionNode]] = None,
sqla_model=None,
) -> None:
super().__init__(
name=name,
fields=fields,
description=description,
out_type=out_type,
extensions=extensions,
ast_node=ast_node,
extension_ast_nodes=extension_ast_nodes,
)
self.sqla_model = sqla_model
class CreateInputType(InputObjectType):
pass
class TableInputType(InputObjectType):
pass
class UpdateInputType(InputObjectType):
pass
class DeleteInputType(InputObjectType):
pass
class FunctionInputType(GraphQLInputObjectType):
def __init__(
self,
name: str,
fields: Thunk[GraphQLInputFieldMap],
description: typing.Optional[str] = None,
out_type: typing.Optional[GraphQLInputFieldOutType] = None,
extensions: typing.Optional[typing.Dict[str, typing.Any]] = None,
ast_node: typing.Optional[InputObjectTypeDefinitionNode] = None,
extension_ast_nodes: typing.Optional[typing.Collection[InputObjectTypeExtensionNode]] = None,
sql_function=None,
) -> None:
super().__init__(
name=name,
fields=fields,
description=description,
out_type=out_type,
extensions=extensions,
ast_node=ast_node,
extension_ast_nodes=extension_ast_nodes,
)
self.sql_function = sql_function
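# Minimal construction sketch (illustrative only; a real `sqla_model` would be a
# SQLAlchemy mapped class rather than None):
# ExampleType = ObjectType(
#     name="Example",
#     fields={"id": Field(NonNull(ID))},
#     sqla_model=None,
# )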
| 1.617188 | 2 |
integrations/tensorflow/bindings/python/pyiree/tf/compiler/saved_model_test.py | rise-lang/iree | 1 | 3224 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import os
import sys
import tempfile
from pyiree.tf import compiler
# Dynamically import tensorflow.
try:
# Use a dynamic import so as to avoid hermetic dependency analysis
# (i.e. we only want the tensorflow from the environment).
tf = importlib.import_module("tensorflow")
# Just in case if linked against a pre-V2 defaulted version.
if hasattr(tf, "enable_v2_behavior"):
tf.enable_v2_behavior()
tf = tf.compat.v2
except ImportError:
print("Not running tests because tensorflow is not available")
sys.exit(0)
class StatelessModule(tf.Module):
def __init__(self):
pass
@tf.function(input_signature=[
tf.TensorSpec([4], tf.float32),
tf.TensorSpec([4], tf.float32)
])
def add(self, a, b):
return tf.tanh(a + b)
class RuntimeTest(tf.test.TestCase):
def testLoadSavedModelToXlaPipeline(self):
"""Tests that a basic saved model to XLA workflow grossly functions.
    This is largely here to verify that everything that needs to be linked in
    is linked in, and that there are no no-ops, etc.
"""
with tempfile.TemporaryDirectory() as temp_dir:
sm_dir = os.path.join(temp_dir, "simple.sm")
print("Saving to:", sm_dir)
my_module = StatelessModule()
options = tf.saved_model.SaveOptions(save_debug_info=True)
tf.saved_model.save(my_module, sm_dir, options=options)
# Load it up.
input_module = compiler.tf_load_saved_model(sm_dir)
xla_asm = input_module.to_asm()
print("XLA ASM:", xla_asm)
self.assertRegex(xla_asm, "mhlo.tanh")
if __name__ == "__main__":
tf.test.main()
| 1.914063 | 2 |
api/models/indicator/child_objects/properties.py | taco-chainalysis/pypulsedive | 0 | 3225 | <reponame>taco-chainalysis/pypulsedive
from .grandchild_objects import Cookies
from .grandchild_objects import Dns
from .grandchild_objects import Dom
from .grandchild_objects import Geo
#from .grandchild_objects import Http
#from .grandchild_objects import Meta
from .grandchild_objects import Ssl
#from .grandchild_objects import WhoIs
class Properties(object):
FIELD_MAP = {
"cookies": "cookies",
"dns": "dns",
"dom": "dom",
"geo": "geo",
"http": "http",
"meta": "meta",
"ssl": "ssl",
"whois": "whois"
}
def __init__(self):
self.cookies = ""
self.dns = ""
self.dom = ""
self.geo = ""
self.http = ""
self.meta = ""
self.ssl = ""
self.whois = ""
@staticmethod
def from_dictionary(properties_dict: dict):
properties = Properties()
field_map = getattr(properties.__class__, "FIELD_MAP")
for key_name in field_map:
if key_name in properties_dict:
setattr(properties, field_map[key_name], properties_dict[key_name])
properties.cookies = Cookies.from_dictionary(properties.cookies)
properties.dns = Dns.from_dictionary(properties.dns)
properties.dom = Dom.from_dictionary(properties.dom)
properties.geo = Geo.from_dictionary(properties.geo)
#properties.http = Http.from_dictionary(properties.http)
#properties.meta = Meta.from_dictionary(properties.meta)
properties.ssl = Ssl.from_dictionary(properties.ssl)
#properties.whois = WhoIs.from_dictionary(properties.whois)
return properties | 2.15625 | 2 |
iRep/gc_skew.py | scottdaniel/iRep | 55 | 3226 | #!/usr/bin/env python3
"""
script for calculating gc skew
<NAME>
<EMAIL>
"""
# python modules
import os
import sys
import argparse
import numpy as np
from scipy import signal
from itertools import cycle, product
# plotting modules
from matplotlib import use as mplUse
mplUse('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
plt.rcParams['pdf.fonttype'] = 42
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
# ctb
from ctbBio.fasta import iterate_fasta as parse_fasta
def plot_two(title, subtitle, A, B, labels, legend, vert = False):
"""
plot with differnt y axes
title = title for chart
A = data for left axis [[x], [y]]
B = data for right axis
lables = [left label, right label, x label]
legend = [[left legend], [right legend]]
"""
fig, ax1 = plt.subplots()
colors = ['0.75', 'b', 'r', 'c', 'y', 'm', 'k', 'g']
a_colors = cycle(colors)
b_colors = cycle(colors[::-1])
a_label = cycle(legend[0])
b_label = cycle(legend[1])
# plot left axis and x - axis
for a in A:
x, y = a
ax1.set_ylabel(labels[0], labelpad = 3)
ax1.set_xlabel(labels[-1])
ax1.plot(x, y, c = next(a_colors), marker = 'o', ms = 4, label = next(a_label))
# add vertical lines
if vert is not False:
for i in vert:
x, c = i
ax1.axvline(x = x, c = c, label = next(a_label), linewidth = 2)
# plot right axis
ax2 = ax1.twinx()
for b in B:
x, y = b
ax2.set_ylabel(labels[1], labelpad = 8)
ax2.plot(x, y, c = next(b_colors), linewidth = 2, label = next(b_label))
xmin = min([min(i[1]) for i in A] + [min(i[0]) for i in B])
xmax = max([max(i[0]) for i in A] + [max(i[0]) for i in B])
ax2.set_xlim(xmin, xmax)
# title
plt.suptitle(title, fontsize = 16)
plt.title(subtitle, fontsize = 10)
# legend
ax1.legend(loc = 'upper left', \
bbox_to_anchor=(0.55, -0.125), \
prop = {'size':8}, \
framealpha = 0.0
)
plt.legend(loc = 'upper right', \
bbox_to_anchor=(0.45, -0.125), \
prop = {'size':8}, \
framealpha = 0.0\
)
# save
pdf = PdfPages('%s.pdf' % title.replace(' ', '_'))
pdf.savefig(bbox_inches = 'tight')
plt.close()
pdf.close()
def check_peaks(peaks, length):
"""
select pair of min and max that are not too close or
too far apart and have greatest y distance between one another
"""
# if ori/ter peaks are too close or too far apart, they are probably wrong
closest, farthest = int(length * float(0.45)), int(length * float(0.55))
pairs = []
for pair in list(product(*peaks)):
        # sort by value so the trough maps to Ori and the peak to Ter
tr, pk = sorted(list(pair), key = lambda x: x[1], reverse = False) # trough and peak
a = (tr[0] - pk[0]) % length
b = (pk[0] - tr[0]) % length
pt = abs(tr[1] - pk[1]) # distance between values
if (a <= farthest and a >= closest) or (b <=farthest and b >= closest):
pairs.append([pt, tr, pk])
if len(pairs) == 0:
return [False, False]
pt, tr, pk = sorted(pairs, reverse = True)[0]
return [tr[0], pk[0]]
def find_ori_ter(c_skew, length):
"""
find origin and terminus of replication based on
cumulative GC Skew
"""
# find origin and terminus of replication based on
# cumulative gc skew min and max peaks
c_skew_min = signal.argrelextrema(np.asarray(c_skew[1]), np.less, order = 1)[0].tolist()
c_skew_max = signal.argrelextrema(np.asarray(c_skew[1]), np.greater, order = 1)[0].tolist()
# return False if no peaks were detected
    if len(c_skew_min) == 0 or len(c_skew_max) == 0:
return [False, False]
else:
c_skew_min = [[c_skew[0][i], c_skew[1][i]] for i in c_skew_min]
c_skew_max = [[c_skew[0][i], c_skew[1][i]] for i in c_skew_max]
ori, ter = check_peaks([c_skew_min, c_skew_max], length)
return ori, ter
def gc_skew(name, length, seq, window, slide, plot_skew):
"""
calculate gc skew and cumulative sum of gc skew over sequence windows
    gc skew = (G - C) / (G + C), computed over sliding windows of the sequence
"""
# convert to G - C
replacements = {'G':1, 'C':-1, 'A':0, 'T':0, 'N':0}
gmc = [] # G - C
for base in seq:
try:
gmc.append(replacements[base])
except:
gmc.append(0)
# convert to G + C
gpc = [abs(i) for i in gmc] # G + C
# calculate sliding windows for (G - C) and (G + C)
weights = np.ones(window)/window
gmc = [[i, c] for i, c in enumerate(signal.fftconvolve(gmc, weights, 'same').tolist())]
gpc = [[i, c] for i, c in enumerate(signal.fftconvolve(gpc, weights, 'same').tolist())]
    # calculate gc skew and cumulative gc skew sum
    skew = [[], []] # x and y for gc skew
    c_skew = [[], []] # x and y for gc skew cumulative sums
    cs = 0 # cumulative sum
# select windows to use based on slide
for i, m in gmc[0::slide]:
p = gpc[i][1]
if p == 0:
gcs = 0
else:
gcs = m/p
cs += gcs
skew[0].append(i)
c_skew[0].append(i)
skew[1].append(gcs)
c_skew[1].append(cs)
ori, ter = find_ori_ter(c_skew, length)
# plot data
if plot_skew is True:
title = '%s GC Skew' % (name)
subtitle = '(window = %s, slide = %s)' % (window, slide)
labels = ['GC Skew', 'Cumulative GC Skew', 'Position on Genome (bp)']
# remove some points for plotting (approx. 1,000 datapoints)
N = int(len(skew[0])/1000)
if N != 0:
skew = [skew[0][0::N], skew[1][0::N]]
if ori is False:
plot_two(title, subtitle, [skew], [c_skew], labels, \
[[labels[0]], [labels[1]]])
else:
plot_two(title, subtitle, [skew], [c_skew], labels, \
[[labels[0], 'Ori:%s' % ('{:,}'.format(ori)), \
'Ter:%s' % ('{:,}'.format(ter))], [labels[1]]], \
vert = [(ori, 'r'), (ter, 'b')])
return ori, ter, skew, c_skew
def parse_genomes(fastas, single):
"""
generator for parsing fastas
if single is True, combine sequences in multifasta file
"""
if single is True:
for genome in fastas:
sequence = []
for seq in parse_fasta(genome):
sequence.extend(list(seq[1].upper()))
yield (genome.name.rsplit('.', 1)[0], len(sequence), sequence)
else:
for genome in fastas:
for seq in parse_fasta(genome):
ID = seq[0].split('>', 1)[1].split()[0]
yield (ID, len(seq[1]), list(seq[1].upper()))
def open_files(files):
"""
open files in list, use stdin if first
item in list is '-'
"""
if files is None:
return files
if files[0] == '-':
        return [sys.stdin]
return (open(i) for i in files)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description = \
'# calculate gc skew and find Ori and Ter of replication')
parser.add_argument(\
'-f', nargs = '*', action = 'store', required = True, \
help = 'fasta(s)')
parser.add_argument(\
'-l', default = False, type = int, \
help = 'minimum contig length (default = 10 x window)')
parser.add_argument(\
'-w', default = 1000, type = int, \
help = 'window length (default = 1000)')
parser.add_argument(\
'-s', default = 10, type = int, \
help = 'slide length (default = 10)')
parser.add_argument(\
'--single', action = 'store_true', \
help = 'combine multi-fasta sequences into single genome')
parser.add_argument(\
'--no-plot', action = 'store_false', \
help = 'do not generate plots, print GC Skew to stdout')
args = vars(parser.parse_args())
fastas = open_files(args['f'])
single, plot_skew = args['single'], args['no_plot']
window, slide = args['w'], args['s']
min_len = args['l']
if min_len is False:
min_len = 10 * window
for name, length, seq in parse_genomes(fastas, single):
if length < min_len:
print('%s: Too Short' % (name), file=sys.stderr)
continue
ori, ter, skew, c_skew = gc_skew(name, length, seq, window, slide, plot_skew)
if ori == False:
ori, ter = 'n/a', 'n/a'
else:
ori, ter = '{:,}'.format(ori), '{:,}'.format(ter)
print('%s -> Origin: %s Terminus: %s' \
% (name, ori, ter), file=sys.stderr)
if plot_skew is False:
print('\t'.join(['# Name', 'Position', 'GC Skew', 'Cumulative GC Skew']))
for i, pos in enumerate(skew[0]):
out = [name, pos, skew[1][i], c_skew[1][i]]
print('\t'.join([str(i) for i in out]))
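# Example invocations (illustrative; the fasta file names are placeholders):
# ./gc_skew.py -f genome.fna --single
# ./gc_skew.py -f contigs.fna -w 1000 -s 10 --no-plot > gc_skew.tsv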
| 2.484375 | 2 |
examples/send_governance_vote_transaction.py | Algofiorg/algofi-py-sdk | 38 | 3227 | <reponame>Algofiorg/algofi-py-sdk
# This sample is provided for demonstration purposes only.
# It is not intended for production use.
# This example does not constitute trading advice.
import os
from dotenv import dotenv_values
from algosdk import mnemonic, account
from algofi.v1.asset import Asset
from algofi.v1.client import AlgofiTestnetClient, AlgofiMainnetClient
from algofi.utils import get_ordered_symbols, prepare_payment_transaction, get_new_account
from example_utils import print_market_state, print_user_state
### run setup.py before proceeding. make sure the .env file is set with mnemonic + storage_mnemonic.
# Hardcoding account keys is not a great practice. This is for demonstration purposes only.
# See the README & Docs for alternative signing methods.
my_path = os.path.abspath(os.path.dirname(__file__))
ENV_PATH = os.path.join(my_path, ".env")
# load user passphrase
user = dotenv_values(ENV_PATH)
sender = mnemonic.to_public_key(user['mnemonic'])
key = mnemonic.to_private_key(user['mnemonic'])
# IS_MAINNET
IS_MAINNET = False
client = AlgofiMainnetClient(user_address=sender) if IS_MAINNET else AlgofiTestnetClient(user_address=sender)
# NOTE: Get the live governance address at https://governance.algorand.foundation/api/periods/
# under "sign_up_address" for the relevant governance period
# Specify your vote according to the formats that are permissible in the Algorand Foundation Spec
# https://github.com/algorandfoundation/governance/blob/main/af-gov1-spec.md
# Get the idx, vote choices based on the relevant voting session from https://governance.algorand.foundation/api/periods/
address = sender
governance_address = ""
vote_note = b'af/gov1:j[6,"a","c"]' # NOTE: an example, not to be used in live voting necessarily
vault_address = client.manager.get_storage_address(address)
print("~"*100)
print("Processing send_governance_vote_transaction transaction for vault address " + vault_address)
print("~"*100)
txn = client.prepare_send_governance_vote_transactions(governance_address, note=vote_note, address=address)
txn.sign_with_private_key(sender, key)
txn.submit(client.algod, wait=True)
# After sending, check your vote at
# https://governance.algorand.foundation/api/periods/<governance-period-slug>/governors/<vault_address>
# to confirm successful vote in voting session
# print final state
print("~"*100)
print("Final State")
print("Sent governance transaction with note: " + str(vote_note))
print("~"*100) | 1.898438 | 2 |
bid/inventoryClient.py | franklx/SOAPpy-py3 | 7 | 3228 | <gh_stars>1-10
#!/usr/bin/env python
import getopt
import sys
import string
import re
import time
sys.path.insert(1,"..")
from SOAPpy import SOAP
import traceback
DEFAULT_SERVERS_FILE = './inventory.servers'
DEFAULT_METHODS = ('SimpleBuy', 'RequestForQuote','Buy','Ping')
def usage (error = None):
sys.stdout = sys.stderr
if error != None:
print(error)
print("""usage: %s [options] [server ...]
If a long option shows an argument is mandatory, it's mandatory for the
equivalent short option also.
-?, --help display this usage
-d, --debug turn on debugging in the SOAP library
-i, --invert test servers *not* in the list of servers given
-m, --method=METHOD#[,METHOD#...]
call only the given methods, specify a METHOD# of ?
for the list of method numbers
-o, --output=TYPE turn on output, TYPE is one or more of s(uccess),
f(ailure), n(ot implemented), F(ailed (as expected)),
a(ll)
[f]
-s, --servers=FILE use FILE as list of servers to test [%s]
-t, --stacktrace print a stack trace on each unexpected failure
-T, --always-stacktrace
print a stack trace on any failure
""" % (sys.argv[0], DEFAULT_SERVERS_FILE), end=' ')
sys.exit (0)
def methodUsage ():
sys.stdout = sys.stderr
print("Methods are specified by number. Multiple methods can be " \
"specified using a\ncomma-separated list of numbers or ranges. " \
"For example 1,4-6,8 specifies\nmethods 1, 4, 5, 6, and 8.\n")
print("The available methods are:\n")
    half = (len (DEFAULT_METHODS) + 1) // 2
for i in range (half):
print("%4d. %-25s" % (i + 1, DEFAULT_METHODS[i]), end=' ')
if i + half < len (DEFAULT_METHODS):
print("%4d. %-25s" % (i + 1 + half, DEFAULT_METHODS[i + half]), end=' ')
print()
sys.exit (0)
def readServers (file):
servers = []
f = open (file, 'r')
while 1:
line = f.readline ()
if line == '':
break
if line[0] in ('#', '\n') or line[0] in string.whitespace:
continue
cur = {'nonfunctional': {}}
tag = None
servers.append (cur)
while 1:
if line[0] in string.whitespace:
if tag == 'nonfunctional':
value = method + ' ' + cur[tag][method]
else:
value = cur[tag]
value += ' ' + line.strip ()
else:
tag, value = line.split (':', 1)
tag = tag.strip ().lower ()
value = value.strip ()
if value[0] == '"' and value[-1] == '"':
value = value[1:-1]
if tag == 'nonfunctional':
value = value.split (' ', 1) + ['']
method = value[0]
cur[tag][method] = value[1]
else:
cur[tag] = value
line = f.readline ()
if line == '' or line[0] == '\n':
break
return servers
def str2list (s):
l = {}
for i in s.split (','):
if i.find ('-') != -1:
i = i.split ('-')
for i in range (int (i[0]),int (i[1]) + 1):
l[i] = 1
else:
l[int (i)] = 1
l = list(l.keys ())
l.sort ()
return l
def SimpleBuy(serv, sa, epname):
serv = serv._sa (sa % {'methodname':'SimpleBuy'})
return serv.SimpleBuy(ProductName="widget", Quantity = 50, Address = "this is my address") #JHawk, Phalanx require this order of params
def RequestForQuote(serv, sa, epname):
serv = serv._sa (sa % {'methodname':'RequestForQuote'})
return serv.RequestForQuote(Quantity=3, ProductName = "thing") # for Phalanx, JHawk
def Buy(serv, sa, epname):
import copy
serv = serv._sa (sa % {'methodname':'Buy'})
billTo_d = {"name":"<NAME>", "address":"1 1st Street",
"city":"New York", "state":"NY", "zipCode":"10000"}
shipTo_d = {"name":"<NAME> ", "address":"1 1st Street ",
"city":"New York ", "state":"NY ", "zipCode":"10000 "}
for k,v in list(shipTo_d.items()):
shipTo_d[k] = v[:-1]
itemd1 = SOAP.structType( {"name":"widg1","quantity":200,"price":SOAP.decimalType(45.99), "_typename":"LineItem"})
itemd2 = SOAP.structType( {"name":"widg2","quantity":400,"price":SOAP.decimalType(33.45), "_typename":"LineItem"})
items_d = SOAP.arrayType( [itemd1, itemd2] )
items_d._ns = "http://www.soapinterop.org/Bid"
po_d = SOAP.structType( data = {"poID":"myord","createDate":SOAP.dateTimeType(),"shipTo":shipTo_d, "billTo":billTo_d, "items":items_d})
try:
# it's called PO by MST (MS SOAP Toolkit), JHawk (.NET Remoting),
# Idoox WASP, Paul (SOAP::Lite), PranishK (ATL), GLUE, Aumsoft,
# HP, EasySoap, and Jake (Frontier). [Actzero accepts either]
return serv.Buy(PO=po_d)
except:
# called PurchaseOrder by KeithBa
return serv.Buy(PurchaseOrder=po_d)
def Ping(serv, sa, epname):
serv = serv._sa (sa % {'methodname':'Ping'})
return serv.Ping()
def main():
servers = DEFAULT_SERVERS_FILE
methodnums = None
output = 'f'
invert = 0
succeed = 0
printtrace = 0
stats = 1
total = 0
fail = 0
failok = 0
notimp = 0
try:
opts,args = getopt.getopt (sys.argv[1:], '?dm:io:s:t',
['help', 'method', 'debug', 'invert',
'output', 'servers='])
for opt, arg in opts:
if opt in ('-?', '--help'):
usage ()
elif opt in ('-d', '--debug'):
SOAP.Config.debug = 1
elif opt in ('-i', '--invert'):
invert = 1
elif opt in ('-m', '--method'):
if arg == '?':
methodUsage ()
methodnums = str2list (arg)
elif opt in ('-o', '--output'):
output = arg
elif opt in ('-s', '--servers'):
servers = arg
else:
raise AttributeError("Recognized but unimplemented option `%s'" % opt)
except SystemExit:
raise
except:
usage (sys.exc_info ()[1])
if 'a' in output:
output = 'fFns'
servers = readServers(servers)
if methodnums == None:
methodnums = list(range(1, len (DEFAULT_METHODS) + 1))
limitre = re.compile ('|'.join (args), re.IGNORECASE)
for s in servers:
if (not not limitre.match (s['name'])) == invert:
continue
serv = SOAP.SOAPProxy(s['endpoint'], namespace = s['namespace'])
for num in (methodnums):
if num > len(DEFAULT_METHODS):
break
total += 1
name = DEFAULT_METHODS[num - 1]
title = '%s: %s (#%d)' % (s['name'], name, num)
try:
fn = globals ()[name]
except KeyboardInterrupt:
raise
except:
if 'n' in output:
print(title, "test not yet implemented")
notimp += 1
continue
try:
res = fn (serv, s['soapaction'], s['name'])
if name in s['nonfunctional']:
print(title, "succeeded despite marked nonfunctional")
elif 's' in output:
print(title, "succeeded ")
succeed += 1
except KeyboardInterrupt:
print("fail")
raise
except:
if name in s['nonfunctional']:
if 'F' in output:
t = 'as expected'
if s['nonfunctional'][name] != '':
t += ', ' + s['nonfunctional'][name]
print(title, "failed (%s) -" %t, sys.exc_info()[1])
failok += 1
else:
if 'f' in output:
print(title, "failed -", str (sys.exc_info()[1]))
fail += 1
if stats:
print(" Tests ended at:", time.ctime (time.time()))
if stats > 0:
print(" Total tests: %d" % total)
print(" Successes: %d (%3.2f%%)" % \
(succeed, 100.0 * succeed / total))
if stats > 0 or fail > 0:
print("Failed unexpectedly: %d (%3.2f%%)" % \
(fail, 100.0 * fail / total))
if stats > 0:
print(" Failed as expected: %d (%3.2f%%)" % \
(failok, 100.0 * failok / total))
if stats > 0 or notimp > 0:
print(" Not implemented: %d (%3.2f%%)" % \
(notimp, 100.0 * notimp / total))
return fail + notimp
if __name__ == "__main__":
main()
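# Example invocations (illustrative):
# ./inventoryClient.py -m ?                       # list the available method numbers
# ./inventoryClient.py -o a -m 1,4 SomeServerName # run SimpleBuy and Ping against one server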
| 2.703125 | 3 |
src/compile.py | Pixxeasy/WinTools | 0 | 3229 | import os
import json
import shutil
with open("entry.tp") as entry:
entry = json.loads(entry.read())
startcmd = entry['plugin_start_cmd'].split("%TP_PLUGIN_FOLDER%")[1].split("\\")
filedirectory = startcmd[0]
fileName = startcmd[1]
if os.path.exists(os.path.join("temp", filedirectory)):
    shutil.rmtree(os.path.join("temp", filedirectory))
os.makedirs(os.path.join("temp", filedirectory))
for file in os.listdir("."):
if file not in ["compile.py", "utils", "requirements.txt", "build", "dist", "main.py", "main.spec", "__pycache__", "temp"]:
print("copying", file)
shutil.copy(os.path.join(os.getcwd(), file), os.path.join("temp", filedirectory))
os.rename("dist\Main.exe", "dist\WinTools.exe")
shutil.copy(os.path.join(os.getcwd(), r"dist\WinTools.exe"), "temp/"+filedirectory)
shutil.make_archive(base_name="WinTools", format='zip', root_dir="temp", base_dir="WinTools")
os.rename("WinTools.zip", "WinTools.tpp")
| 2.375 | 2 |
suda/1121/12.py | tusikalanse/acm-icpc | 2 | 3230 | <reponame>tusikalanse/acm-icpc
for _ in range(int(input())):
x, y = list(map(int, input().split()))
flag = 1
for i in range(x, y + 1):
n = i * i + i + 41
for j in range(2, n):
if j * j > n:
break
if n % j == 0:
flag = 0
break
if flag == 0:
break
if flag:
print("OK")
else:
print("Sorry") | 2.9375 | 3 |
notification/app/node_modules/hiredis/binding.gyp | c2gconsulting/bulkpay | 208 | 3231 | {
'targets': [
{
'target_name': 'hiredis',
'sources': [
'src/hiredis.cc'
, 'src/reader.cc'
],
'include_dirs': ["<!(node -e \"require('nan')\")"],
'dependencies': [
'deps/hiredis.gyp:hiredis-c'
],
'defines': [
'_GNU_SOURCE'
],
'cflags': [
'-Wall',
'-O3'
]
}
]
}
| 1.007813 | 1 |
basic_and.py | Verkhovskaya/PyDL | 5 | 3232 | <reponame>Verkhovskaya/PyDL
from pywire import *
def invert(signal):
if signal:
return False
else:
return True
class Inverter:
def __init__(self, a, b):
b.drive(invert, a)
width = 4
a = Signal(width, io="in")
b = Signal(width, io="out")
Inverter(a, b)
build() | 2.6875 | 3 |
network/evaluate_keypoints.py | mhsung/deep-functional-dictionaries | 41 | 3233 | # <NAME> (<EMAIL>)
# April 2018
import os, sys
BASE_DIR = os.path.normpath(
os.path.join(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(os.path.join(BASE_DIR, '..'))
from datasets import *
from generate_outputs import *
from scipy.optimize import linear_sum_assignment
#import matplotlib.pyplot as plt
import numpy as np
def compute_all_keypoints(sess, net, data):
P = data.point_clouds
assert(P.shape[0] == data.n_data)
assert(P.shape[1] == data.n_points)
KP = data.keypoints
assert(KP.shape[0] == data.n_data)
assert(KP.shape[1] == data.n_labels)
A = predict_A(P, sess, net)
assert(A.shape[0] == data.n_data)
assert(A.shape[1] == data.n_points)
assert(A.shape[2] == net.K)
pred_KP = np.argmax(A, axis=1)
return P, KP, pred_KP
def evaluate_PCK(P, KP, pred_KP):
n_data = P.shape[0]
n_points = P.shape[1]
n_labels = KP.shape[1]
K = pred_KP.shape[1]
# dists_info: (point_cloud_index, label, basis_index, distance)
dists_info = []
for k in range(n_data):
# NOTE:
# Skip if the keypoint does not exist.
labels = [i for i in range(n_labels) if KP[k,i] >= 0]
# Find the closest prediction (w/o matching).
for i, label in enumerate(labels):
all_dists = np.zeros(K)
idx_i = KP[k,label]
assert(idx_i < n_points)
p_i = P[k,idx_i]
for j in range(K):
idx_j = pred_KP[k,j]
assert(idx_j < n_points)
p_j = P[k,idx_j]
all_dists[j] = np.linalg.norm(p_i - p_j)
j = np.argmin(all_dists)
dists_info.append((k, i, j, all_dists[j]))
dists_info = np.array(dists_info)
return dists_info
def evaluate_PCK_after_label_basis_matching(P, KP, pred_KP):
n_data = P.shape[0]
n_points = P.shape[1]
n_labels = KP.shape[1]
K = pred_KP.shape[1]
# Find the best mapping from labels to bases.
all_dists = np.zeros((n_data, n_labels, K))
label_counts = np.zeros(n_labels)
for k in range(n_data):
for i in range(n_labels):
# NOTE:
# Skip if the keypoint does not exist.
if KP[k,i] < 0: continue
idx_i = KP[k,i]
assert(idx_i < n_points)
p_i = P[k,idx_i]
label_counts[i] += 1.
for j in range(K):
idx_j = pred_KP[k,j]
assert(idx_j < n_points)
p_j = P[k,idx_j]
all_dists[k,i,j] += np.linalg.norm(p_i - p_j)
mean_dists = np.sum(all_dists, axis=0) / \
np.expand_dims(label_counts, axis=-1)
row_ind, col_ind = linear_sum_assignment(mean_dists)
# dists_info: (point_cloud_index, label, basis_index, distance)
dists_info = []
for k in range(n_data):
for (i, j) in zip(row_ind, col_ind):
if KP[k,i] < 0: continue
dists_info.append((k, i, j, all_dists[k,i,j]))
dists_info = np.array(dists_info)
return dists_info
def save_results(dists_info, out_dir, postfix=None):
# dists_info: (point_cloud_index, label, basis_index, distance)
dists = dists_info[:,3]
if postfix is not None:
out_file = os.path.join(out_dir, 'distances_{}.npy'.format(postfix))
else:
out_file = os.path.join(out_dir, 'distances.npy')
np.save(out_file, dists)
print("Saved '{}'.".format(out_file))
'''
# Draw plot.
n_matches = dists.size
x_list = np.linspace(0.0, 0.1, 20 + 1)
counts = np.zeros(x_list.size, dtype=int)
for i in range(x_list.size):
counts[i] = np.sum(dists <= x_list[i])
y_list = counts.astype(x_list.dtype) / float(n_matches)
plt.clf()
plt.plot(x_list, y_list)
plt.ylim(0., 1.)
plt.yticks(np.linspace(0., 1., 10 + 1))
if postfix is not None:
out_file = os.path.join(out_dir, 'pck_{}.png'.format(postfix))
else:
out_file = os.path.join(out_dir, 'pck.png')
plt.savefig(out_file)
print("Saved '{}'.".format(out_file))
'''
def evaluate(sess, net, data, out_dir):
if not os.path.exists(out_dir): os.makedirs(out_dir)
P, KP, pred_KP = compute_all_keypoints(sess, net, data)
dists = evaluate_PCK(P, KP, pred_KP)
save_results(dists, out_dir)
dists_after_matching = evaluate_PCK_after_label_basis_matching(
P, KP, pred_KP)
save_results(dists_after_matching, out_dir, postfix='after_matching')
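# Illustrative post-processing sketch (assumes evaluate() has written 'distances.npy';
# the 0.05 distance threshold is an arbitrary example):
# dists = np.load(os.path.join(out_dir, 'distances.npy'))
# pck_at_0_05 = np.mean(dists <= 0.05)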
| 2.25 | 2 |
recipes/cxxopts/all/conanfile.py | dvirtz/conan-center-index | 562 | 3234 | import os
from conans import ConanFile, tools
from conans.errors import ConanInvalidConfiguration
class CxxOptsConan(ConanFile):
name = "cxxopts"
homepage = "https://github.com/jarro2783/cxxopts"
url = "https://github.com/conan-io/conan-center-index"
description = "Lightweight C++ option parser library, supporting the standard GNU style syntax for options."
license = "MIT"
topics = ("conan", "option-parser", "positional-arguments ", "header-only")
settings = "compiler"
options = { "unicode": [True, False] }
default_options = { "unicode": False }
no_copy_source = True
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _minimum_cpp_standard(self):
return 11
@property
def _minimum_compilers_version(self):
return {
"Visual Studio": "14",
"gcc": "5",
"clang": "3.9",
"apple-clang": "8",
}
def configure(self):
if self.settings.compiler.get_safe("cppstd"):
tools.check_min_cppstd(self, self._minimum_cpp_standard)
min_version = self._minimum_compilers_version.get(str(self.settings.compiler))
if not min_version:
self.output.warn("{} recipe lacks information about the {} compiler support.".format(
self.name, self.settings.compiler))
else:
if tools.Version(self.settings.compiler.version) < min_version:
raise ConanInvalidConfiguration("{} requires C++{} support. The current compiler {} {} does not support it.".format(
self.name, self._minimum_cpp_standard, self.settings.compiler, self.settings.compiler.version))
def requirements(self):
if self.options.unicode:
self.requires("icu/64.2")
def source(self):
tools.get(**self.conan_data["sources"][self.version])
os.rename("{}-{}".format(self.name, self.version), self._source_subfolder)
def package(self):
self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
self.copy("{}.hpp".format(self.name), dst="include", src=os.path.join(self._source_subfolder, "include"))
def package_id(self):
self.info.header_only()
def package_info(self):
if self.options.unicode:
self.cpp_info.defines = ["CXXOPTS_USE_UNICODE"]
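# Illustrative consumption sketch (the package version is an assumption; any packaged
# version works the same way):
#   conan install cxxopts/2.2.1@ -o cxxopts:unicode=True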
| 2.15625 | 2 |
p_030_039/problem31.py | ericgreveson/projecteuler | 0 | 3235 | <gh_stars>0
class CoinArray(list):
"""
Coin list that is hashable for storage in sets
The 8 entries are [1p count, 2p count, 5p count, ... , 200p count]
"""
def __hash__(self):
"""
Hash this as a string
"""
return hash(" ".join([str(i) for i in self]))
def main():
"""
Entry point
"""
# Important: sorted smallest to largest
coins = [1, 2, 5, 10, 20, 50, 100, 200]
coin_index = {coin: index for index, coin in enumerate(coins)}
# How many ways are there of making each number from 1 to 200 from these values?
# Building up from 1 means we can re-use earlier results
# e.g.:
# 1p: [{1}]
# 2p: [{1,1}, {2}]
# 3p: [{1,1,1}, {2,1}]
# 4p: [{1,1,1,1}, {2,1,1}, {2,2}]
# etc
way_sets = [None]
for i in range(1, 201):
way_set_i = set()
# Try using 1 of each coin and then all the ways of the remainder, if > 0
for coin in coins:
remainder = i - coin
if remainder == 0:
# We can make this with exactly this coin alone - but no larger coins
coin_count = [0 for i in coins]
coin_count[coin_index[coin]] = 1
way_set_i.add(CoinArray(coin_count))
break
elif remainder > 0:
# We can use this coin and whatever the options for the smaller value are
for rem_list in way_sets[remainder]:
new_coin_count = [c for c in rem_list]
new_coin_count[coin_index[coin]] += 1
way_set_i.add(CoinArray(new_coin_count))
else:
# Can't use any bigger coins
break
way_sets.append(way_set_i)
print(f"Number of ways of making £2: {len(way_sets[200])}")
return
if __name__ == "__main__":
main()
| 3.40625 | 3 |
video/cloud-client/quickstart/quickstart.py | nasirdec/GCP-AppEngine-Example | 1 | 3236 | <reponame>nasirdec/GCP-AppEngine-Example
#!/usr/bin/env python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This application demonstrates label detection on a demo video using
the Google Cloud API.
Usage:
python quickstart.py
"""
def run_quickstart():
# [START video_quickstart]
from google.cloud import videointelligence
video_client = videointelligence.VideoIntelligenceServiceClient()
features = [videointelligence.enums.Feature.LABEL_DETECTION]
operation = video_client.annotate_video(
'gs://demomaker/cat.mp4', features=features)
print('\nProcessing video for label annotations:')
result = operation.result(timeout=120)
print('\nFinished processing.')
# first result is retrieved because a single video was processed
segment_labels = result.annotation_results[0].segment_label_annotations
for i, segment_label in enumerate(segment_labels):
print('Video label description: {}'.format(
segment_label.entity.description))
for category_entity in segment_label.category_entities:
print('\tLabel category description: {}'.format(
category_entity.description))
for i, segment in enumerate(segment_label.segments):
start_time = (segment.segment.start_time_offset.seconds +
segment.segment.start_time_offset.nanos / 1e9)
end_time = (segment.segment.end_time_offset.seconds +
segment.segment.end_time_offset.nanos / 1e9)
positions = '{}s to {}s'.format(start_time, end_time)
confidence = segment.confidence
print('\tSegment {}: {}'.format(i, positions))
print('\tConfidence: {}'.format(confidence))
print('\n')
# [END video_quickstart]
if __name__ == '__main__':
run_quickstart()
| 2.453125 | 2 |
ally/instrument.py | platformmaster9/PyAlly | 0 | 3237 | from . import utils
#################################################
""" INSTRUMENT """
#################################################
def Instrument(symbol):
symbol = str(symbol).upper()
return {
'__symbol' : symbol,
'Sym' : symbol,
'SecTyp' : 'CS',
'__type' : 'equity'
}
#################################################
def Equity(symbol):
return Instrument(symbol)
#################################################
def Option (instrument, maturity_date, strike):
return {
**{
'MatDt' : str(maturity_date) + 'T00:00:00.000-05:00',
'StrkPx' : str(int(strike)),
'SecTyp' : 'OPT',
'__maturity' : str(maturity_date),
'__strike' : str(int(strike))
},
**instrument
}
#################################################
def Call (instrument, maturity_date, strike):
# Let Option do some lifting
x = {
**{ 'CFI':'OC' },
**Option(instrument, maturity_date, strike)
}
x['__underlying'] = x['Sym']
x['__type'] = 'call'
x['__symbol'] = utils.option_format(
symbol = x['Sym'],
exp_date = x['__maturity'],
strike = x['__strike'],
direction = 'C'
)
return x
#################################################
def Put (instrument, maturity_date, strike):
# Let Option do some lifting
x = {
**{ 'CFI':'OP' },
**Option(instrument, maturity_date, strike)
}
x['__underlying'] = x['Sym']
x['__type'] = 'put'
x['__symbol'] = utils.option_format(
symbol = x['Sym'],
exp_date = x['__maturity'],
strike = x['__strike'],
direction = 'P'
)
return x | 2.5 | 2 |
airbyte-integrations/connectors/source-yahoo-finance-price/integration_tests/acceptance.py | onaio/airbyte | 22 | 3238 | <gh_stars>10-100
#
# Copyright (c) 2022 Airbyte, Inc., all rights reserved.
#
import pytest
pytest_plugins = ("source_acceptance_test.plugin",)
@pytest.fixture(scope="session", autouse=True)
def connector_setup():
"""This fixture is a placeholder for external resources that acceptance test might require."""
# TODO: setup test dependencies if needed. otherwise remove the TODO comments
yield
# TODO: clean up test dependencies
| 1.625 | 2 |
ddt/__init__.py | GawenChen/test_pytest | 0 | 3239 | <filename>ddt/__init__.py<gh_stars>0
# -*- coding: utf-8 -*-
"""
@Time : 2021/10/9 17:51
@Auth : 潇湘
@File :__init__.py.py
@IDE :PyCharm
@QQ : 810400085
""" | 1.101563 | 1 |
darts/models/linear_regression_model.py | BiancaMT25/darts | 1 | 3240 | """
Standard Regression model
-------------------------
"""
import numpy as np
import pandas as pd
from typing import Union
from ..logging import get_logger
from .regression_model import RegressionModel
from sklearn.linear_model import LinearRegression
logger = get_logger(__name__)
class LinearRegressionModel(RegressionModel):
def __init__(self,
lags: Union[int, list] = None,
lags_exog: Union[int, list, bool] = None,
**kwargs):
"""
Simple wrapper for the linear regression model in scikit-learn, LinearRegression().
Parameters
----------
lags : Union[int, list]
Number of lagged target values used to predict the next time step. If an integer is given
the last `lags` lags are used (inclusive). Otherwise a list of integers with lags is required.
lags_exog : Union[int, list, bool]
Number of lagged exogenous values used to predict the next time step. If an integer is given
the last `lags_exog` lags are used (inclusive). Otherwise a list of integers with lags is required.
If True `lags` will be used to determine lags_exog. If False, the values of all exogenous variables
at the current time `t`. This might lead to leakage if for predictions the values of the exogenous
variables at time `t` are not known.
**kwargs
Additional keyword arguments passed to `sklearn.linear_model.LinearRegression`.
"""
self.kwargs = kwargs
super().__init__(
lags=lags,
lags_exog=lags_exog,
model=LinearRegression(**kwargs)
)
def __str__(self):
return 'LinearRegression(lags={}, lags_exog={})'.format(self.lags, self.lags_exog) | 3.25 | 3 |
hail/python/test/hailtop/utils/test_utils.py | vrautela/hail | 0 | 3241 | <reponame>vrautela/hail<gh_stars>0
from hailtop.utils import (partition, url_basename, url_join, url_scheme,
url_and_params, parse_docker_image_reference)
def test_partition_zero_empty():
assert list(partition(0, [])) == []
def test_partition_even_small():
assert list(partition(3, range(3))) == [range(0, 1), range(1, 2), range(2, 3)]
def test_partition_even_big():
assert list(partition(3, range(9))) == [range(0, 3), range(3, 6), range(6, 9)]
def test_partition_uneven_big():
assert list(partition(2, range(9))) == [range(0, 5), range(5, 9)]
def test_partition_toofew():
assert list(partition(6, range(3))) == [range(0, 1), range(1, 2), range(2, 3),
range(3, 3), range(3, 3), range(3, 3)]
def test_url_basename():
assert url_basename('/path/to/file') == 'file'
assert url_basename('https://hail.is/path/to/file') == 'file'
def test_url_join():
assert url_join('/path/to', 'file') == '/path/to/file'
assert url_join('/path/to/', 'file') == '/path/to/file'
assert url_join('/path/to/', '/absolute/file') == '/absolute/file'
assert url_join('https://hail.is/path/to', 'file') == 'https://hail.is/path/to/file'
assert url_join('https://hail.is/path/to/', 'file') == 'https://hail.is/path/to/file'
assert url_join('https://hail.is/path/to/', '/absolute/file') == 'https://hail.is/absolute/file'
def test_url_scheme():
assert url_scheme('https://hail.is/path/to') == 'https'
assert url_scheme('/path/to') == ''
def test_url_and_params():
assert url_and_params('https://example.com/') == ('https://example.com/', {})
assert url_and_params('https://example.com/foo?') == ('https://example.com/foo', {})
assert url_and_params('https://example.com/foo?a=b&c=d') == ('https://example.com/foo', {'a': 'b', 'c': 'd'})
def test_parse_docker_image_reference():
x = parse_docker_image_reference('animage')
assert x.domain is None
assert x.path == 'animage'
assert x.tag is None
assert x.digest is None
assert x.name() == 'animage'
assert str(x) == 'animage'
x = parse_docker_image_reference('hailgenetics/animage')
assert x.domain == 'hailgenetics'
assert x.path == 'animage'
assert x.tag is None
assert x.digest is None
assert x.name() == 'hailgenetics/animage'
assert str(x) == 'hailgenetics/animage'
x = parse_docker_image_reference('localhost:5000/animage')
assert x.domain == 'localhost:5000'
assert x.path == 'animage'
assert x.tag is None
assert x.digest is None
assert x.name() == 'localhost:5000/animage'
assert str(x) == 'localhost:5000/animage'
x = parse_docker_image_reference('localhost:5000/a/b/name')
assert x.domain == 'localhost:5000'
assert x.path == 'a/b/name'
assert x.tag is None
assert x.digest is None
assert x.name() == 'localhost:5000/a/b/name'
assert str(x) == 'localhost:5000/a/b/name'
x = parse_docker_image_reference('localhost:5000/a/b/name:tag')
assert x.domain == 'localhost:5000'
assert x.path == 'a/b/name'
assert x.tag == 'tag'
assert x.digest is None
assert x.name() == 'localhost:5000/a/b/name'
assert str(x) == 'localhost:5000/a/b/name:tag'
x = parse_docker_image_reference('localhost:5000/a/b/name:tag@sha256:abc123')
assert x.domain == 'localhost:5000'
assert x.path == 'a/b/name'
assert x.tag == 'tag'
assert x.digest == 'sha256:abc123'
assert x.name() == 'localhost:5000/a/b/name'
assert str(x) == 'localhost:5000/a/b/name:tag@sha256:abc123'
x = parse_docker_image_reference('localhost:5000/a/b/name@sha256:abc123')
assert x.domain == 'localhost:5000'
assert x.path == 'a/b/name'
assert x.tag is None
assert x.digest == 'sha256:abc123'
assert x.name() == 'localhost:5000/a/b/name'
assert str(x) == 'localhost:5000/a/b/name@sha256:abc123'
x = parse_docker_image_reference('name@sha256:abc123')
assert x.domain is None
assert x.path == 'name'
assert x.tag is None
assert x.digest == 'sha256:abc123'
assert x.name() == 'name'
assert str(x) == 'name@sha256:abc123'
x = parse_docker_image_reference('gcr.io/hail-vdc/batch-worker:123fds312')
assert x.domain == 'gcr.io'
assert x.path == 'hail-vdc/batch-worker'
assert x.tag == '123fds312'
assert x.digest is None
assert x.name() == 'gcr.io/hail-vdc/batch-worker'
assert str(x) == 'gcr.io/hail-vdc/batch-worker:123fds312'
x = parse_docker_image_reference('us-docker.pkg.dev/my-project/my-repo/test-image')
assert x.domain == 'us-docker.pkg.dev'
assert x.path == 'my-project/my-repo/test-image'
assert x.tag is None
assert x.digest is None
assert x.name() == 'us-docker.pkg.dev/my-project/my-repo/test-image'
assert str(x) == 'us-docker.pkg.dev/my-project/my-repo/test-image'
| 2.453125 | 2 |
hood/urls.py | wadi-1000/Vicinity | 0 | 3242 | <reponame>wadi-1000/Vicinity
from django.urls import path,include
from . import views
urlpatterns = [
path('home/', views.home, name = 'home'),
path('add_hood/',views.uploadNeighbourhood, name = 'add_hood'),
path('viewhood/',views.viewHood, name = 'viewhood'),
path('hood/<int:pk>/',views.hood, name = 'hood'),
path('add_bizna/',views.uploadBuisness, name = 'add_bizna'),
path('bizna/',views.viewBizna, name = 'view_bizna'),
path('viewbizna/<int:pk>/',views.bizna, name = 'bizna'),
path('post/',views.create_post, name = 'post'),
path('posts/',views.viewPost, name = 'posts'),
path('searchbizna/', views.searchBizna, name="search_results"),
path('searchhood/', views.searchHood, name="search_res"),
path('join_hood/<id>', views.join_neighbourhood, name='join-hood'),
path('leave_hood/<id>', views.leave_neighbourhood, name='leave-hood'),
] | 1.867188 | 2 |
src/licensedcode/tokenize.py | chetanya-shrimali/scancode-toolkit | 0 | 3243 | <reponame>chetanya-shrimali/scancode-toolkit<filename>src/licensedcode/tokenize.py
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from itertools import islice
from itertools import izip
import re
from zlib import crc32
from textcode.analysis import text_lines
"""
Utilities to break texts in lines and tokens (aka. words) with specialized version
for queries and rules texts.
"""
def query_lines(location=None, query_string=None, strip=True):
"""
Return an iterable of text lines given a file at `location` or a
`query string`. Include empty lines.
"""
# TODO: OPTIMIZE: tokenizing line by line may be rather slow
# we could instead get lines and tokens at once in a batch?
lines = []
if location:
lines = text_lines(location, demarkup=False)
elif query_string:
if strip:
keepends = False
else:
keepends = True
lines = query_string.splitlines(keepends)
for line in lines:
if strip:
yield line.strip()
else:
yield line
# Split on whitespace and punctuations: keep only characters
# and + in the middle or end of a word.
# Keeping the trailing + is important for licenses name such as GPL2+
query_pattern = '[^\W_]+\+?[^\W_]*'
word_splitter = re.compile(query_pattern, re.UNICODE).findall
def query_tokenizer(text, lower=True):
"""
Return an iterable of tokens from a unicode query text.
"""
if not text:
return []
text = lower and text.lower() or text
return (token for token in word_splitter(text) if token)
# Alternate pattern used for matched text collection
not_query_pattern = '[\W_+]+[\W_]?'
# collect tokens and non-token texts in two different groups
_text_capture_pattern = '(?P<token>' + query_pattern + ')' + '|' + '(?P<punct>' + not_query_pattern + ')'
tokens_and_non_tokens = re.compile(_text_capture_pattern, re.UNICODE).finditer
def matched_query_text_tokenizer(text):
"""
Return an iterable of tokens and non-tokens from a unicode query text keeping
everything (including punctuations, line endings, etc.)
The returned iterable contains 2-tuples of:
- True if the string is a text token or False if this is not (such as punctuation, spaces, etc).
- the corresponding string
This is used to reconstruct the matched query text accurately.
"""
if not text:
return
for match in tokens_and_non_tokens(text):
if not match:
continue
mgd = match.groupdict()
token = mgd.get('token')
punct = mgd.get('punct')
if token or punct:
yield (True, token) if token else (False, punct)
# Template-aware splitter, keeping a templated part {{anything}} as a token.
# This splitter yields plain token strings or double braces-enclosed strings
# {{something}} for templates. Curly braces are otherwise treated as punctuation.
# A template part is anything enclosed in double braces
template_pattern = '\{\{[^{}]*\}\}'
rule_pattern = '%s|%s+' % (query_pattern, template_pattern,)
template_splitter = re.compile(rule_pattern , re.UNICODE).findall
def rule_tokenizer(text, lower=True):
"""
Return an iterable of tokens from a unicode rule text, skipping templated
parts, including leading and trailing templated parts.
For example:
>>> list(rule_tokenizer(''))
[]
>>> list(rule_tokenizer('some Text with spAces! + _ -'))
[u'some', u'text', u'with', u'spaces']
Unbalanced templates are handled correctly:
>>> list(rule_tokenizer('{{}some }}Text with spAces! + _ -'))
[u'some', u'text', u'with', u'spaces']
Templates are handled and skipped for templated sequences:
>>> list(rule_tokenizer('{{Hi}}some {{}}Text with{{noth+-_!@ing}} {{junk}}spAces! + _ -{{}}'))
[u'some', u'text', u'with', u'spaces']
"""
if not text:
return []
text = lower and text.lower() or text
tokens = template_splitter(text)
# skip templates
return (token for token in tokens if token and not token.startswith('{{'))
def ngrams(iterable, ngram_length):
"""
Return an iterable of ngrams of length `ngram_length` given an iterable.
Each ngram is a tuple of ngram_length items.
The returned iterable is empty if the input iterable contains less than
`ngram_length` items.
Note: this is a fairly arcane but optimized way to compute ngrams.
For example:
>>> list(ngrams([1,2,3,4,5], 2))
[(1, 2), (2, 3), (3, 4), (4, 5)]
>>> list(ngrams([1,2,3,4,5], 4))
[(1, 2, 3, 4), (2, 3, 4, 5)]
>>> list(ngrams([1,2,3,4], 2))
[(1, 2), (2, 3), (3, 4)]
>>> list(ngrams([1,2,3], 2))
[(1, 2), (2, 3)]
>>> list(ngrams([1,2], 2))
[(1, 2)]
>>> list(ngrams([1], 2))
[]
This also works with arrays or tuples:
>>> from array import array
>>> list(ngrams(array(b'h', [1,2,3,4,5]), 2))
[(1, 2), (2, 3), (3, 4), (4, 5)]
>>> list(ngrams(tuple([1,2,3,4,5]), 2))
[(1, 2), (2, 3), (3, 4), (4, 5)]
"""
return izip(*(islice(iterable, i, None) for i in range(ngram_length)))
def select_ngrams(ngrams, with_pos=False):
"""
Return an iterable as a subset of a sequence of ngrams using the hailstorm
algorithm. If `with_pos` is True also include the starting position for the ngram
in the original sequence.
Definition from the paper: http://www2009.eprints.org/7/1/p61.pdf
The algorithm first fingerprints every token and then selects a shingle s if
the minimum fingerprint value of all k tokens in s occurs at the first or the
last position of s (and potentially also in between). Due to the
probabilistic properties of Rabin fingerprints the probability that a shingle
is chosen is 2/k if all tokens in the shingle are different.
For example:
>>> list(select_ngrams([(2, 1, 3), (1, 1, 3), (5, 1, 3), (2, 6, 1), (7, 3, 4)]))
[(2, 1, 3), (1, 1, 3), (2, 6, 1), (7, 3, 4)]
Positions can also be included. In this case, tuple of (pos, ngram) are returned:
>>> list(select_ngrams([(2, 1, 3), (1, 1, 3), (5, 1, 3), (2, 6, 1), (7, 3, 4)], with_pos=True))
[(0, (2, 1, 3)), (1, (1, 1, 3)), (3, (2, 6, 1)), (4, (7, 3, 4))]
This works also from a generator:
>>> list(select_ngrams(x for x in [(2, 1, 3), (1, 1, 3), (5, 1, 3), (2, 6, 1), (7, 3, 4)]))
[(2, 1, 3), (1, 1, 3), (2, 6, 1), (7, 3, 4)]
"""
last = None
for i, ngram in enumerate(ngrams):
# FIXME: use a proper hash
nghs = [crc32(str(ng)) for ng in ngram]
min_hash = min(nghs)
if with_pos:
ngram = (i, ngram,)
if nghs[0] == min_hash or nghs[-1] == min_hash:
yield ngram
last = ngram
else:
# always yield the first or last ngram too.
if i == 0:
yield ngram
last = ngram
if last != ngram:
yield ngram
| 1.570313 | 2 |
etest_test/fixtures_test/ebuilds_test/__init__.py | alunduil/etest | 6 | 3244 | <filename>etest_test/fixtures_test/ebuilds_test/__init__.py
"""Ebuild Test Fixtures."""
import os
from typing import Any, Dict, List
from etest_test import helpers_test
EBUILDS: Dict[str, List[Dict[str, Any]]] = {}
helpers_test.import_directory(__name__, os.path.dirname(__file__))
| 1.765625 | 2 |
src/model.py | palucki/RememberIt | 0 | 3245 | import random
from pymongo import MongoClient
from observable import Observable
from phrase import Phrase
class MongoDbProxy:
"""Proxy for MongoDB"""
def __init__(self, url, dbName, tableName):
self.client = MongoClient(url)
self.db = self.client[dbName]
self.table = tableName
self.count = self.db[self.table].find().count()
def get_db(self):
return self.db
def add_phrase(self, phrase):
#[{ "english": eng, "polish" : pl}]
record = {"english" : phrase.eng, "polish" : phrase.meanings}
self.db[self.table].insert(record)
self.count = self.db[self.table].find().count()
def show_one(self, phrase):
print("eng: \'%s\' pol: \'%s\'" % (phrase["english"], phrase["polish"]))
def get_all(self):
#define your data struct here
words = {}
for i, phrase in enumerate(self.db[self.table].find()):
eng = phrase["english"]
#lang = phrase["lang"]
meaning = phrase["polish"]
words[eng] = meaning
return words
def show_all(self):
if self.count > 0:
for i, phrase in enumerate(self.db[self.table].find()):
print(i, end=" ")
self.show_one(phrase)
else:
print("Database is empty")
def show_random(self):
entries = self.db[self.table].find()
self.count = entries.count()
if self.count > 0:
self.show_one(entries[random.randrange(self.count)])
else:
print("Database is empty")
def record_exists(self, eng):
if self.db[self.table].find_one({"english" : eng}):
return True
else:
return False
def drop_record(self, eng):
self.db[self.table].delete_one({"english":eng})
def drop_db(self):
print("Dropping")
self.db[self.table].drop()
self.count = self.db[self.table].find().count()
class Model:
"""That needs a table of pairs - eng and its meanings"""
def __init__(self):
self.phrases = Observable({})
self.db = MongoDbProxy("mongodb://localhost:27017/", "RepeatItDb", "phrases")
data = self.db.get_all()
self.phrases.setData(data)
def addWord(self, key, lang, meanings):
newData = self.phrases.getData()
newData[key] = meanings
self.phrases.setData(newData)
def getAllWords(self):
return self.phrases.getData()
def removeWord(self, key):
newData = self.phrases.getData()
newData.pop(key)
self.phrases.setData(newData)
def saveWord(self, wordAndMeaning):
word = wordAndMeaning[0]
meaning = wordAndMeaning[1]
self.addWord(word, "pl", meaning)
def saveDb(self):
dbData = self.db.get_all()
modelData = self.getAllWords()
#That's for future optimization: update db instead of adding it all
dbKeysSet = set(dbData.keys())
dbValuesSet = set(dbData.values())
modelKeysSet = set(modelData.keys())
modelValuesSet = set(modelData.values())
newRecordsKeys = modelKeysSet - dbKeysSet
deletedRecordsKeys = dbKeysSet - modelKeysSet
if len(newRecordsKeys):
for newKey in newRecordsKeys:
self.db.add_phrase(Phrase(newKey, "pl", modelData[newKey]))
if len(deletedRecordsKeys):
for deletedKey in deletedRecordsKeys:
self.db.drop_record(deletedKey)
#Handle also value update
print("Saving database...")
| 2.78125 | 3 |
sampleApplication/clientGenerator.py | chall68/BlackWatch | 0 | 3246 | #!flask/bin/python
#from user import User
from sampleObjects.User import User
from datetime import datetime
from sampleObjects.DetectionPoint import DetectionPoint
import time, requests, random, atexit
def requestGenerator():
userObject = randomUser()
detectionPointObject = randomDetectionPoint()
req = requests.post('http://localhost:5000/addevent', json = {"User": userObject.__dict__, "DetectionPoint" : detectionPointObject.__dict__, "Time" : str(datetime.now().isoformat())})
print (req.text)
checkResp = requests.get('http://localhost:5000/getResponses')
print (checkResp.text)
def randomUser():
user = random.randint(1,3)
attacker=0
if (user==1):
attacker = User("Phillipo", "255.255.255.101", "xxxx")
elif (user==2):
attacker = User("Sergio", "192.168.127.12", "yyyy")
elif (user==3):
attacker = User("Anonymous", "172.16.31.10", "354343jjk23")
return attacker
def randomDetectionPoint():
rand = random.randint(1,2)
dp=0
if (rand==1):
dp = DetectionPoint("HTTP Verb", "GET Request used where POST is expected")
elif (rand==2):
dp = DetectionPoint("Login Page", "Hidden field altered within the login form")
return dp
for i in range (50):
requestGenerator()
time.sleep(1.5)
def closingTime():
print ("Exiting")
atexit.register(closingTime)
| 2.703125 | 3 |
news_collector/collector/consumers.py | ridwaniyas/channels-examples | 0 | 3247 | <gh_stars>0
import asyncio
import json
import datetime
from aiohttp import ClientSession
from channels.generic.http import AsyncHttpConsumer
from .constants import BLOGS
class NewsCollectorAsyncConsumer(AsyncHttpConsumer):
"""
Async HTTP consumer that fetches URLs.
"""
async def handle(self, body):
# Adapted from:
# "Making 1 million requests with python-aiohttp"
# https://pawelmhm.github.io/asyncio/python/aiohttp/2016/04/22/asyncio-aiohttp.html
async def fetch(url, session):
async with session.get(url) as response:
return await response.read()
tasks = []
loop = asyncio.get_event_loop()
# aiohttp allows a ClientSession object to link all requests together
t0 = datetime.datetime.now()
async with ClientSession() as session:
for name, url in BLOGS.items():
print('Start downloading "%s"' % name)
# Launch a coroutine for each URL fetch
task = loop.create_task(fetch(url, session))
tasks.append(task)
# Wait on, and then gather, all responses
responses = await asyncio.gather(*tasks)
dt = (datetime.datetime.now() - t0).total_seconds()
print('All downloads completed; elapsed time: {} [s]'.format(dt))
# asyncio.gather returns results in the order of the original sequence,
# so we can safely zip these together.
data = dict(zip(BLOGS.keys(), [r.decode('utf-8') for r in responses]))
text = json.dumps(data)
# We have to send a response using send_response rather than returning
# it in Channels' async HTTP consumer
await self.send_response(200,
text.encode(),
headers=[
("Content-Type", "application/json"),
]
)
| 3.234375 | 3 |
src/randomcsv/FileUtils.py | PhilipBuhr/randomCsv | 0 | 3248 | import os
from pathlib import Path
def write(file_name, content):
Path(os.path.dirname(file_name)).mkdir(parents=True, exist_ok=True)
with open(file_name, 'w') as file:
file.write(content)
def read_line_looping(file_name, count):
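"""Return a list of `count` stripped lines read from `file_name`, wrapping
back to the start of the file whenever the end is reached. Raises
EmptyFileError if the file is empty."""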
i = 0
lines = []
file = open(file_name, 'r')
line = file.readline()
if line == '':
raise EmptyFileError(f'Error: Dictionary {file_name} seems to be empty')
while i < count:
lines.append(line.strip())
i += 1
line = file.readline()
if line == '':
file.close()
file = open(file_name, 'r')
line = file.readline()
file.close()
return lines
class EmptyFileError(Exception):
pass
| 3.546875 | 4 |
stringtoiso/__init__.py | vats98754/stringtoiso | 0 | 3249 | from stringtoiso.convert_to_iso import convert | 1.15625 | 1 |
aux_sys_err_prediction_module/additive/R_runmed_spline/my_R_runmed_spline_analysis.py | PNNL-Comp-Mass-Spec/DtaRefinery | 0 | 3250 | from aux_sys_err_prediction_module.additive.R_runmed_spline.my_R_runmed_spline_fit import R_runmed_smooth_spline
from numpy import random, array, median, zeros, arange, hstack
from win32com.client import Dispatch
import math
myName = 'R_runmed_spline'
useMAD = True # use median absolute deviations instead of sum of squared residues
# -----------------------------------------------------------------------
def R_runmed_spline_MAIN(ARG3, Controller):
pars = Controller.updatedSettings['refiningPars']['regressionSettings'][myName]
# ARG3
x = ARG3[0][0]
y = ARG3[0][1]
sc = Dispatch("StatConnectorSrv.StatConnector")
sc.Init("R")
# get the best smoothing parameter
bestSpar = R_runmed_spline_KCV_OPTIMIZATION(x, y, sc=sc, **pars)
# get the prediction error for this smoothing parameter
bestPredErr = R_runmed_spline_KCV_predErr(x, y, spar=bestSpar, sc=sc, **pars)
# compare with original SSE
# is fit successful?
# return isSuccessfulFit, yFit, yEval, runMedData
SSE = sum(y ** 2)
MAD = 1.4826 * median(abs(y))
if useMAD:
SSE = MAD
if bestPredErr < SSE:
isSuccessfulFit = True
#
ppmArrs = [[] for i in range(len(ARG3))]
for ind in range(len(ARG3)):
x = ARG3[ind][0]
y = ARG3[ind][1]
xEval = ARG3[ind][2]
#
yFit, runMedData = R_runmed_smooth_spline(x, y, x, spar=bestSpar, sc=sc, **pars)
yEval, runMedData = R_runmed_smooth_spline(x, y, xEval, spar=bestSpar, sc=sc, **pars)
#
ppmArrs[ind] = [yFit, yEval]
else:
isSuccessfulFit = False
#
ppmArrs = [[] for i in range(len(ARG3))]
for ind in range(len(ARG3)):
x = ARG3[ind][0]
y = ARG3[ind][1]
xEval = ARG3[ind][2]
#
yFit = zeros(len(x), 'd')
yEval = zeros(len(xEval), 'd')
#
ppmArrs[ind] = [yFit, yEval]
sc.Close()
return isSuccessfulFit, bestPredErr, ppmArrs
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
def R_runmed_spline_KCV_OPTIMIZATION(x, y, sc, **pars):
sparRange = array([float(i) for i in pars['spar range'].split(',')])
sparStepsNum = int(pars['spar steps number'])
sparStep = round((sparRange[1] - sparRange[0]) / sparStepsNum, 5)
sparSet = arange(sparRange[0], sparRange[1], sparStep)
predErrSet = zeros(len(sparSet), 'd')
for i in range(len(sparSet)):
predErr = R_runmed_spline_KCV_predErr(x, y, spar=sparSet[i], sc=sc, **pars)
predErrSet[i] = predErr
## p(zip(sparSet, predErrSet))
spar = sparSet[predErrSet == min(predErrSet)][-1] # take the last one (smoothest) if there are few
## print('spar ', spar)
return spar
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
def R_runmed_spline_KCV_predErr(x, y, **kwargs):
"""
just returns the prediction error
"""
K = int(kwargs['K'])
# --Related to K-fold CV---------------------------
L = len(x)
N = L / K ##min length of pieces
W = list(range(L))
Z = list(range(1, K + 1))
Z = [N for j in Z]
R = L % K
Z[0:R] = [j + 1 for j in Z[0:R]] # length of the pieces
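# e.g. L=10 samples with K=3 folds gives R=1, so the piece lengths work out to 4, 3, 3 (after flooring below)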
random.shuffle(W)
ind = 0
predErr = 0
allResiduals = array([])
SSE = sum(y ** 2) # VLAD. Why do I need this???
# ---running through K training/testings-------------
for val in Z:
j = math.floor(val)
# ---making training/testing subsets-------------
test = W[ind:ind + j]
test.sort()
train = W[0:ind] + W[ind + j:]
train.sort()
ind += j
# -----------------------------------------------
# ---fit runmed_spline here----------------------
yFit, runMed = R_runmed_smooth_spline(x[train], y[train], x[test], **kwargs)
residualsTest = y[test] - yFit
predErr += sum(residualsTest ** 2)
allResiduals = hstack((allResiduals, residualsTest))
# -----------------------------------------------
if useMAD:
predErr = 1.4826 * median(abs(allResiduals))
return predErr
# -----------------------------------------------------------------------
if __name__ == '__main__':
from numpy import linspace, cos, lexsort, zeros, sin
from pylab import plot, show, subplot, savefig, clf, ylim
from pprint import pprint as p
from time import clock as c
x1 = linspace(0, 30, 300)
## y1 = cos(x1)
## y1 = zeros(len(x1),'d') #nice test
y1 = x1 * 0.03
y1 += random.normal(scale=0.2, size=y1.shape)
ind = lexsort(keys=(y1, x1))
x1 = x1[ind]
y1 = y1[ind]
t1 = c()
isSuccessfulFit, yFit, yEval, runMedData, predErr = \
R_runmed_spline_MAIN(x1, y1, x1, runMedSpan=0.01, K=10, sparRange=[0.6, 1.1, 0.1])
t2 = c()
print('done in %s seconds' % (t2 - t1))
subplot(211)
plot(x1, y1, 'bo')
plot(runMedData[0], runMedData[1], 'y^')
plot(x1, yEval, 'r+-')
ylim([-1.5, +1.5])
subplot(212)
plot(x1, y1 - yEval, 'go')
ylim([-1.5, +1.5])
show()
| 2.359375 | 2 |
setup.py | Ms2ger/python-zstandard | 0 | 3251 | #!/usr/bin/env python
# Copyright (c) 2016-present, <NAME>
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
import os
import sys
from setuptools import setup
try:
import cffi
except ImportError:
cffi = None
import setup_zstd
SUPPORT_LEGACY = False
SYSTEM_ZSTD = False
WARNINGS_AS_ERRORS = False
if os.environ.get('ZSTD_WARNINGS_AS_ERRORS', ''):
WARNINGS_AS_ERRORS = True
if '--legacy' in sys.argv:
SUPPORT_LEGACY = True
sys.argv.remove('--legacy')
if '--system-zstd' in sys.argv:
SYSTEM_ZSTD = True
sys.argv.remove('--system-zstd')
if '--warnings-as-errors' in sys.argv:
WARNINGS_AS_ERRORS = True
sys.argv.remove('--warnings-as-errors')
# Code for obtaining the Extension instance is in its own module to
# facilitate reuse in other projects.
extensions = [
setup_zstd.get_c_extension(name='zstd',
support_legacy=SUPPORT_LEGACY,
system_zstd=SYSTEM_ZSTD,
warnings_as_errors=WARNINGS_AS_ERRORS),
]
install_requires = []
if cffi:
import make_cffi
extensions.append(make_cffi.ffi.distutils_extension())
# Need change in 1.10 for ffi.from_buffer() to handle all buffer types
# (like memoryview).
# Need feature in 1.11 for ffi.gc() to declare size of objects so we avoid
# garbage collection pitfalls.
install_requires.append('cffi>=1.11')
version = None
with open('c-ext/python-zstandard.h', 'r') as fh:
for line in fh:
if not line.startswith('#define PYTHON_ZSTANDARD_VERSION'):
continue
version = line.split()[2][1:-1]
break
if not version:
raise Exception('could not resolve package version; '
'this should never happen')
setup(
name='zstandard',
version=version,
description='Zstandard bindings for Python',
long_description=open('README.rst', 'r').read(),
url='https://github.com/indygreg/python-zstandard',
author='<NAME>',
author_email='<EMAIL>',
license='BSD',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: C',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords='zstandard zstd compression',
packages=['zstandard'],
ext_modules=extensions,
test_suite='tests',
install_requires=install_requires,
)
| 1.695313 | 2 |
Escolas/Curso em Video/Back-End/Curso de Python/Mundos/Mundo 01/Exercicio_16.py | c4st1lh0/Projetos-de-Aula | 0 | 3252 | <gh_stars>0
import math
num = float(input('Enter any real number: '))
print('The number {} has the integer part {}'.format(num, math.trunc(num))) | 3.625 | 4 |
mmdet/ops/orn/functions/__init__.py | JarvisUSTC/DARDet | 274 | 3253 | <filename>mmdet/ops/orn/functions/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from .active_rotating_filter import active_rotating_filter
from .active_rotating_filter import ActiveRotatingFilter
from .rotation_invariant_encoding import rotation_invariant_encoding
from .rotation_invariant_encoding import RotationInvariantEncoding
from .rotation_invariant_pooling import RotationInvariantPooling
__all__ = ['ActiveRotatingFilter', 'active_rotating_filter', 'rotation_invariant_encoding', 'RotationInvariantEncoding', 'RotationInvariantPooling'] | 1.289063 | 1 |
sympy/core/tests/test_cache.py | eriknw/sympy | 7 | 3254 | <reponame>eriknw/sympy
from sympy.core.cache import cacheit
def test_cacheit_doc():
@cacheit
def testfn():
"test docstring"
pass
assert testfn.__doc__ == "test docstring"
assert testfn.__name__ == "testfn"
| 2.25 | 2 |
mmdet/models/losses/ranking_losses.py | VietDunghacker/VarifocalNet | 0 | 3255 | <gh_stars>0
import torch
class RankSort(torch.autograd.Function):
@staticmethod
def forward(ctx, logits, targets, delta_RS=0.50, eps=1e-10):
classification_grads=torch.zeros(logits.shape).cuda()
#Filter fg logits
fg_labels = (targets > 0.)
fg_logits = logits[fg_labels]
fg_targets = targets[fg_labels]
fg_num = len(fg_logits)
#Do not use bg with scores less than minimum fg logit
#since changing its score does not have an effect on precision
threshold_logit = torch.min(fg_logits)-delta_RS
relevant_bg_labels=((targets==0) & (logits>=threshold_logit))
relevant_bg_logits = logits[relevant_bg_labels]
relevant_bg_grad=torch.zeros(len(relevant_bg_logits)).cuda()
sorting_error=torch.zeros(fg_num).cuda()
ranking_error=torch.zeros(fg_num).cuda()
fg_grad=torch.zeros(fg_num).cuda()
#sort the fg logits
order=torch.argsort(fg_logits)
#Loops over each positive following the order
for ii in order:
# Difference Transforms (x_ij)
fg_relations=fg_logits-fg_logits[ii]
bg_relations=relevant_bg_logits-fg_logits[ii]
if delta_RS > 0:
fg_relations=torch.clamp(fg_relations/(2*delta_RS)+0.5,min=0,max=1)
bg_relations=torch.clamp(bg_relations/(2*delta_RS)+0.5,min=0,max=1)
else:
fg_relations = (fg_relations >= 0).float()
bg_relations = (bg_relations >= 0).float()
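# With the default delta_RS=0.5, the clamp branch above maps a score difference x to clamp(x + 0.5, 0, 1):
# differences within [-0.5, 0.5] scale linearly into [0, 1], larger gaps saturate at 0 or 1; the else branch is a hard 0/1 step.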
# Rank of ii among pos and false positive number (bg with larger scores)
rank_pos=torch.sum(fg_relations)
FP_num=torch.sum(bg_relations)
# Rank of ii among all examples
rank=rank_pos+FP_num
# Ranking error of example ii. target_ranking_error is always 0. (Eq. 7)
ranking_error[ii]=FP_num/rank
# Current sorting error of example ii. (Eq. 7)
current_sorting_error = torch.sum(fg_relations*(1-fg_targets))/rank_pos
#Find examples in the target sorted order for example ii
iou_relations = (fg_targets >= fg_targets[ii])
target_sorted_order = iou_relations * fg_relations
#The rank of ii among positives in sorted order
rank_pos_target = torch.sum(target_sorted_order)
#Compute target sorting error. (Eq. 8)
#Since target ranking error is 0, this is also total target error
target_sorting_error= torch.sum(target_sorted_order*(1-fg_targets))/rank_pos_target
#Compute sorting error on example ii
sorting_error[ii] = current_sorting_error - target_sorting_error
#Identity Update for Ranking Error
if FP_num > eps:
#For ii the update is the ranking error
fg_grad[ii] -= ranking_error[ii]
#For negatives, distribute error via ranking pmf (i.e. bg_relations/FP_num)
relevant_bg_grad += (bg_relations*(ranking_error[ii]/FP_num))
#Find the positives that are misranked (the cause of the error)
#These are the ones with smaller IoU but larger logits
missorted_examples = (~ iou_relations) * fg_relations
#Denominator of sorting pmf
sorting_pmf_denom = torch.sum(missorted_examples)
#Identity Update for Sorting Error
if sorting_pmf_denom > eps:
#For ii the update is the sorting error
fg_grad[ii] -= sorting_error[ii]
#For positives, distribute error via sorting pmf (i.e. missorted_examples/sorting_pmf_denom)
fg_grad += (missorted_examples*(sorting_error[ii]/sorting_pmf_denom))
#Normalize gradients by number of positives
classification_grads[fg_labels]= (fg_grad/fg_num)
classification_grads[relevant_bg_labels]= (relevant_bg_grad/fg_num)
ctx.save_for_backward(classification_grads)
return ranking_error.mean(), sorting_error.mean()
@staticmethod
def backward(ctx, out_grad1, out_grad2):
g1, =ctx.saved_tensors
return g1*out_grad1, None, None, None
class aLRPLoss(torch.autograd.Function):
@staticmethod
def forward(ctx, logits, targets, regression_losses, delta=1., eps=1e-5):
classification_grads=torch.zeros(logits.shape).cuda()
#Filter fg logits
fg_labels = (targets == 1)
fg_logits = logits[fg_labels]
fg_num = len(fg_logits)
#Do not use bg with scores less than minimum fg logit
#since changing its score does not have an effect on precision
threshold_logit = torch.min(fg_logits)-delta
#Get valid bg logits
relevant_bg_labels=((targets==0)&(logits>=threshold_logit))
relevant_bg_logits=logits[relevant_bg_labels]
relevant_bg_grad=torch.zeros(len(relevant_bg_logits)).cuda()
rank=torch.zeros(fg_num).cuda()
prec=torch.zeros(fg_num).cuda()
fg_grad=torch.zeros(fg_num).cuda()
max_prec=0
#sort the fg logits
order=torch.argsort(fg_logits)
#Loops over each positive following the order
for ii in order:
#x_ij s as score differences with fgs
fg_relations=fg_logits-fg_logits[ii]
#Apply piecewise linear function and determine relations with fgs
fg_relations=torch.clamp(fg_relations/(2*delta)+0.5,min=0,max=1)
#Discard i=j in the summation in rank_pos
fg_relations[ii]=0
#x_ij s as score differences with bgs
bg_relations=relevant_bg_logits-fg_logits[ii]
#Apply piecewise linear function and determine relations with bgs
bg_relations=torch.clamp(bg_relations/(2*delta)+0.5,min=0,max=1)
#Compute the rank of the example within fgs and number of bgs with larger scores
rank_pos=1+torch.sum(fg_relations)
FP_num=torch.sum(bg_relations)
#Store the total since it is normalizer also for aLRP Regression error
rank[ii]=rank_pos+FP_num
#Compute precision for this example to compute classification loss
prec[ii]=rank_pos/rank[ii]
#For stability, set eps to a infinitesmall value (e.g. 1e-6), then compute grads
if FP_num > eps:
fg_grad[ii] = -(torch.sum(fg_relations*regression_losses)+FP_num)/rank[ii]
relevant_bg_grad += (bg_relations*(-fg_grad[ii]/FP_num))
#aLRP with grad formulation fg gradient
classification_grads[fg_labels]= fg_grad
#aLRP with grad formulation bg gradient
classification_grads[relevant_bg_labels]= relevant_bg_grad
classification_grads /= (fg_num)
cls_loss=1-prec.mean()
ctx.save_for_backward(classification_grads)
return cls_loss, rank, order
@staticmethod
def backward(ctx, out_grad1, out_grad2, out_grad3):
g1, =ctx.saved_tensors
return g1*out_grad1, None, None, None, None
class APLoss(torch.autograd.Function):
@staticmethod
def forward(ctx, logits, targets, delta=1.):
classification_grads=torch.zeros(logits.shape).cuda()
#Filter fg logits
fg_labels = (targets == 1)
fg_logits = logits[fg_labels]
fg_num = len(fg_logits)
#Do not use bg with scores less than minimum fg logit
#since changing its score does not have an effect on precision
threshold_logit = torch.min(fg_logits)-delta
#Get valid bg logits
relevant_bg_labels=((targets==0)&(logits>=threshold_logit))
relevant_bg_logits=logits[relevant_bg_labels]
relevant_bg_grad=torch.zeros(len(relevant_bg_logits)).cuda()
rank=torch.zeros(fg_num).cuda()
prec=torch.zeros(fg_num).cuda()
fg_grad=torch.zeros(fg_num).cuda()
max_prec=0
#sort the fg logits
order=torch.argsort(fg_logits)
#Loops over each positive following the order
for ii in order:
#x_ij s as score differences with fgs
fg_relations=fg_logits-fg_logits[ii]
#Apply piecewise linear function and determine relations with fgs
fg_relations=torch.clamp(fg_relations/(2*delta)+0.5,min=0,max=1)
#Discard i=j in the summation in rank_pos
fg_relations[ii]=0
#x_ij s as score differences with bgs
bg_relations=relevant_bg_logits-fg_logits[ii]
#Apply piecewise linear function and determine relations with bgs
bg_relations=torch.clamp(bg_relations/(2*delta)+0.5,min=0,max=1)
#Compute the rank of the example within fgs and number of bgs with larger scores
rank_pos=1+torch.sum(fg_relations)
FP_num=torch.sum(bg_relations)
#Store the total since it is normalizer also for aLRP Regression error
rank[ii]=rank_pos+FP_num
#Compute precision for this example
current_prec=rank_pos/rank[ii]
#Compute interpolated AP and store gradients for relevant bg examples
if (max_prec<=current_prec):
max_prec=current_prec
relevant_bg_grad += (bg_relations/rank[ii])
else:
relevant_bg_grad += (bg_relations/rank[ii])*(((1-max_prec)/(1-current_prec)))
#Store fg gradients
fg_grad[ii]=-(1-max_prec)
prec[ii]=max_prec
#aLRP with grad formulation fg gradient
classification_grads[fg_labels]= fg_grad
#aLRP with grad formulation bg gradient
classification_grads[relevant_bg_labels]= relevant_bg_grad
classification_grads /= fg_num
cls_loss=1-prec.mean()
ctx.save_for_backward(classification_grads)
return cls_loss
@staticmethod
def backward(ctx, out_grad1):
g1, =ctx.saved_tensors
return g1*out_grad1, None, None
| 2.234375 | 2 |
tests/test_dcd_api.py | sadamek/pyIMX | 0 | 3256 | # Copyright (c) 2017-2018 <NAME>
#
# SPDX-License-Identifier: BSD-3-Clause
# The BSD-3-Clause license for this file can be found in the LICENSE file included with this distribution
# or at https://spdx.org/licenses/BSD-3-Clause.html#licenseText
import os
import pytest
from imx import img
# Used Directories
DATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
# Test Files
DCD_TXT = os.path.join(DATA_DIR, 'dcd_test.txt')
DCD_BIN = os.path.join(DATA_DIR, 'dcd_test.bin')
def setup_module(module):
# Prepare test environment
pass
def teardown_module(module):
# Clean test environment
pass
def test_txt_parser():
with open(DCD_TXT, 'r') as f:
dcd_obj = img.SegDCD.parse_txt(f.read())
assert dcd_obj is not None
assert len(dcd_obj) == 12
def test_bin_parser():
with open(DCD_BIN, 'rb') as f:
dcd_obj = img.SegDCD.parse(f.read())
assert dcd_obj is not None
assert len(dcd_obj) == 12
| 2.0625 | 2 |
recentjson.py | nydailynews/feedutils | 0 | 3257 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Return recent items from a json feed. Recent means "In the last X days."
import os
import doctest
import json
import urllib2
import argparse
import types
import gzip
from datetime import datetime, timedelta
from time import mktime
class RecentJson:
""" Methods for ingesting and publishing JSON feeds.
>>> url = 'http://www.nydailynews.com/json/cmlink/aaron-judge-1.3306628'
>>> parser = build_parser()
>>> args = parser.parse_args([url])
>>> rj = RecentJson(args)
"""
def __init__(self, args={}):
self.args = args
if not hasattr(self.args, 'days'):
self.args.days = 0
self.days = self.args.days
self.date_format = '%a, %d %b %Y %X'
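# e.g. 'Fri, 7 Jul 2017 15:16:38'; the trailing timezone offset is dropped before parsing with this format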
def get(self, url):
""" Wrapper for API requests. Take a URL, return a json array.
>>> url = 'http://www.nydailynews.com/json/cmlink/aaron-judge-1.3306628'
>>> parser = build_parser()
>>> args = parser.parse_args([url])
>>> rj = RecentJson(args)
>>> rj.get(url)
True
"""
response = urllib2.urlopen(url)
if int(response.code) >= 400:
if 'verbose' in self.args and self.args.verbose:
print "URL: %s" % url
raise ValueError("URL %s response: %s" % (url, response['status']))
self.xml = response.read()
return True
def parse(self):
""" Turn the xml into an object.
>>> url = 'http://www.nydailynews.com/json/cmlink/aaron-judge-1.3306628'
>>> parser = build_parser()
>>> args = parser.parse_args([url])
>>> rj = RecentJson(args)
>>> rj.get(url)
True
>>> xml = rj.parse()
>>> print len(xml)
50
"""
try:
p = json.loads(self.xml)
except:
# Sometimes we download gzipped documents from the web.
fh = open('json.gz', 'wb')
fh.write(self.xml)
fh.close()
try:
p = json.loads(gzip.GzipFile('json.gz', 'r').read())
except IOError:
return None
self.p = p
return p
def recently(self):
""" Return a feedparser entry object for the last X days of feed entries.
>>> url = 'http://www.nydailynews.com/json/cmlink/aaron-judge-1.3306628'
>>> parser = build_parser()
>>> args = parser.parse_args([url])
>>> rj = RecentJson(args)
>>> rj.get(url)
True
>>> xml = rj.parse()
>>> articles = rj.recently()
"""
items = []
for item in self.p:
# print item.keys()
# [u'body', u'tags', u'url', u'contentId', u'abstract', u'author', u'lastUpdated', u'mobileTitle', u'mobileUrl', u'publish_date', u'images', u'title', u'type', u'categories']
# print item['publish_date']
# Fri, 7 Jul 2017 15:16:38 -0400
#dt = datetime.strptime(item['publish_date'], '%a, %d %b %Y %X %z')
dt = datetime.strptime(' '.join(item['publish_date'].split(' ')[:5]), self.date_format)
delta = datetime.today() - dt
if delta.days > int(self.days):
continue
items.append(item)
if 'verbose' in self.args and self.args.verbose:
print delta.days, dt
self.items = items
return items
def pretty_date(ago):
""" Process a timedelta object.
From https://stackoverflow.com/questions/1551382/user-friendly-time-format-in-python
"""
second_diff = ago.seconds
day_diff = ago.days
if day_diff < 0:
return ''
if day_diff == 0:
if second_diff < 10:
return "just now"
if second_diff < 60:
return str(second_diff) + " seconds ago"
if second_diff < 120:
return "a minute ago"
if second_diff < 3600:
return str(second_diff / 60) + " minutes ago"
if second_diff < 7200:
return "an hour ago"
if second_diff < 86400:
return str(second_diff / 3600) + " hours ago"
if day_diff == 1:
return "Yesterday"
if day_diff < 7:
return str(day_diff) + " days ago"
if day_diff < 31:
if day_diff / 7 == 1:
return str(day_diff / 7) + " week ago"
return str(day_diff / 7) + " weeks ago"
if day_diff < 365:
if day_diff / 30 == 1:
return str(day_diff / 30) + " month ago"
return str(day_diff / 30) + " months ago"
if day_diff / 365 == 1:
return str(day_diff / 365) + " year ago"
return str(day_diff / 365) + " years ago"
def main(args):
""" For command-line use.
"""
rj = RecentJson(args)
if args:
articles = []
for arg in args.urls[0]:
if args.verbose:
print arg
rj.get(arg)
try:
p = rj.parse()
except:
continue
if not p:
continue
articles.append(rj.recently())
if len(articles) == 0:
return None
for i, article in enumerate(articles[0]):
if i >= args.limit and args.limit > 0:
break
dt = datetime.strptime(' '.join(article['publish_date'].split(' ')[:5]), '%a, %d %b %Y %X')
ago = datetime.now() - dt
# print ago
# 2 days, 15:57:48.578638
if args.output == 'html':
if type(article['title']) is types.UnicodeType:
article['title'] = article['title'].encode('utf-8', 'replace')
if args.listitem == True:
print '<li><a href="{0}">{1}</a> <span>({2})</span></li>'.format(article['url'], article['title'], pretty_date(ago).lower())
elif args.nostamp == True:
print '<li><a href="{0}">{1}</a></li>'.format(article['url'], article['title'], pretty_date(ago).lower())
else:
print '<a href="{0}">{1}</a> <span>({2})</span>'.format(article['url'], article['title'], pretty_date(ago).lower())
if args.output == 'js':
if type(article['title']) is types.UnicodeType:
article['title'] = article['title'].encode('utf-8', 'replace')
print 'var hed = "<a href=\'{0}\'>{1}</a> <span>({2})</span>";'.format(article['url'], article['title'].replace('"', '\\\\"'), pretty_date(ago).lower())
elif args.output == 'json':
print json.dumps({'title': article['title'],
'id': article['id'],
'description': article['description']})
elif args.output == 'csv':
dt = datetime.strptime(' '.join(article['publish_date'].split(' ')[:5]), '%a, %d %b %Y %X')
article['datetime'] = '%s-%s-%s' % (dt.year, dt.month, dt.day)
if dt.month < 10:
article['datetime'] = '%d-0%d-%d' % (dt.year, dt.month, dt.day)
if dt.day < 10:
article['datetime'] = '%d-0%d-0%d' % (dt.year, dt.month, dt.day)
article['slug'] = article['title'].lower().replace(' ', '-').replace('--', '-').replace(':', '')
article['iframe_url'] = article['media_player']['url']
article['image_url'] = article['media_thumbnail'][0]['url']
article['image_large_url'] = article['media_thumbnail'][1]['url']
article['description'] = article['description'].replace('"', "'")
# date,title,id,slug,player_url,image_url,image_large_url,keywords,description
print '%(datetime)s,"%(title)s",%(id)s,%(slug)s,%(iframe_url)s,%(image_url)s,%(image_large_url)s,"%(media_keywords)s","%(description)s"' % article
def build_parser():
""" We put the argparse in a method so we can test it
outside of the command-line.
"""
parser = argparse.ArgumentParser(usage='$ python recentjson.py http://domain.com/json/',
description='''Takes a list of URLs passed as args.
Returns the items published today unless otherwise specified.''',
epilog='')
parser.add_argument("-v", "--verbose", dest="verbose", default=False, action="store_true")
parser.add_argument("--test", dest="test", default=False, action="store_true")
parser.add_argument("-d", "--days", dest="days", default=0)
parser.add_argument("-l", "--limit", dest="limit", default=0, type=int)
parser.add_argument("-o", "--output", dest="output", default="html", type=str)
parser.add_argument("--li", dest="listitem", default=False, action="store_true")
parser.add_argument("--ns", dest="nostamp", default=False, action="store_true")
parser.add_argument("urls", action="append", nargs="*")
return parser
if __name__ == '__main__':
"""
"""
parser = build_parser()
args = parser.parse_args()
if args.test:
doctest.testmod(verbose=args.verbose)
main(args)
| 3.484375 | 3 |
maint/MultiStage2.py | Liastre/pcre2 | 0 | 3258 | <gh_stars>0
#! /usr/bin/python
# Multistage table builder
# (c) <NAME>, 2008
##############################################################################
# This script was submitted to the PCRE project by <NAME>owski as part of
# the upgrading of Unicode property support. The new code speeds up property
# matching many times. The script is for the use of PCRE maintainers, to
# generate the pcre2_ucd.c file that contains a digested form of the Unicode
# data tables. A number of extensions have been added to the original script.
#
# The script has now been upgraded to Python 3 for PCRE2, and should be run in
# the maint subdirectory, using the command
#
# [python3] ./MultiStage2.py >../src/pcre2_ucd.c
#
# It requires six Unicode data tables: DerivedGeneralCategory.txt,
# GraphemeBreakProperty.txt, Scripts.txt, ScriptExtensions.txt,
# CaseFolding.txt, and emoji-data.txt. These must be in the
# maint/Unicode.tables subdirectory.
#
# DerivedGeneralCategory.txt is found in the "extracted" subdirectory of the
# Unicode database (UCD) on the Unicode web site; GraphemeBreakProperty.txt is
# in the "auxiliary" subdirectory. Scripts.txt, ScriptExtensions.txt, and
# CaseFolding.txt are directly in the UCD directory. The emoji-data.txt file is
# in files associated with Unicode Technical Standard #51 ("Unicode Emoji"),
# for example:
#
# http://unicode.org/Public/emoji/11.0/emoji-data.txt
#
# -----------------------------------------------------------------------------
# Minor modifications made to this script:
# Added #! line at start
# Removed tabs
# Made it work with Python 2.4 by rewriting two statements that needed 2.5
# Consequent code tidy
# Adjusted data file names to take from the Unicode.tables directory
# Adjusted global table names by prefixing _pcre_.
# Commented out stuff relating to the casefolding table, which isn't used;
# removed completely in 2012.
# Corrected size calculation
# Add #ifndef SUPPORT_UCP to use dummy tables when no UCP support is needed.
# Update for PCRE2: name changes, and SUPPORT_UCP is abolished.
#
# Major modifications made to this script:
# Added code to add a grapheme break property field to records.
#
# Added code to search for sets of more than two characters that must match
# each other caselessly. A new table is output containing these sets, and
# offsets into the table are added to the main output records. This new
# code scans CaseFolding.txt instead of UnicodeData.txt, which is no longer
# used.
#
# Update for Python3:
# . Processed with 2to3, but that didn't fix everything
# . Changed string.strip to str.strip
# . Added encoding='utf-8' to the open() call
# . Inserted 'int' before blocksize/ELEMS_PER_LINE because an int is
# required and the result of the division is a float
#
# Added code to scan the emoji-data.txt file to find the Extended Pictographic
# property, which is used by PCRE2 as a grapheme breaking property. This was
# done when updating to Unicode 11.0.0 (July 2018).
#
# Added code to add a Script Extensions field to records. This has increased
# their size from 8 to 12 bytes, only 10 of which are currently used.
#
# 01-March-2010: Updated list of scripts for Unicode 5.2.0
# 30-April-2011: Updated list of scripts for Unicode 6.0.0
# July-2012: Updated list of scripts for Unicode 6.1.0
# 20-August-2012: Added scan of GraphemeBreakProperty.txt and added a new
# field in the record to hold the value. Luckily, the
# structure had a hole in it, so the resulting table is
# not much bigger than before.
# 18-September-2012: Added code for multiple caseless sets. This uses the
# final hole in the structure.
# 30-September-2012: Added RegionalIndicator break property from Unicode 6.2.0
# 13-May-2014: Updated for PCRE2
# 03-June-2014: Updated for Python 3
# 20-June-2014: Updated for Unicode 7.0.0
# 12-August-2014: Updated to put Unicode version into the file
# 19-June-2015: Updated for Unicode 8.0.0
# 02-July-2017: Updated for Unicode 10.0.0
# 03-July-2018: Updated for Unicode 11.0.0
# 07-July-2018: Added code to scan emoji-data.txt for the Extended
# Pictographic property.
# 01-October-2018: Added the 'Unknown' script name
# 03-October-2018: Added new field for Script Extensions
# 27-July-2019: Updated for Unicode 12.1.0
# ----------------------------------------------------------------------------
#
#
# The main tables generated by this script are used by macros defined in
# pcre2_internal.h. They look up Unicode character properties using short
# sequences of code that contains no branches, which makes for greater speed.
#
# Conceptually, there is a table of records (of type ucd_record), containing a
# script number, script extension value, character type, grapheme break type,
# offset to caseless matching set, offset to the character's other case, for
# every Unicode character. However, a real table covering all Unicode
# characters would be far too big. It can be efficiently compressed by
# observing that many characters have the same record, and many blocks of
# characters (taking 128 characters in a block) have the same set of records as
# other blocks. This leads to a 2-stage lookup process.
#
# This script constructs six tables. The ucd_caseless_sets table contains
# lists of characters that all match each other caselessly. Each list is
# in order, and is terminated by NOTACHAR (0xffffffff), which is larger than
# any valid character. The first list is empty; this is used for characters
# that are not part of any list.
#
# The ucd_digit_sets table contains the code points of the '9' characters in
# each set of 10 decimal digits in Unicode. This is used to ensure that digits
# in script runs all come from the same set. The first element in the vector
# contains the number of subsequent elements, which are in ascending order.
#
# The ucd_script_sets vector contains lists of script numbers that are the
# Script Extensions properties of certain characters. Each list is terminated
# by zero (ucp_Unknown). A character with more than one script listed for its
# Script Extension property has a negative value in its record. This is the
# negated offset to the start of the relevant list in the ucd_script_sets
# vector.
#
# The ucd_records table contains one instance of every unique record that is
# required. The ucd_stage1 table is indexed by a character's block number,
# which is the character's code point divided by 128, since 128 is the size
# of each block. The result of a lookup in ucd_stage1 is a "virtual" block number.
#
# The ucd_stage2 table is a table of "virtual" blocks; each block is indexed by
# the offset of a character within its own block, and the result is the index
# number of the required record in the ucd_records vector.
#
# The following examples are correct for the Unicode 11.0.0 database. Future
# updates may change the actual lookup values.
#
# Example: lowercase "a" (U+0061) is in block 0
# lookup 0 in stage1 table yields 0
# lookup 97 (0x61) in the first table in stage2 yields 17
# record 17 is { 34, 5, 12, 0, -32, 34, 0 }
# 34 = ucp_Latin => Latin script
# 5 = ucp_Ll => Lower case letter
# 12 = ucp_gbOther => Grapheme break property "Other"
# 0 => Not part of a caseless set
# -32 (-0x20) => Other case is U+0041
# 34 = ucp_Latin => No special Script Extension property
# 0 => Dummy value, unused at present
#
# Almost all lowercase latin characters resolve to the same record. One or two
# are different because they are part of a multi-character caseless set (for
# example, k, K and the Kelvin symbol are such a set).
#
# Example: hiragana letter A (U+3042) is in block 96 (0x60)
# lookup 96 in stage1 table yields 90
# lookup 66 (0x42) in table 90 in stage2 yields 564
# record 564 is { 27, 7, 12, 0, 0, 27, 0 }
# 27 = ucp_Hiragana => Hiragana script
# 7 = ucp_Lo => Other letter
# 12 = ucp_gbOther => Grapheme break property "Other"
# 0 => Not part of a caseless set
# 0 => No other case
# 27 = ucp_Hiragana => No special Script Extension property
# 0 => Dummy value, unused at present
#
# Example: vedic tone karshana (U+1CD0) is in block 57 (0x39)
# lookup 57 in stage1 table yields 55
# lookup 80 (0x50) in table 55 in stage2 yields 458
# record 458 is { 28, 12, 3, 0, 0, -101, 0 }
# 28 = ucp_Inherited => Script inherited from predecessor
# 12 = ucp_Mn => Non-spacing mark
# 3 = ucp_gbExtend => Grapheme break property "Extend"
# 0 => Not part of a caseless set
# 0 => No other case
# -101 => Script Extension list offset = 101
# 0 => Dummy value, unused at present
#
# At offset 101 in the ucd_script_sets vector we find the list 3, 15, 107, 29,
# and terminator 0. This means that this character is expected to be used with
# any of those scripts, which are Bengali, Devanagari, Grantha, and Kannada.
#
# <NAME>, 03 July 2008
# Last Updated: 07 October 2018
##############################################################################
import re
import string
import sys
MAX_UNICODE = 0x110000
NOTACHAR = 0xffffffff
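# Illustrative sketch (not part of the original script): how the generated
# two-stage tables are consulted, mirroring the worked examples in the header
# comment above. The argument names and the block size of 128 are assumptions
# taken from that description; the real lookup is done by C macros in
# pcre2_internal.h.
def lookup_ucd_record(cp, ucd_stage1, ucd_stage2, ucd_records, block_size=128):
    virtual_block = ucd_stage1[cp // block_size]  # stage 1: virtual block number
    record_index = ucd_stage2[int(virtual_block) * block_size + (cp % block_size)]  # stage 2: record index
    return ucd_records[record_index]  # the shared ucd_record for this code point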
# Parse a line of Scripts.txt, GraphemeBreakProperty.txt or DerivedGeneralCategory.txt
def make_get_names(enum):
return lambda chardata: enum.index(chardata[1])
# Parse a line of CaseFolding.txt
def get_other_case(chardata):
if chardata[1] == 'C' or chardata[1] == 'S':
return int(chardata[2], 16) - int(chardata[0], 16)
return 0
# Parse a line of ScriptExtensions.txt
def get_script_extension(chardata):
this_script_list = list(chardata[1].split(' '))
if len(this_script_list) == 1:
return script_abbrevs.index(this_script_list[0])
script_numbers = []
for d in this_script_list:
script_numbers.append(script_abbrevs.index(d))
script_numbers.append(0)
script_numbers_length = len(script_numbers)
for i in range(1, len(script_lists) - script_numbers_length + 1):
for j in range(0, script_numbers_length):
found = True
if script_lists[i+j] != script_numbers[j]:
found = False
break
if found:
return -i
# Not found in existing lists
return_value = len(script_lists)
script_lists.extend(script_numbers)
return -return_value
# Read the whole table in memory, setting/checking the Unicode version
def read_table(file_name, get_value, default_value):
global unicode_version
f = re.match(r'^[^/]+/([^.]+)\.txt$', file_name)
file_base = f.group(1)
version_pat = r"^# " + re.escape(file_base) + r"-(\d+\.\d+\.\d+)\.txt$"
file = open(file_name, 'r', encoding='utf-8')
f = re.match(version_pat, file.readline())
version = f.group(1)
if unicode_version == "":
unicode_version = version
elif unicode_version != version:
print("WARNING: Unicode version differs in %s", file_name, file=sys.stderr)
table = [default_value] * MAX_UNICODE
for line in file:
line = re.sub(r'#.*', '', line)
chardata = list(map(str.strip, line.split(';')))
if len(chardata) <= 1:
continue
value = get_value(chardata)
m = re.match(r'([0-9a-fA-F]+)(\.\.([0-9a-fA-F]+))?$', chardata[0])
char = int(m.group(1), 16)
if m.group(3) is None:
last = char
else:
last = int(m.group(3), 16)
for i in range(char, last + 1):
# It is important not to overwrite a previously set
# value because in the CaseFolding file there are lines
# to be ignored (returning the default value of 0)
# which often come after a line which has already set
# data.
if table[i] == default_value:
table[i] = value
file.close()
return table
# Get the smallest possible C language type for the values
def get_type_size(table):
type_size = [("uint8_t", 1), ("uint16_t", 2), ("uint32_t", 4),
("signed char", 1), ("pcre_int16", 2), ("pcre_int32", 4)]
limits = [(0, 255), (0, 65535), (0, 4294967295),
(-128, 127), (-32768, 32767), (-2147483648, 2147483647)]
minval = min(table)
maxval = max(table)
for num, (minlimit, maxlimit) in enumerate(limits):
if minlimit <= minval and maxval <= maxlimit:
return type_size[num]
else:
raise OverflowError("Too large to fit into C types")
def get_tables_size(*tables):
total_size = 0
for table in tables:
type, size = get_type_size(table)
total_size += size * len(table)
return total_size
# Compress the table into the two stages
def compress_table(table, block_size):
blocks = {} # Dictionary for finding identical blocks
stage1 = [] # Stage 1 table contains block numbers (indices into stage 2 table)
stage2 = [] # Stage 2 table contains the blocks with property values
table = tuple(table)
for i in range(0, len(table), block_size):
block = table[i:i+block_size]
start = blocks.get(block)
if start is None:
# Allocate a new block
start = len(stage2) / block_size
stage2 += block
blocks[block] = start
stage1.append(start)
return stage1, stage2
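# For scale: with MAX_UNICODE (0x110000) code points and block_size=128, stage1 holds
# 8704 entries and stage2 holds one 128-entry block per distinct block pattern.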
# Print a table
def print_table(table, table_name, block_size = None):
type, size = get_type_size(table)
ELEMS_PER_LINE = 16
s = "const %s %s[] = { /* %d bytes" % (type, table_name, size * len(table))
if block_size:
s += ", block = %d" % block_size
print(s + " */")
table = tuple(table)
if block_size is None:
fmt = "%3d," * ELEMS_PER_LINE + " /* U+%04X */"
mult = MAX_UNICODE / len(table)
for i in range(0, len(table), ELEMS_PER_LINE):
print(fmt % (table[i:i+ELEMS_PER_LINE] +
(int(i * mult),)))
else:
if block_size > ELEMS_PER_LINE:
el = ELEMS_PER_LINE
else:
el = block_size
fmt = "%3d," * el + "\n"
if block_size > ELEMS_PER_LINE:
fmt = fmt * int(block_size / ELEMS_PER_LINE)
for i in range(0, len(table), block_size):
print(("/* block %d */\n" + fmt) % ((i / block_size,) + table[i:i+block_size]))
print("};\n")
# Extract the unique combinations of properties into records
def combine_tables(*tables):
records = {}
index = []
for t in zip(*tables):
i = records.get(t)
if i is None:
i = records[t] = len(records)
index.append(i)
return index, records
def get_record_size_struct(records):
size = 0
structure = '/* When recompiling tables with a new Unicode version, please check the\n' + \
'types in this structure definition from pcre2_internal.h (the actual\n' + \
'field names will be different):\n\ntypedef struct {\n'
for i in range(len(records[0])):
record_slice = [record[i] for record in records]
slice_type, slice_size = get_type_size(record_slice)
# add padding: round up to the nearest power of slice_size
size = (size + slice_size - 1) & -slice_size
size += slice_size
structure += '%s property_%d;\n' % (slice_type, i)
# round up to the first item of the next structure in array
record_slice = [record[0] for record in records]
slice_type, slice_size = get_type_size(record_slice)
size = (size + slice_size - 1) & -slice_size
structure += '} ucd_record;\n*/\n'
return size, structure
def test_record_size():
tests = [ \
( [(3,), (6,), (6,), (1,)], 1 ), \
( [(300,), (600,), (600,), (100,)], 2 ), \
( [(25, 3), (6, 6), (34, 6), (68, 1)], 2 ), \
( [(300, 3), (6, 6), (340, 6), (690, 1)], 4 ), \
( [(3, 300), (6, 6), (6, 340), (1, 690)], 4 ), \
( [(300, 300), (6, 6), (6, 340), (1, 690)], 4 ), \
( [(3, 100000), (6, 6), (6, 123456), (1, 690)], 8 ), \
( [(100000, 300), (6, 6), (123456, 6), (1, 690)], 8 ), \
]
for test in tests:
size, struct = get_record_size_struct(test[0])
assert(size == test[1])
#print struct
def print_records(records, record_size):
print('const ucd_record PRIV(ucd_records)[] = { ' + \
'/* %d bytes, record size %d */' % (len(records) * record_size, record_size))
records = list(zip(list(records.keys()), list(records.values())))
records.sort(key = lambda x: x[1])
for i, record in enumerate(records):
print((' {' + '%6d, ' * len(record[0]) + '}, /* %3d */') % (record[0] + (i,)))
print('};\n')
script_names = ['Unknown', 'Arabic', 'Armenian', 'Bengali', 'Bopomofo', 'Braille', 'Buginese', 'Buhid', 'Canadian_Aboriginal',
'Cherokee', 'Common', 'Coptic', 'Cypriot', 'Cyrillic', 'Deseret', 'Devanagari', 'Ethiopic', 'Georgian',
'Glagolitic', 'Gothic', 'Greek', 'Gujarati', 'Gurmukhi', 'Han', 'Hangul', 'Hanunoo', 'Hebrew', 'Hiragana',
'Inherited', 'Kannada', 'Katakana', 'Kharoshthi', 'Khmer', 'Lao', 'Latin', 'Limbu', 'Linear_B', 'Malayalam',
'Mongolian', 'Myanmar', 'New_Tai_Lue', 'Ogham', 'Old_Italic', 'Old_Persian', 'Oriya', 'Osmanya', 'Runic',
'Shavian', 'Sinhala', 'Syloti_Nagri', 'Syriac', 'Tagalog', 'Tagbanwa', 'Tai_Le', 'Tamil', 'Telugu', 'Thaana',
'Thai', 'Tibetan', 'Tifinagh', 'Ugaritic', 'Yi',
# New for Unicode 5.0
'Balinese', 'Cuneiform', 'Nko', 'Phags_Pa', 'Phoenician',
# New for Unicode 5.1
'Carian', 'Cham', 'Kayah_Li', 'Lepcha', 'Lycian', 'Lydian', 'Ol_Chiki', 'Rejang', 'Saurashtra', 'Sundanese', 'Vai',
# New for Unicode 5.2
'Avestan', 'Bamum', 'Egyptian_Hieroglyphs', 'Imperial_Aramaic',
'Inscriptional_Pahlavi', 'Inscriptional_Parthian',
'Javanese', 'Kaithi', 'Lisu', 'Meetei_Mayek',
'Old_South_Arabian', 'Old_Turkic', 'Samaritan', 'Tai_Tham', 'Tai_Viet',
# New for Unicode 6.0.0
'Batak', 'Brahmi', 'Mandaic',
# New for Unicode 6.1.0
'Chakma', 'Meroitic_Cursive', 'Meroitic_Hieroglyphs', 'Miao', 'Sharada', 'Sora_Sompeng', 'Takri',
# New for Unicode 7.0.0
'Bassa_Vah', 'Caucasian_Albanian', 'Duployan', 'Elbasan', 'Grantha', 'Khojki', 'Khudawadi',
'Linear_A', 'Mahajani', 'Manichaean', 'Mende_Kikakui', 'Modi', 'Mro', 'Nabataean',
'Old_North_Arabian', 'Old_Permic', 'Pahawh_Hmong', 'Palmyrene', 'Psalter_Pahlavi',
'Pau_Cin_Hau', 'Siddham', 'Tirhuta', 'Warang_Citi',
# New for Unicode 8.0.0
'Ahom', 'Anatolian_Hieroglyphs', 'Hatran', 'Multani', 'Old_Hungarian',
'SignWriting',
# New for Unicode 10.0.0
'Adlam', 'Bhaiksuki', 'Marchen', 'Newa', 'Osage', 'Tangut', 'Masaram_Gondi',
'Nushu', 'Soyombo', 'Zanabazar_Square',
# New for Unicode 11.0.0
'Dogra', 'Gunjala_Gondi', 'Hanifi_Rohingya', 'Makasar', 'Medefaidrin',
'Old_Sogdian', 'Sogdian',
# New for Unicode 12.0.0
'Elymaic', 'Nandinagari', 'Nyiakeng_Puachue_Hmong', 'Wancho'
]
script_abbrevs = [
'Zzzz', 'Arab', 'Armn', 'Beng', 'Bopo', 'Brai', 'Bugi', 'Buhd', 'Cans',
'Cher', 'Zyyy', 'Copt', 'Cprt', 'Cyrl', 'Dsrt', 'Deva', 'Ethi', 'Geor',
'Glag', 'Goth', 'Grek', 'Gujr', 'Guru', 'Hani', 'Hang', 'Hano', 'Hebr',
'Hira', 'Zinh', 'Knda', 'Kana', 'Khar', 'Khmr', 'Laoo', 'Latn', 'Limb',
'Linb', 'Mlym', 'Mong', 'Mymr', 'Talu', 'Ogam', 'Ital', 'Xpeo', 'Orya',
'Osma', 'Runr', 'Shaw', 'Sinh', 'Sylo', 'Syrc', 'Tglg', 'Tagb', 'Tale',
'Taml', 'Telu', 'Thaa', 'Thai', 'Tibt', 'Tfng', 'Ugar', 'Yiii',
#New for Unicode 5.0
'Bali', 'Xsux', 'Nkoo', 'Phag', 'Phnx',
#New for Unicode 5.1
'Cari', 'Cham', 'Kali', 'Lepc', 'Lyci', 'Lydi', 'Olck', 'Rjng', 'Saur',
'Sund', 'Vaii',
#New for Unicode 5.2
'Avst', 'Bamu', 'Egyp', 'Armi', 'Phli', 'Prti', 'Java', 'Kthi', 'Lisu',
'Mtei', 'Sarb', 'Orkh', 'Samr', 'Lana', 'Tavt',
#New for Unicode 6.0.0
'Batk', 'Brah', 'Mand',
#New for Unicode 6.1.0
'Cakm', 'Merc', 'Mero', 'Plrd', 'Shrd', 'Sora', 'Takr',
#New for Unicode 7.0.0
'Bass', 'Aghb', 'Dupl', 'Elba', 'Gran', 'Khoj', 'Sind', 'Lina', 'Mahj',
'Mani', 'Mend', 'Modi', 'Mroo', 'Nbat', 'Narb', 'Perm', 'Hmng', 'Palm',
'Phlp', 'Pauc', 'Sidd', 'Tirh', 'Wara',
#New for Unicode 8.0.0
'Ahom', 'Hluw', 'Hatr', 'Mult', 'Hung', 'Sgnw',
#New for Unicode 10.0.0
'Adlm', 'Bhks', 'Marc', 'Newa', 'Osge', 'Tang', 'Gonm', 'Nshu', 'Soyo',
'Zanb',
#New for Unicode 11.0.0
'Dogr', 'Gong', 'Rohg', 'Maka', 'Medf', 'Sogo', 'Sogd',
#New for Unicode 12.0.0
'Elym', 'Nand', 'Hmnp', 'Wcho'
]
category_names = ['Cc', 'Cf', 'Cn', 'Co', 'Cs', 'Ll', 'Lm', 'Lo', 'Lt', 'Lu',
'Mc', 'Me', 'Mn', 'Nd', 'Nl', 'No', 'Pc', 'Pd', 'Pe', 'Pf', 'Pi', 'Po', 'Ps',
'Sc', 'Sk', 'Sm', 'So', 'Zl', 'Zp', 'Zs' ]
# The Extended_Pictographic property is not found in the file where all the
# others are (GraphemeBreakProperty.txt). It comes from the emoji-data.txt
# file, but we list it here so that the name has the correct index value.
break_property_names = ['CR', 'LF', 'Control', 'Extend', 'Prepend',
'SpacingMark', 'L', 'V', 'T', 'LV', 'LVT', 'Regional_Indicator', 'Other',
'ZWJ', 'Extended_Pictographic' ]
test_record_size()
unicode_version = ""
script = read_table('Unicode.tables/Scripts.txt', make_get_names(script_names), script_names.index('Unknown'))
category = read_table('Unicode.tables/DerivedGeneralCategory.txt', make_get_names(category_names), category_names.index('Cn'))
break_props = read_table('Unicode.tables/GraphemeBreakProperty.txt', make_get_names(break_property_names), break_property_names.index('Other'))
other_case = read_table('Unicode.tables/CaseFolding.txt', get_other_case, 0)
# The grapheme breaking rules were changed for Unicode 11.0.0 (June 2018). Now
# we need to find the Extended_Pictographic property for emoji characters. This
# can be set as an additional grapheme break property, because the default for
# all the emojis is "other". We scan the emoji-data.txt file and modify the
# break-props table.
file = open('Unicode.tables/emoji-data.txt', 'r', encoding='utf-8')
for line in file:
line = re.sub(r'#.*', '', line)
chardata = list(map(str.strip, line.split(';')))
if len(chardata) <= 1:
continue
if chardata[1] != "Extended_Pictographic":
continue
m = re.match(r'([0-9a-fA-F]+)(\.\.([0-9a-fA-F]+))?$', chardata[0])
char = int(m.group(1), 16)
if m.group(3) is None:
last = char
else:
last = int(m.group(3), 16)
for i in range(char, last + 1):
if break_props[i] != break_property_names.index('Other'):
print("WARNING: Emoji 0x%x has break property %s, not 'Other'",
i, break_property_names[break_props[i]], file=sys.stderr)
break_props[i] = break_property_names.index('Extended_Pictographic')
file.close()
# The Script Extensions property default value is the Script value. Parse the
# file, setting 'Unknown' as the default (this will never be a Script Extension
# value), then scan it and fill in the default from Scripts. Code added by PH
# in October 2018. Positive values are used for just a single script for a
# code point. Negative values are negated offsets in a list of lists of
# multiple scripts. Initialize this list with a single entry, as the zeroth
# element is never used.
script_lists = [0]
script_abbrevs_default = script_abbrevs.index('Zzzz')
scriptx = read_table('Unicode.tables/ScriptExtensions.txt', get_script_extension, script_abbrevs_default)
for i in range(0, MAX_UNICODE):
if scriptx[i] == script_abbrevs_default:
scriptx[i] = script[i]
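# Editor's illustrative sketch (not part of the original script): how a scriptx
# value would be interpreted, following the encoding described in the comment
# above. A non-negative value is itself a single script number; a negative value
# is a negated offset into the script_lists vector, where each sublist of script
# numbers is zero-terminated.
def _example_decode_scriptx(value, lists):
    if value >= 0:
        return [value]
    scripts = []
    offset = -value
    while lists[offset] != 0:
        scripts.append(lists[offset])
        offset += 1
    return scripts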
# With the addition of the new Script Extensions field, we need some padding
# to get the Unicode records up to 12 bytes (multiple of 4). Set a value
# greater than 255 to make the field 16 bits.
padding_dummy = [0] * MAX_UNICODE
padding_dummy[0] = 256
# This block of code was added by PH in September 2012. I am not a Python
# programmer, so the style is probably dreadful, but it does the job. It scans
# the other_case table to find sets of more than two characters that must all
# match each other caselessly. Later in this script a table of these sets is
# written out. However, we have to do this work here in order to compute the
# offsets in the table that are inserted into the main table.
# The CaseFolding.txt file lists pairs, but the common logic for reading data
# sets only one value, so first we go through the table and set "return"
# offsets for those that are not already set.
for c in range(MAX_UNICODE):
if other_case[c] != 0 and other_case[c + other_case[c]] == 0:
other_case[c + other_case[c]] = -other_case[c]
# Now scan again and create equivalence sets.
sets = []
for c in range(MAX_UNICODE):
o = c + other_case[c]
# Trigger when this character's other case does not point back here. We
# now have three characters that are case-equivalent.
if other_case[o] != -other_case[c]:
t = o + other_case[o]
# Scan the existing sets to see if any of the three characters are already
# part of a set. If so, unite the existing set with the new set.
appended = 0
for s in sets:
found = 0
for x in s:
if x == c or x == o or x == t:
found = 1
# Add new characters to an existing set
if found:
found = 0
for y in [c, o, t]:
for x in s:
if x == y:
found = 1
if not found:
s.append(y)
appended = 1
# If we have not added to an existing set, create a new one.
if not appended:
sets.append([c, o, t])
# End of loop looking for caseless sets.
# Now scan the sets and set appropriate offsets for the characters.
caseless_offsets = [0] * MAX_UNICODE
offset = 1
for s in sets:
for x in s:
caseless_offsets[x] = offset
offset += len(s) + 1
# End of block of code for creating offsets for caseless matching sets.
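# Editor's illustrative sketch (not part of the original script): the offsets
# computed above index into the PRIV(ucd_caseless_sets) vector that is printed
# later. Rebuilding that flat vector the same way (a leading terminator slot,
# then each sorted set followed by a terminator) shows that the offset assigned
# to the characters of a set points at the set's first member.
def _example_caseless_layout(demo_sets):
    flat = [0]  # slot 0 corresponds to the leading NOTACHAR
    offsets = []
    for s in demo_sets:
        offsets.append(len(flat))
        flat.extend(sorted(s))
        flat.append(0)  # terminator slot (NOTACHAR in the generated C)
    for s, off in zip(demo_sets, offsets):
        assert flat[off] == sorted(s)[0]
    return flat, offsets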
# Combine the tables
table, records = combine_tables(script, category, break_props,
caseless_offsets, other_case, scriptx, padding_dummy)
record_size, record_struct = get_record_size_struct(list(records.keys()))
# Find the optimum block size for the two-stage table
min_size = sys.maxsize
for block_size in [2 ** i for i in range(5,10)]:
size = len(records) * record_size
stage1, stage2 = compress_table(table, block_size)
size += get_tables_size(stage1, stage2)
#print "/* block size %5d => %5d bytes */" % (block_size, size)
if size < min_size:
min_size = size
min_stage1, min_stage2 = stage1, stage2
min_block_size = block_size
print("/* This module is generated by the maint/MultiStage2.py script.")
print("Do not modify it by hand. Instead modify the script and run it")
print("to regenerate this code.")
print()
print("As well as being part of the PCRE2 library, this module is #included")
print("by the pcre2test program, which redefines the PRIV macro to change")
print("table names from _pcre2_xxx to xxxx, thereby avoiding name clashes")
print("with the library. At present, just one of these tables is actually")
print("needed. */")
print()
print("#ifndef PCRE2_PCRE2TEST")
print()
print("#ifdef HAVE_CONFIG_H")
print("#include \"config.h\"")
print("#endif")
print()
print("#include \"pcre2_internal.h\"")
print()
print("#endif /* PCRE2_PCRE2TEST */")
print()
print("/* Unicode character database. */")
print("/* This file was autogenerated by the MultiStage2.py script. */")
print("/* Total size: %d bytes, block size: %d. */" % (min_size, min_block_size))
print()
print("/* The tables herein are needed only when UCP support is built,")
print("and in PCRE2 that happens automatically with UTF support.")
print("This module should not be referenced otherwise, so")
print("it should not matter whether it is compiled or not. However")
print("a comment was received about space saving - maybe the guy linked")
print("all the modules rather than using a library - so we include a")
print("condition to cut out the tables when not needed. But don't leave")
print("a totally empty module because some compilers barf at that.")
print("Instead, just supply some small dummy tables. */")
print()
print("#ifndef SUPPORT_UNICODE")
print("const ucd_record PRIV(ucd_records)[] = {{0,0,0,0,0,0,0 }};")
print("const uint16_t PRIV(ucd_stage1)[] = {0};")
print("const uint16_t PRIV(ucd_stage2)[] = {0};")
print("const uint32_t PRIV(ucd_caseless_sets)[] = {0};")
print("#else")
print()
print("const char *PRIV(unicode_version) = \"{}\";".format(unicode_version))
print()
print("/* If the 32-bit library is run in non-32-bit mode, character values")
print("greater than 0x10ffff may be encountered. For these we set up a")
print("special record. */")
print()
print("#if PCRE2_CODE_UNIT_WIDTH == 32")
print("const ucd_record PRIV(dummy_ucd_record)[] = {{")
print(" ucp_Unknown, /* script */")
print(" ucp_Cn, /* type unassigned */")
print(" ucp_gbOther, /* grapheme break property */")
print(" 0, /* case set */")
print(" 0, /* other case */")
print(" ucp_Unknown, /* script extension */")
print(" 0, /* dummy filler */")
print(" }};")
print("#endif")
print()
print(record_struct)
# --- Added by PH: output the table of caseless character sets ---
print("/* This table contains lists of characters that are caseless sets of")
print("more than one character. Each list is terminated by NOTACHAR. */\n")
print("const uint32_t PRIV(ucd_caseless_sets)[] = {")
print(" NOTACHAR,")
for s in sets:
s = sorted(s)
for x in s:
print(' 0x%04x,' % x, end=' ')
print(' NOTACHAR,')
print('};')
print()
# ------
print("/* When #included in pcre2test, we don't need the table of digit")
print("sets, nor the the large main UCD tables. */")
print()
print("#ifndef PCRE2_PCRE2TEST")
print()
# --- Added by PH: read Scripts.txt again for the sets of 10 digits. ---
digitsets = []
file = open('Unicode.tables/Scripts.txt', 'r', encoding='utf-8')
for line in file:
m = re.match(r'([0-9a-fA-F]+)\.\.([0-9a-fA-F]+)\s+;\s+\S+\s+#\s+Nd\s+', line)
if m is None:
continue
first = int(m.group(1),16)
last = int(m.group(2),16)
if ((last - first + 1) % 10) != 0:
print("ERROR: %04x..%04x does not contain a multiple of 10 characters" % (first, last),
file=sys.stderr)
while first < last:
digitsets.append(first + 9)
first += 10
file.close()
digitsets.sort()
print("/* This table lists the code points for the '9' characters in each")
print("set of decimal digits. It is used to ensure that all the digits in")
print("a script run come from the same set. */\n")
print("const uint32_t PRIV(ucd_digit_sets)[] = {")
print(" %d, /* Number of subsequent values */" % len(digitsets), end='')
count = 8
for d in digitsets:
if count == 8:
print("\n ", end='')
count = 0
print(" 0x%05x," % d, end='')
count += 1
print("\n};\n")
print("/* This vector is a list of lists of scripts for the Script Extension")
print("property. Each sublist is zero-terminated. */\n")
print("const uint8_t PRIV(ucd_script_sets)[] = {")
count = 0
print(" /* 0 */", end='')
for d in script_lists:
print(" %3d," % d, end='')
count += 1
if d == 0:
print("\n /* %3d */" % count, end='')
print("\n};\n")
# Output the main UCD tables.
print("/* These are the main two-stage UCD tables. The fields in each record are:")
print("script (8 bits), character type (8 bits), grapheme break property (8 bits),")
print("offset to multichar other cases or zero (8 bits), offset to other case")
print("or zero (32 bits, signed), script extension (16 bits, signed), and a dummy")
print("16-bit field to make the whole thing a multiple of 4 bytes. */\n")
print_records(records, record_size)
print_table(min_stage1, 'PRIV(ucd_stage1)')
print_table(min_stage2, 'PRIV(ucd_stage2)', min_block_size)
print("#if UCD_BLOCK_SIZE != %d" % min_block_size)
print("#error Please correct UCD_BLOCK_SIZE in pcre2_internal.h")
print("#endif")
print("#endif /* SUPPORT_UNICODE */")
print()
print("#endif /* PCRE2_PCRE2TEST */")
# This code was part of the original contribution, but is commented out as it
# was never used. A two-stage table has sufficed.
"""
# Three-stage tables:
# Find the optimum block size for 3-stage table
min_size = sys.maxint
for stage3_block in [2 ** i for i in range(2,6)]:
stage_i, stage3 = compress_table(table, stage3_block)
for stage2_block in [2 ** i for i in range(5,10)]:
size = len(records) * 4
stage1, stage2 = compress_table(stage_i, stage2_block)
size += get_tables_size(stage1, stage2, stage3)
# print "/* %5d / %3d => %5d bytes */" % (stage2_block, stage3_block, size)
if size < min_size:
min_size = size
min_stage1, min_stage2, min_stage3 = stage1, stage2, stage3
min_stage2_block, min_stage3_block = stage2_block, stage3_block
print "/* Total size: %d bytes" % min_size */
print_records(records)
print_table(min_stage1, 'ucd_stage1')
print_table(min_stage2, 'ucd_stage2', min_stage2_block)
print_table(min_stage3, 'ucd_stage3', min_stage3_block)
"""
| 1.765625 | 2 |
setup.py | ihayhurst/RetroBioCat | 9 | 3259 | from setuptools import setup, find_packages
from retrobiocat_web import __version__
with open('requirements.txt') as f:
requirements = f.read().splitlines()
setup(
name = 'retrobiocat_web',
packages = find_packages(),
include_package_data=True,
version = __version__,
license='',
description = 'Retrosynthesis',
author = '<NAME>',
author_email = '<EMAIL>',
url = '',
download_url = '',
keywords = ['enzyme'],
install_requires=requirements,
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3'],
) | 1.375 | 1 |
rxn_yield_context/preprocess_data/preprocess/augmentation_utils.py | Lung-Yi/rxn_yield_context | 0 | 3260 | # -*- coding: utf-8 -*-
import pickle
import numpy as np
from rdkit import Chem
from rdkit.Chem import AllChem,DataStructs
def get_classes(path):
f = open(path, 'rb')
dict_ = pickle.load(f)
f.close()
classes = sorted(dict_.items(), key=lambda d: d[1],reverse=True)
classes = [(x,y) for x,y in classes]
return classes
def create_rxn_Morgan2FP_concatenate(rsmi, psmi, rxnfpsize=16384, pfpsize=16384, useFeatures=False, calculate_rfp=True, useChirality=True):
# Takes the reactant and product SMILES separately and returns the concatenated
# fingerprint: the product fp followed by the reaction fp (product fp minus reactant fp).
rsmi = rsmi.encode('utf-8')
psmi = psmi.encode('utf-8')
try:
mol = Chem.MolFromSmiles(rsmi)
except Exception as e:
print(e)
return
try:
fp_bit = AllChem.GetMorganFingerprintAsBitVect(
mol=mol, radius=2, nBits=rxnfpsize, useFeatures=useFeatures, useChirality=useChirality)
fp = np.empty(rxnfpsize, dtype='float32')
DataStructs.ConvertToNumpyArray(fp_bit, fp)
except Exception as e:
print("Cannot build reactant fp due to {}".format(e))
return
rfp = fp
try:
mol = Chem.MolFromSmiles(psmi)
except Exception as e:
return
try:
fp_bit = AllChem.GetMorganFingerprintAsBitVect(
mol=mol, radius=2, nBits=pfpsize, useFeatures=useFeatures, useChirality=useChirality)
fp = np.empty(pfpsize, dtype='float32')
DataStructs.ConvertToNumpyArray(fp_bit, fp)
except Exception as e:
print("Cannot build product fp due to {}".format(e))
return
pfp = fp
rxn_fp = pfp - rfp
final_fp = np.concatenate((pfp, rxn_fp))
return final_fp | 2.4375 | 2 |
src/webstruct-demo/__init__.py | zanachka/webstruct-demo | 5 | 3261 | import functools
import logging
import random
from flask import Flask, render_template, request
import joblib
from lxml.html import html5parser
import lxml.html
import requests
import yarl
import webstruct.model
import webstruct.sequence_encoding
import webstruct.webannotator
webstruct_demo = Flask(__name__, instance_relative_config=True)
webstruct_demo.config.from_pyfile('config.py')
def absolutize_link(link, base_url):
if link.startswith('#'):
return link
try:
target_url = yarl.URL(link)
except:
return link
if target_url.is_absolute() and target_url.scheme:
return link
if target_url.is_absolute() and not target_url.scheme:
target_url = target_url.with_scheme(base_url.scheme)
return str(target_url)
try:
target_url = base_url.join(target_url)
except:
return link
return str(target_url)
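# Editor's illustrative sketch (not part of the original app): a few made-up URLs
# showing how the branches above behave. The function is defined but never called
# here; invoke it manually if you want the checks to run.
def _absolutize_link_examples():
    base = yarl.URL('http://site.test/dir/page')
    assert absolutize_link('#top', base) == '#top'  # fragments pass through
    assert absolutize_link('https://other.test/x', base) == 'https://other.test/x'
    assert absolutize_link('//cdn.test/app.js', base) == 'http://cdn.test/app.js'
    assert absolutize_link('../logo.png', base) == 'http://site.test/logo.png'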
def absolute_links(tree, url):
_LINK_SOURCES = ['src', 'href']
try:
base_url = yarl.URL(url)
except:
return tree
for _, element in lxml.html.etree.iterwalk(tree, events=('start', )):
if not isinstance(element.tag, str):
continue
for attr in _LINK_SOURCES:
if attr not in element.attrib:
continue
element.attrib[attr] = absolutize_link(element.attrib[attr], base_url)
return tree
def parent_links(tree, base_url):
base_url = yarl.URL(base_url)
for _, element in lxml.html.etree.iterwalk(tree, events=('start', )):
if not isinstance(element.tag, str):
continue
if element.tag != 'a':
continue
if 'href' not in element.attrib:
continue
url = element.attrib['href']
if url.startswith('#'):
continue
element.attrib['target'] = '_parent'
element.attrib['href'] = str(base_url.update_query(url=url))
return tree
def remove_namespace(tree):
_NS="{http://www.w3.org/1999/xhtml}"
for _, element in lxml.html.etree.iterwalk(tree, events=('start', )):
if not isinstance(element.tag, str):
continue
if not element.tag.startswith(_NS):
continue
element.tag = element.tag[len(_NS):]
return tree
_TOKENS_PER_PART = 2000
def run_model(tree, model):
html_tokens, _ = model.html_tokenizer.tokenize_single(tree)
if not html_tokens:
return tree, list(), list()
tree = html_tokens[0].elem.getroottree().getroot()
tags = model.model.predict([html_tokens[i:i+_TOKENS_PER_PART] for i in range(0, len(html_tokens), _TOKENS_PER_PART)])
tags = [i for t in tags for i in t]
return tree, html_tokens, tags
def download(url):
splash_url = webstruct_demo.config.get('SPLASH_URL', None)
splash_user = webstruct_demo.config.get('SPLASH_USER', None)
splash_pass = webstruct_demo.config.get('SPLASH_PASS', None)
is_splash = functools.reduce(lambda x,y: x and y is not None,
[splash_url, splash_user, splash_pass],
True)
if not is_splash:
response = requests.get(url)
return response.content, response.url
load = {'url': url,
'images': 0,
'base_url': url}
response = requests.post(splash_url + '/render.html',
json=load,
auth=requests.auth.HTTPBasicAuth(splash_user, splash_pass))
return response.content, url
def extract_ner(response_content, response_url, base_url):
url = response_url
tree = html5parser.document_fromstring(response_content)
tree = remove_namespace(tree)
tree = absolute_links(tree, url)
tree = parent_links(tree, base_url)
title = tree.xpath('//title')[0].text
model = joblib.load(webstruct_demo.config['MODEL_PATH'])
tree, tokens, tags = run_model(tree, model)
tree = model.html_tokenizer.detokenize_single(tokens, tags)
tree = webstruct.webannotator.to_webannotator(
tree,
entity_colors=model.entity_colors,
url=url
)
content = lxml.html.tostring(tree, encoding='utf-8').decode('utf-8')
entities = webstruct.sequence_encoding.IobEncoder.group(zip(tokens, tags))
entities = webstruct.model._drop_empty(
(model.build_entity(tokens), tag)
for (tokens, tag) in entities if tag != 'O'
)
groups = webstruct.model.extract_entitiy_groups(
tokens,
tags,
dont_penalize=None,
join_tokens=model.build_entity
)
return content, title, entities, groups
def sample_entities(entities):
unique = list(set(entities))
random.shuffle(unique)
sampled = unique[:5]
sampled = sorted(sampled, key=lambda e:(e[1], e[0]))
return sampled
def sample_groups(groups):
groups = [tuple(sorted(g)) for g in groups]
sampled = sorted(list(set(groups)), key=lambda g:-len(g))
return sampled[:2]
@webstruct_demo.route('/')
def index():
url = request.args.get('url', 'http://en.wikipedia.org/')
output = request.args.get('output', 'html')
try:
response_content, response_url = download(url)
content, title, entities, groups = extract_ner(response_content,
response_url,
request.url)
except:
logging.exception('Got exception')
content = None
title = 'Error during obtaining %s' % (url, )
entities = []
groups = []
_TEMPLATE_MAPPING = {'html': 'main.html',
'entities': 'entities.html',
'groups': 'groups.html'}
template = _TEMPLATE_MAPPING.get(output, _TEMPLATE_MAPPING['html'])
sampled_entities = sample_entities(entities)
sampled_groups = sample_groups(groups)
base_url = yarl.URL(request.url)
routing = {t: str(base_url.update_query(output=t)) for t in ['html', 'entities', 'groups']}
values = {'url': url,
'title': title,
'entities': entities,
'sampled_entities': sampled_entities,
'sampled_groups': sampled_groups,
'routing': routing,
'srcdoc': content,
'groups': groups,
'output': output}
return render_template(template, **values)
| 2.1875 | 2 |
setup.py | Liang813/einops | 4,738 | 3262 | <gh_stars>1000+
__author__ = '<NAME>'
from setuptools import setup
setup(
name="einops",
version='0.3.2',
description="A new flavour of deep learning operations",
long_description=open('README.md', encoding='utf-8').read(),
long_description_content_type='text/markdown',
url='https://github.com/arogozhnikov/einops',
author='<NAME>',
packages=['einops', 'einops.layers'],
classifiers=[
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3 ',
],
keywords='deep learning, neural networks, tensor manipulation, machine learning, '
'scientific computations, einops',
install_requires=[
# no run-time or installation-time dependencies
],
)
| 1.445313 | 1 |
website/migrations/0084_auto_20210215_1401.py | czhu1217/cmimc-online | 0 | 3263 | # Generated by Django 3.1.6 on 2021-02-15 19:01
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('website', '0083_remove_aisubmission_code'),
]
operations = [
migrations.AddField(
model_name='exam',
name='division',
field=models.IntegerField(default=1),
preserve_default=False,
),
migrations.CreateModel(
name='ExamPair',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, unique=True)),
('contest', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='exampairs', to='website.contest')),
],
),
migrations.AddField(
model_name='exam',
name='exampair',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='exams', to='website.exampair'),
),
]
| 1.664063 | 2 |
ldp/tasks/dlp.py | evandez/low-dimensional-probing | 1 | 3264 | """Core experiments for the dependency label prediction task."""
import collections
import copy
import logging
from typing import (Any, Dict, Iterator, Optional, Sequence, Set, Tuple, Type,
Union)
from ldp import datasets, learning
from ldp.models import probes, projections
from ldp.parse import ptb
from ldp.parse import representations as reps
from ldp.utils.typing import Device
import numpy
import torch
import wandb
UNK = 'unk'
class DLPIndexer:
"""Map pairs of words to their syntactic relationship, if any."""
def __init__(self, samples: Sequence[ptb.Sample], unk: str = UNK):
"""Map each relation label to an integer.
Args:
samples (Sequence[ptb.Sample]): The samples from which to determine
possible relations.
unk (str): Label to use when un-indexed dependency label is
encountered.
"""
labels = {rel for sample in samples for rel in sample.relations}
self.indexer = {unk: 0}
for label in sorted(labels):
self.indexer[label] = len(self.indexer)
self.unk = unk
def __call__(self, sample: ptb.Sample) -> torch.Tensor:
"""Map all possible (word, word) pairs to labels.
Args:
sample (ptb.Sample): The sample to label.
Returns:
torch.Tensor: For length W sentence, returns shape (W, W) matrix
where element (v, w) is the index of the label describing
the relationship between word v and w, if any. Defaults to
the "unk" label, even if there is no relationship between
v and w.
"""
heads, relations = sample.heads, sample.relations
labels = torch.empty(len(heads), len(heads), dtype=torch.long)
labels.fill_(self.indexer[self.unk])
for word, (head, rel) in enumerate(zip(heads, relations)):
if head == -1:
labels[word, word] = self.indexer[rel]
else:
label = self.indexer.get(rel, self.indexer[self.unk])
labels[word, head] = label
return labels
def __len__(self) -> int:
"""Return the number of unique labels for this task."""
return len(self.indexer)
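# Editor's illustrative sketch (not part of the library): DLPIndexer only reads the
# `sentence`, `heads` and `relations` attributes of a sample, so a namedtuple
# stand-in (FakeSample is hypothetical, not the real ptb.Sample) is enough to see
# the (W, W) label matrix it produces.
def _example_dlp_indexer():
    FakeSample = collections.namedtuple('FakeSample', 'sentence heads relations')
    sample = FakeSample(sentence=['the', 'cat', 'sat'],
                        heads=[1, 2, -1],
                        relations=['det', 'nsubj', 'root'])
    indexer = DLPIndexer([sample])
    labels = indexer(sample)
    assert labels.shape == (3, 3)
    assert labels[0, 1] == indexer.indexer['det']  # "the" is a dependent of "cat"
    assert labels[2, 2] == indexer.indexer['root']  # the root is marked on the diagonal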
class ControlDLPIndexer:
"""Map pairs of words to arbitrary syntactic relationships."""
def __init__(self,
samples: Sequence[ptb.Sample],
dist: Optional[Union[numpy.ndarray, Sequence[float]]] = None):
"""Map each relation label to an arbitrary (integer) label.
We only do this for pairs of words which have a head-dependent
relationship in the original dataset.
Args:
samples (Sequence[ptb.Sample]): The samples from which to pull
possible word pairs.
dist (Optional[Union[numpy.ndarray, Sequence[float]]], optional): A
distribution to use when sampling tags per word type.
By default, is computed from the list of samples.
"""
if dist is None:
counts: Dict[str, int] = collections.defaultdict(lambda: 0)
for sample in samples:
for relation in sample.relations:
counts[relation] += 1
dist = numpy.array([float(count) for count in counts.values()])
dist /= numpy.sum(dist)
assert dist is not None, 'uninitialized distribution?'
self.dist = dist
self.rels: Dict[Tuple[str, str], int] = {}
for sample in samples:
sentence = sample.sentence
heads = sample.heads
for dep, head in enumerate(heads):
if head == -1:
head = dep
words = (sentence[dep], sentence[head])
if words not in self.rels:
# Add one so that 0 is reserved for "no relationship" tag.
rel = numpy.random.choice(len(dist), p=dist) + 1
self.rels[words] = rel
def __call__(self, sample: ptb.Sample) -> torch.Tensor:
"""Map all possible (word, word) pairs to labels.
Args:
sample (ptb.Sample): The sample to label.
Returns:
torch.Tensor: For length W sentence, returns shape (W, W) matrix
where element (v, w) is the index of the label describing
the relationship between word v and w, if any. Defaults to 0,
which marks pairs with no head-dependent relationship.
"""
heads = sample.heads
labels = torch.zeros(len(heads), len(heads), dtype=torch.long)
for dep, head in enumerate(heads):
if head == -1:
head = dep
words = (sample.sentence[dep], sample.sentence[head])
labels[dep, head] = self.rels.get(words, 0)
return labels
def __len__(self) -> int:
"""Return the number of relationships, including the null one."""
return len(self.dist) + 1
class DLPTaskDataset(datasets.TaskDataset):
"""Iterate over (word representation pair, dependency label) pairs."""
def __init__(
self,
representations: reps.RepresentationLayerDataset,
annotations: Sequence[ptb.Sample],
indexer: Type[Union[DLPIndexer, ControlDLPIndexer]] = DLPIndexer,
**kwargs: Any,
):
"""Initialize dataset by mapping each dependency label to an index.
The kwargs are forwarded to indexer when it is instantiated.
Args:
representations (representations.RepresentationsLayerDataset): Word
representations corresponding to the words to be paired and
labeled.
annotations (Sequence[ptb.Sample]): The PTB annotations from
which to pull dependency labels.
indexer (Union[DLPIndexer, ControlDLPIndexer]): Type of the indexer
to use for mapping PTB dependency label annotations to integer
tensors. Instantiated with given annotations unless the
samples keyword is set in kwargs.
Raises:
ValueError: If number of representations/annotations do not match.
"""
if len(representations) != len(annotations):
raise ValueError(f'got {len(representations)} representations '
f'but {len(annotations)} annotations')
self.representations = representations
self.annotations = annotations
kwargs = kwargs.copy()
kwargs.setdefault('samples', annotations)
self.indexer = indexer(**kwargs)
def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]:
"""Return (representations, integral POS tags) for index'th sentence.
Args:
index (int): Index of the sentence in the dataset.
Returns:
Tuple[torch.Tensor, torch.Tensor]: First tensor is shape
(sentence_length, representation_dimension) containing word
representations, and second is shape (sentence_length,)
containing integral POS tags.
"""
representations = self.representations[index]
annotations = self.annotations[index]
assert len(representations) == len(
annotations.sentence), 'diff sentence lengths?'
rels = self.indexer(annotations)
# Find all pairs of words sharing an edge.
indexes = set(range(len(representations)))
pairs = [(i, j) for i in indexes for j in indexes if rels[i, j]]
assert pairs and len(pairs) == len(representations), 'missing edges?'
# Stack everything before returning it.
bigrams = torch.stack([
torch.stack((representations[i], representations[j]))
for i, j in pairs
])
labels = torch.stack([rels[i, j] for i, j in pairs])
return bigrams, labels
def __iter__(self) -> Iterator[Tuple[torch.Tensor, torch.Tensor]]:
"""Yield all (sentence representations, sentence POS tags) samples."""
for index in range(len(self)):
yield self[index]
def __len__(self) -> int:
"""Return the number of sentences (batches) in the dataset."""
return len(self.annotations)
@property
def sample_representations_shape(self) -> Sequence[int]:
"""Return the dimensionality of the representation pairs."""
return (2, self.representations.dataset.dimension)
@property
def sample_features_shape(self) -> Sequence[int]:
"""Return the shape of each individual POS tag.
Since POS tags are integral scalars, there is no such shape!
"""
return ()
def count_samples(self) -> int:
"""Return the number of words in the dataset."""
return sum(
self.representations.dataset.length(index)
for index in range(len(self.representations)))
def count_unique_features(self) -> int:
"""Return number of unique POS seen in data."""
return len(self.indexer)
# Define the valid probe types for this task.
Probe = Union[probes.Linear, probes.MLP]
def train(train_dataset: datasets.TaskDataset,
dev_dataset: datasets.TaskDataset,
test_dataset: datasets.TaskDataset,
probe_t: Type[Probe] = probes.Linear,
project_to: Optional[int] = None,
share_projection: bool = False,
epochs: int = 25,
patience: int = 4,
lr: float = 1e-3,
device: Optional[Device] = None,
also_log_to_wandb: bool = False) -> Tuple[Probe, float]:
"""Train a probe on dependency label prediction.
Args:
train_dataset (TaskDataset): Training data for probe.
dev_dataset (TaskDataset): Validation data for probe, used for early
stopping.
test_dataset (TaskDataset): Test data for probe, used to compute
final accuracy after training.
probe_t (Type[Probe], optional): Probe type to train.
Defaults to probes.Linear.
project_to (Optional[int], optional): Project representations to this
dimensionality. Defaults to no projection.
share_projection (bool): If set, project the left and right components
of pairwise probes with the same projection. E.g. if the probe is
bilinear of the form xAy, we will always compute (Px)A(Py) as
opposed to (Px)A(Qy) for distinct projections P, Q. Defaults to NOT
shared.
epochs (int, optional): Maximum passes through the training dataset.
Defaults to 25.
patience (int, optional): Allow dev loss to not improve for this many
epochs, then stop training. Defaults to 4.
lr (float, optional): Learning rate for optimizer. Defaults to 1e-3.
device (Optional[Device], optional): Torch device on which to
train probe. Defaults to CPU.
also_log_to_wandb (Optional[pathlib.Path], optional): If set, log
training data to wandb. By default, wandb is not used.
Returns:
Tuple[Probe, float]: The trained probe and its test accuracy.
"""
log = logging.getLogger(__name__)
device = device or 'cpu'
ndims = train_dataset.sample_representations_shape[-1]
log.info('representations have dimension %d', ndims)
ntags = train_dataset.count_unique_features()
assert ntags is not None, 'no label count, is dataset for different task?'
log.info('dependency labeling task has %d tags', ntags)
if project_to is None or ndims == project_to:
logging.info('projection dim = reps dim, not projecting')
projection = None
elif share_projection:
projection = projections.Projection(ndims, project_to)
else:
projection = projections.Projection(2 * ndims, 2 * project_to)
probe = probe_t(2 * (project_to or ndims), ntags, project=projection)
learning.train(probe,
train_dataset,
dev_dataset=dev_dataset,
stopper=learning.EarlyStopping(patience=patience),
epochs=epochs,
lr=lr,
device=device,
also_log_to_wandb=also_log_to_wandb)
accuracy = learning.test(probe, test_dataset, device=device)
return probe, accuracy
# TODO(evandez): May as well commonize this, since it's shared with POS.
def axis_alignment(
probe: Probe,
dev_dataset: datasets.TaskDataset,
test_dataset: datasets.TaskDataset,
device: Optional[Device] = None,
also_log_to_wandb: bool = False) -> Sequence[Tuple[int, float]]:
"""Measure whether the given probe is axis aligned.
Args:
probe (Probe): The probe to evaluate.
dev_dataset (datasets.TaskDataset): Data used to determine which axes
to cut.
test_dataset (datasets.TaskDataset): Data used to determine the effect
of cutting an axis.
device (Optional[Device], optional): Torch device on which to
train probe. Defaults to CPU.
also_log_to_wandb (bool, optional): If set, log results to wandb.
Returns:
Sequence[Tuple[int, float]]: The ablated axes paired with optimal probe
accuracy after that axis is zeroed.
"""
log = logging.getLogger(__name__)
projection = probe.project
assert projection is not None, 'no projection?'
axes = set(range(projection.project.in_features))
ablated: Set[int] = set()
accuracies = []
while axes:
best_model, best_axis, best_accuracy = probe, -1, -1.
for axis in axes:
model = copy.deepcopy(best_model).eval()
assert model.project is not None, 'no projection?'
model.project.project.weight.data[:, sorted(ablated | {axis})] = 0
accuracy = learning.test(model, dev_dataset, device=device)
if accuracy > best_accuracy:
best_model = model
best_axis = axis
best_accuracy = accuracy
accuracy = learning.test(best_model, test_dataset, device=device)
log.info('ablating axis %d, test accuracy %f', best_axis, accuracy)
if also_log_to_wandb:
wandb.log({
'axis': best_axis,
'dev accuracy': best_accuracy,
'test accuracy': accuracy,
})
axes.remove(best_axis)
ablated.add(best_axis)
accuracies.append((best_axis, accuracy))
return tuple(accuracies)
| 2.484375 | 2 |
pycquery_krb/common/ccache.py | naver/PyCQuery | 2 | 3265 | <filename>pycquery_krb/common/ccache.py
#!/usr/bin/env python3
#
# Author:
# <NAME> (@skelsec)
#
import os
import io
import datetime
import glob
import hashlib
from pycquery_krb.protocol.asn1_structs import Ticket, EncryptedData, \
krb5_pvno, KrbCredInfo, EncryptionKey, KRBCRED, TicketFlags, EncKrbCredPart
from pycquery_krb.common.utils import dt_to_kerbtime, TGSTicket2hashcat
from pycquery_krb.protocol.constants import EncryptionType, MESSAGE_TYPE
from pycquery_krb import logger
from asn1crypto import core
# http://repo.or.cz/w/krb5dissect.git/blob_plain/HEAD:/ccache.txt
class Header:
def __init__(self):
self.tag = None
self.taglen = None
self.tagdata = None
@staticmethod
def parse(data):
"""
returns a list of header tags
"""
reader = io.BytesIO(data)
headers = []
while reader.tell() < len(data):
h = Header()
h.tag = int.from_bytes(reader.read(2), byteorder='big', signed=False)
h.taglen = int.from_bytes(reader.read(2), byteorder='big', signed=False)
h.tagdata = reader.read(h.taglen)
headers.append(h)
return headers
def to_bytes(self):
t = self.tag.to_bytes(2, byteorder='big', signed=False)
t += len(self.tagdata).to_bytes(2, byteorder='big', signed=False)
t += self.tagdata
return t
def __str__(self):
t = 'tag: %s\n' % self.tag
t += 'taglen: %s\n' % self.taglen
t += 'tagdata: %s\n' % self.tagdata
return t
class DateTime:
def __init__(self):
self.time_offset = None
self.usec_offset = None
@staticmethod
def parse(reader):
d = DateTime()
d.time_offset = int.from_bytes(reader.read(4), byteorder='big', signed=False)
d.usec_offset = int.from_bytes(reader.read(4), byteorder='big', signed=False)
return d
def to_bytes(self):
t = self.time_offset.to_bytes(4, byteorder='big', signed=False)
t += self.usec_offset.to_bytes(4, byteorder='big', signed=False)
return t
class Credential:
def __init__(self):
self.client = None
self.server = None
self.key = None
self.time = None
self.is_skey = None
self.tktflags = None
self.num_address = None
self.addrs = []
self.num_authdata = None
self.authdata = []
self.ticket = None
self.second_ticket = None
def to_hash(self):
res = Ticket.load(self.ticket.to_asn1()).native
tgs_encryption_type = int(res['enc-part']['etype'])
t = len(res['sname']['name-string'])
if t == 1:
tgs_name_string = res['sname']['name-string'][0]
else:
tgs_name_string = res['sname']['name-string'][1]
tgs_realm = res['realm']
if tgs_encryption_type == EncryptionType.AES256_CTS_HMAC_SHA1_96.value:
tgs_checksum = res['enc-part']['cipher'][-12:]
tgs_encrypted_data2 = res['enc-part']['cipher'][:-12]
return '$krb5tgs$%s$%s$%s$%s$%s' % (tgs_encryption_type,tgs_name_string,tgs_realm, tgs_checksum.hex(), tgs_encrypted_data2.hex() )
else:
tgs_checksum = res['enc-part']['cipher'][:16]
tgs_encrypted_data2 = res['enc-part']['cipher'][16:]
return '$krb5tgs$%s$*%s$%s$spn*$%s$%s' % (tgs_encryption_type,tgs_name_string,tgs_realm, tgs_checksum.hex(), tgs_encrypted_data2.hex() )
def to_tgt(self):
"""
Returns the native format of an AS_REP message and the sessionkey in EncryptionKey native format
"""
enc_part = EncryptedData({'etype': 1, 'cipher': b''})
tgt_rep = {}
tgt_rep['pvno'] = krb5_pvno
tgt_rep['msg-type'] = MESSAGE_TYPE.KRB_AS_REP.value
tgt_rep['crealm'] = self.server.realm.to_string()
tgt_rep['cname'] = self.client.to_asn1()[0]
tgt_rep['ticket'] = Ticket.load(self.ticket.to_asn1()).native
tgt_rep['enc-part'] = enc_part.native
t = EncryptionKey(self.key.to_asn1()).native
return tgt_rep, t
def to_tgs(self):
"""
Returns the native format of an AS_REP message and the sessionkey in EncryptionKey native format
"""
enc_part = EncryptedData({'etype': 1, 'cipher': b''})
tgt_rep = {}
tgt_rep['pvno'] = krb5_pvno
tgt_rep['msg-type'] = MESSAGE_TYPE.KRB_AS_REP.value
tgt_rep['crealm'] = self.server.realm.to_string()
tgt_rep['cname'] = self.client.to_asn1()[0]
tgt_rep['ticket'] = Ticket.load(self.ticket.to_asn1()).native
tgt_rep['enc-part'] = enc_part.native
t = EncryptionKey(self.key.to_asn1()).native
return tgt_rep, t
def to_kirbi(self):
filename = '%s@%s_%s' % (self.client.to_string() , self.server.to_string(), hashlib.sha1(self.ticket.to_asn1()).hexdigest()[:8])
krbcredinfo = {}
krbcredinfo['key'] = EncryptionKey(self.key.to_asn1())
krbcredinfo['prealm'] = self.client.realm.to_string()
krbcredinfo['pname'] = self.client.to_asn1()[0]
krbcredinfo['flags'] = core.IntegerBitString(self.tktflags).cast(TicketFlags)
if self.time.authtime != 0: #this parameter is not mandatory, and most of the time not present
krbcredinfo['authtime'] = datetime.datetime.fromtimestamp(self.time.authtime, datetime.timezone.utc)
if self.time.starttime != 0:
krbcredinfo['starttime'] = datetime.datetime.fromtimestamp(self.time.starttime, datetime.timezone.utc)
if self.time.endtime != 0:
krbcredinfo['endtime'] = datetime.datetime.fromtimestamp(self.time.endtime, datetime.timezone.utc)
if self.time.renew_till != 0: #this parameter is not mandatory, and sometimes it's not present
krbcredinfo['renew-till'] = datetime.datetime.fromtimestamp(self.time.renew_till, datetime.timezone.utc)
krbcredinfo['srealm'] = self.server.realm.to_string()
krbcredinfo['sname'] = self.server.to_asn1()[0]
enc_krbcred = {}
enc_krbcred['ticket-info'] = [KrbCredInfo(krbcredinfo)]
krbcred = {}
krbcred['pvno'] = krb5_pvno
krbcred['msg-type'] = MESSAGE_TYPE.KRB_CRED.value
krbcred['tickets'] = [Ticket.load(self.ticket.to_asn1())]
krbcred['enc-part'] = EncryptedData({'etype': EncryptionType.NULL.value, 'cipher': EncKrbCredPart(enc_krbcred).dump()})
kirbi = KRBCRED(krbcred)
return kirbi, filename
@staticmethod
def from_asn1(ticket, data):
###
# data = KrbCredInfo
###
c = Credential()
c.client = CCACHEPrincipal.from_asn1(data['pname'], data['prealm'])
c.server = CCACHEPrincipal.from_asn1(data['sname'], data['srealm'])
c.key = Keyblock.from_asn1(data['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(data['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(ticket['enc-part']['cipher'])
c.second_ticket = CCACHEOctetString.empty()
return c
@staticmethod
def parse(reader):
c = Credential()
c.client = CCACHEPrincipal.parse(reader)
c.server = CCACHEPrincipal.parse(reader)
c.key = Keyblock.parse(reader)
c.time = Times.parse(reader)
c.is_skey = int.from_bytes(reader.read(1), byteorder='big', signed=False)
c.tktflags = int.from_bytes(reader.read(4), byteorder='little', signed=False)
c.num_address = int.from_bytes(reader.read(4), byteorder='big', signed=False)
for _ in range(c.num_address):
c.addrs.append(Address.parse(reader))
c.num_authdata = int.from_bytes(reader.read(4), byteorder='big', signed=False)
for _ in range(c.num_authdata):
c.authdata.append(Authdata.parse(reader))
c.ticket = CCACHEOctetString.parse(reader)
c.second_ticket = CCACHEOctetString.parse(reader)
return c
@staticmethod
def summary_header():
return ['client','server','starttime','endtime','renew-till']
def summary(self):
return [
'%s@%s' % (self.client.to_string(separator='/'), self.client.realm.to_string()),
'%s@%s' % (self.server.to_string(separator='/'), self.server.realm.to_string()),
datetime.datetime.fromtimestamp(self.time.starttime).isoformat() if self.time.starttime != 0 else 'N/A',
datetime.datetime.fromtimestamp(self.time.endtime).isoformat() if self.time.endtime != 0 else 'N/A',
datetime.datetime.fromtimestamp(self.time.renew_till).isoformat() if self.time.renew_till != 0 else 'N/A',
]
def to_bytes(self):
t = self.client.to_bytes()
t += self.server.to_bytes()
t += self.key.to_bytes()
t += self.time.to_bytes()
t += self.is_skey.to_bytes(1, byteorder='big', signed=False)
t += self.tktflags.to_bytes(4, byteorder='little', signed=False)
t += self.num_address.to_bytes(4, byteorder='big', signed=False)
for addr in self.addrs:
t += addr.to_bytes()
t += self.num_authdata.to_bytes(4, byteorder='big', signed=False)
for ad in self.authdata:
t += ad.to_bytes()
t += self.ticket.to_bytes()
t += self.second_ticket.to_bytes()
return t
class Keyblock:
def __init__(self):
self.keytype = None
self.etype = None
self.keylen = None
self.keyvalue = None
@staticmethod
def from_asn1(data):
k = Keyblock()
k.keytype = data['keytype']
k.etype = 0 # not sure
k.keylen = len(data['keyvalue'])
k.keyvalue = data['keyvalue']
return k
def to_asn1(self):
t = {}
t['keytype'] = self.keytype
t['keyvalue'] = self.keyvalue
return t
@staticmethod
def parse(reader):
k = Keyblock()
k.keytype = int.from_bytes(reader.read(2), byteorder='big', signed=False)
k.etype = int.from_bytes(reader.read(2), byteorder='big', signed=False)
k.keylen = int.from_bytes(reader.read(2), byteorder='big', signed=False)
k.keyvalue = reader.read(k.keylen)
return k
def to_bytes(self):
t = self.keytype.to_bytes(2, byteorder='big', signed=False)
t += self.etype.to_bytes(2, byteorder='big', signed=False)
t += self.keylen.to_bytes(2, byteorder='big', signed=False)
t += self.keyvalue
return t
class Times:
def __init__(self):
self.authtime = None
self.starttime = None
self.endtime = None
self.renew_till = None
@staticmethod
def from_asn1(enc_as_rep_part):
t = Times()
t.authtime = dt_to_kerbtime(enc_as_rep_part['authtime']) \
if 'authtime' in enc_as_rep_part and enc_as_rep_part['authtime'] else 0
t.starttime = dt_to_kerbtime(enc_as_rep_part['starttime']) \
if 'starttime' in enc_as_rep_part and enc_as_rep_part['starttime'] else 0
t.endtime = dt_to_kerbtime(enc_as_rep_part['endtime']) \
if 'endtime' in enc_as_rep_part and enc_as_rep_part['endtime'] else 0
t.renew_till = dt_to_kerbtime(enc_as_rep_part['renew_till']) \
if 'renew_till' in enc_as_rep_part and enc_as_rep_part['renew_till'] else 0
return t
@staticmethod
def dummy_time(start= datetime.datetime.now(datetime.timezone.utc)):
t = Times()
t.authtime = dt_to_kerbtime(start)
t.starttime = dt_to_kerbtime(start )
t.endtime = dt_to_kerbtime(start + datetime.timedelta(days=1))
t.renew_till = dt_to_kerbtime(start + datetime.timedelta(days=2))
return t
@staticmethod
def parse(reader):
t = Times()
t.authtime = int.from_bytes(reader.read(4), byteorder='big', signed=False)
t.starttime = int.from_bytes(reader.read(4), byteorder='big', signed=False)
t.endtime = int.from_bytes(reader.read(4), byteorder='big', signed=False)
t.renew_till = int.from_bytes(reader.read(4), byteorder='big', signed=False)
return t
def to_bytes(self):
t = self.authtime.to_bytes(4, byteorder='big', signed=False)
t += self.starttime.to_bytes(4, byteorder='big', signed=False)
t += self.endtime.to_bytes(4, byteorder='big', signed=False)
t += self.renew_till.to_bytes(4, byteorder='big', signed=False)
return t
class Address:
def __init__(self):
self.addrtype = None
self.addrdata = None
@staticmethod
def parse(reader):
a = Address()
a.addrtype = int.from_bytes(reader.read(2), byteorder='big', signed=False)
a.addrdata = CCACHEOctetString.parse(reader)
return a
def to_bytes(self):
t = self.addrtype.to_bytes(2, byteorder='big', signed=False)
t += self.addrdata.to_bytes()
return t
class Authdata:
def __init__(self):
self.authtype = None
self.authdata = None
@staticmethod
def parse(reader):
a = Authdata()
a.authtype = int.from_bytes(reader.read(2), byteorder='big', signed=False)
a.authdata = CCACHEOctetString.parse(reader)
return a
def to_bytes(self):
t = self.authtype.to_bytes(2, byteorder='big', signed=False)
t += self.authdata.to_bytes()
return t
class CCACHEPrincipal:
def __init__(self):
self.name_type = None
self.num_components = None
self.realm = None
self.components = []
@staticmethod
def from_asn1(principal, realm):
p = CCACHEPrincipal()
p.name_type = principal['name-type']
p.num_components = len(principal['name-string'])
p.realm = CCACHEOctetString.from_string(realm)
for comp in principal['name-string']:
p.components.append(CCACHEOctetString.from_asn1(comp))
return p
@staticmethod
def dummy():
p = CCACHEPrincipal()
p.name_type = 1
p.num_components = 1
p.realm = CCACHEOctetString.from_string('kerbi.corp')
for _ in range(1):
p.components.append(CCACHEOctetString.from_string('kerbi'))
return p
def to_string(self, separator='-'):
return separator.join([c.to_string() for c in self.components])
def to_asn1(self):
t = {'name-type': self.name_type, 'name-string': [name.to_string() for name in self.components]}
return t, self.realm.to_string()
@staticmethod
def parse(reader):
p = CCACHEPrincipal()
p.name_type = int.from_bytes(reader.read(4), byteorder='big', signed=False)
p.num_components = int.from_bytes(reader.read(4), byteorder='big', signed=False)
p.realm = CCACHEOctetString.parse(reader)
for _ in range(p.num_components):
p.components.append(CCACHEOctetString.parse(reader))
return p
def to_bytes(self):
t = self.name_type.to_bytes(4, byteorder='big', signed=False)
t += len(self.components).to_bytes(4, byteorder='big', signed=False)
t += self.realm.to_bytes()
for com in self.components:
t += com.to_bytes()
return t
class CCACHEOctetString:
def __init__(self):
self.length = None
self.data = None
@staticmethod
def empty():
o = CCACHEOctetString()
o.length = 0
o.data = b''
return o
def to_asn1(self):
return self.data
def to_string(self):
return self.data.decode()
@staticmethod
def from_string(data):
o = CCACHEOctetString()
o.data = data.encode()
o.length = len(o.data)
return o
@staticmethod
def from_asn1(data):
o = CCACHEOctetString()
o.length = len(data)
if isinstance(data,str):
o.data = data.encode()
else:
o.data = data
return o
@staticmethod
def parse(reader):
o = CCACHEOctetString()
o.length = int.from_bytes(reader.read(4), byteorder='big', signed=False)
o.data = reader.read(o.length)
return o
def to_bytes(self):
if isinstance(self.data,str):
self.data = self.data.encode()
self.length = len(self.data)
t = len(self.data).to_bytes(4, byteorder='big', signed=False)
t += self.data
return t
class CCACHE:
"""
As the header is rarely used -mostly static- you'd need to init this object with empty = True to get an object without header already present
"""
def __init__(self, empty = False):
self.file_format_version = None #0x0504
self.headers = []
self.primary_principal = None
self.credentials = []
if empty == False:
self.__setup()
def __setup(self):
self.file_format_version = 0x0504
header = Header()
header.tag = 1
header.taglen = 8
#header.tagdata = b'\xff\xff\xff\xff\x00\x00\x00\x00'
header.tagdata = b'\x00\x00\x00\x00\x00\x00\x00\x00'
self.headers.append(header)
#t_hdr = b''
#for header in self.headers:
# t_hdr += header.to_bytes()
#self.headerlen = 1 #size of the entire header in bytes, encoded in 2 byte big-endian unsigned int
self.primary_principal = CCACHEPrincipal.dummy()
def __str__(self):
t = '== CCACHE ==\n'
t+= 'file_format_version : %s\n' % self.file_format_version
for header in self.headers:
t+= '%s\n' % header
t+= 'primary_principal : %s\n' % self.primary_principal
return t
def add_tgt(self, as_rep, enc_as_rep_part, override_pp = True): #from AS_REP
"""
Creates credential object from the TGT and adds to the ccache file
The TGT is basically the native representation of the asn1 encoded AS_REP data that the AD sends upon a successful TGT request.
This function doesn't do decryption of the encrypted part of the as_rep object, it is expected that the decrypted XXX is supplied in enc_as_rep_part
override_pp: bool to determine if client principal should be used as the primary principal for the ccache file
"""
c = Credential()
c.client = CCACHEPrincipal.from_asn1(as_rep['cname'], as_rep['crealm'])
if override_pp == True:
self.primary_principal = c.client
c.server = CCACHEPrincipal.from_asn1(enc_as_rep_part['sname'], enc_as_rep_part['srealm'])
c.time = Times.from_asn1(enc_as_rep_part)
c.key = Keyblock.from_asn1(enc_as_rep_part['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(enc_as_rep_part['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(Ticket(as_rep['ticket']).dump())
c.second_ticket = CCACHEOctetString.empty()
self.credentials.append(c)
def add_tgs(self, tgs_rep, enc_tgs_rep_part, override_pp = False): #from AS_REP
"""
Creates credential object from the TGS and adds to the ccache file
The TGS is the native representation of the asn1 encoded TGS_REP data when the user requests a tgs to a specific service principal with a valid TGT
This function doesn't decrypt the encrypted part of the tgs_rep object; the decrypted XXX is expected to be supplied in enc_tgs_rep_part
override_pp: bool to determine if client principal should be used as the primary principal for the ccache file
"""
c = Credential()
c.client = CCACHEPrincipal.from_asn1(tgs_rep['cname'], tgs_rep['crealm'])
if override_pp == True:
self.primary_principal = c.client
c.server = CCACHEPrincipal.from_asn1(enc_tgs_rep_part['sname'], enc_tgs_rep_part['srealm'])
c.time = Times.from_asn1(enc_tgs_rep_part)
c.key = Keyblock.from_asn1(enc_tgs_rep_part['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(enc_tgs_rep_part['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(Ticket(tgs_rep['ticket']).dump())
c.second_ticket = CCACHEOctetString.empty()
self.credentials.append(c)
def add_kirbi(self, krbcred, override_pp = True, include_expired = False):
c = Credential()
enc_credinfo = EncKrbCredPart.load(krbcred['enc-part']['cipher']).native
ticket_info = enc_credinfo['ticket-info'][0]
"""
if ticket_info['endtime'] < datetime.datetime.now(datetime.timezone.utc):
if include_expired == True:
logging.debug('This ticket has most likely expired, but include_expired is forcing me to add it to cache! This can cause problems!')
else:
logging.debug('This ticket has most likely expired, skipping')
return
"""
c.client = CCACHEPrincipal.from_asn1(ticket_info['pname'], ticket_info['prealm'])
if override_pp == True:
self.primary_principal = c.client
#yaaaaay 4 additional weirdness!!!!
#if sname name-string contains a realm as well then impacket will crash miserably :(
if len(ticket_info['sname']['name-string']) > 2 and ticket_info['sname']['name-string'][-1].upper() == ticket_info['srealm'].upper():
logger.debug('SNAME contains the realm as well, trimming it')
t = ticket_info['sname']
t['name-string'] = t['name-string'][:-1]
c.server = CCACHEPrincipal.from_asn1(t, ticket_info['srealm'])
else:
c.server = CCACHEPrincipal.from_asn1(ticket_info['sname'], ticket_info['srealm'])
c.time = Times.from_asn1(ticket_info)
c.key = Keyblock.from_asn1(ticket_info['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(ticket_info['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(Ticket(krbcred['tickets'][0]).dump()) #kirbi only stores one ticket per file
c.second_ticket = CCACHEOctetString.empty()
self.credentials.append(c)
@staticmethod
def from_kirbi(kirbidata):
kirbi = KRBCRED.load(kirbidata).native
cc = CCACHE()
cc.add_kirbi(kirbi)
return cc
def get_all_tgt(self):
"""
Returns a list of AS_REP tickets in native format (dict).
To determine which tickets are TGTs we check whether the server principal is the krbtgt (kerberos) service
"""
tgts = []
for cred in self.credentials:
if cred.server.to_string(separator='/').lower().find('krbtgt') != -1:
tgt = [cred.to_tgt(), cred.time]
tgts.append(tgt)
return tgts
def get_all_tgs(self):
tgss = []
for cred in self.credentials:
if cred.server.to_string(separator = '/').lower().find('krbtgt') == -1:
tgss.append(cred.to_tgs())
return tgss
def get_hashes(self, all_hashes = False):
"""
Returns a list of hashes in hashcat-friendly format for tickets with encryption type 23 (which is RC4); a usage sketch follows this class
all_hashes: overrides the encryption type filtering and returns hash for all tickets
"""
hashes = []
for cred in self.credentials:
res = Ticket.load(cred.ticket.to_asn1()).native
if int(res['enc-part']['etype']) == 23 or all_hashes == True:
hashes.append(cred.to_hash())
return hashes
@staticmethod
def parse(reader):
c = CCACHE(True)
c.file_format_version = int.from_bytes(reader.read(2), byteorder='big', signed=False)
hdr_size = int.from_bytes(reader.read(2), byteorder='big', signed=False)
c.headers = Header.parse(reader.read(hdr_size))
#c.headerlen =
#for i in range(c.headerlen):
# c.headers.append(Header.parse(reader))
c.primary_principal = CCACHEPrincipal.parse(reader)
pos = reader.tell()
reader.seek(-1,2)
eof = reader.tell()
reader.seek(pos,0)
while reader.tell() < eof:
cred = Credential.parse(reader)
if not (len(cred.server.components) > 0 and cred.server.components[0].to_string() == 'krb5_ccache_conf_data'
and cred.server.realm.to_string() == 'X-CACHECONF:'):
c.credentials.append(cred)
return c
def to_bytes(self):
t = self.file_format_version.to_bytes(2, byteorder='big', signed=False)
t_hdr = b''
for header in self.headers:
t_hdr += header.to_bytes()
t += len(t_hdr).to_bytes(2, byteorder='big', signed=False)
t += t_hdr
t += self.primary_principal.to_bytes()
for cred in self.credentials:
t += cred.to_bytes()
return t
@staticmethod
def from_kirbifile(kirbi_filename):
kf_abs = os.path.abspath(kirbi_filename)
kirbidata = None
with open(kf_abs, 'rb') as f:
kirbidata = f.read()
return CCACHE.from_kirbi(kirbidata)
@staticmethod
def from_kirbidir(directory_path):
"""
Iterates through all .kirbi files in a given directory and converts all of them into one CCACHE object
"""
cc = CCACHE()
dir_path = os.path.join(os.path.abspath(directory_path), '*.kirbi')
for filename in glob.glob(dir_path):
with open(filename, 'rb') as f:
kirbidata = f.read()
kirbi = KRBCRED.load(kirbidata).native
cc.add_kirbi(kirbi)
return cc
def to_kirbidir(self, directory_path):
"""
Converts all credential objects in the CCACHE object to the kirbi file format used by mimikatz.
The kirbi file format supports one credential per file, so prepare for a lot of files being generated.
directory_path: str the directory to write the kirbi files to
"""
kf_abs = os.path.abspath(directory_path)
for cred in self.credentials:
kirbi, filename = cred.to_kirbi()
filename = '%s.kirbi' % filename.replace('..','!')
filepath = os.path.join(kf_abs, filename)
with open(filepath, 'wb') as o:
o.write(kirbi.dump())
@staticmethod
def from_file(filename):
"""
Parses the ccache file and returns a CCACHE object
"""
with open(filename, 'rb') as f:
return CCACHE.parse(f)
def to_file(self, filename):
"""
Writes the contents of the CCACHE object to a file
"""
with open(filename, 'wb') as f:
f.write(self.to_bytes())
@staticmethod
def from_bytes(data):
return CCACHE.parse(io.BytesIO(data))
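# Usage sketch (not part of the original module; the file names are hypothetical):
# convert a mimikatz .kirbi ticket into a ccache file and collect the
# hashcat-friendly hashes of any RC4 (etype 23) tickets, using only the CCACHE
# helpers defined above.
def _example_kirbi_to_ccache(kirbi_path='ticket.kirbi', ccache_path='ticket.ccache'):
    cc = CCACHE.from_kirbifile(kirbi_path)
    hashes = cc.get_hashes()  # hashcat-friendly strings for etype 23 tickets
    cc.to_file(ccache_path)
    return hashes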
| 2.40625 | 2 |
getUniformSmiles.py | OpenEye-Contrib/Molecular-List-Logic | 2 | 3266 | #!/opt/az/psf/python/2.7/bin/python
from openeye.oechem import *
import cgi
#creates a list of smiles of the syntax [smiles|molId,smiles|molId]
def process_smiles(smiles):
smiles = smiles.split('\n')
mol = OEGraphMol()
smiles_list=[]
for line in smiles:
if len(line.rstrip())>0:
line = line.split()
smi = line[0]
molId = ""
if len(line)>1:
molId = line[1].replace(" ","|").rstrip()
if(OEParseSmiles(mol,smi)):
smi = OECreateSmiString(mol)
mol.Clear()
smiles_list.append(smi + "|" + molId) #can't send spaces or new lines
return smiles_list
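#illustrative sketch (not part of the original CGI script): canonicalising a small
#SMILES block with process_smiles(); requires the OpenEye toolkit, and the
#molecules below are made up for demonstration
def _example_process_smiles():
    block = "c1ccccc1 benzene\nCCO ethanol\n"
    return process_smiles(block)  # e.g. ['c1ccccc1|benzene', 'CCO|ethanol']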
#takes a list of smiles and writes it as sdf using a memory buffer
def write_sdf(smiles_list):
sdfs = []
ofs = oemolostream()
ofs.SetFormat(OEFormat_SDF)
ofs.openstring()
mol = OEGraphMol()
for smiles in smiles_list:
if(OEParseSmiles(mol,smiles.replace("|"," "))):
OEWriteMolecule(ofs,mol)
sdfs.append(ofs.GetString())
mol.Clear()
ofs.SetString("")
return sdfs
#creates a list of smiles of the syntax [smiles|molId,smiles|molId]
def read_sdf(sdf_data):
ifs = oemolistream()
ifs.SetFormat(OEFormat_SDF)
ifs.openstring(sdf_data)
smiles_list = []
for mol in ifs.GetOEGraphMols():
smiles = OECreateSmiString(mol)
smiles_list.append(smiles + "|" + mol.GetTitle())
return smiles_list
if __name__ == "__main__":
print "Content-Type: text/html\r\n\r\n"
form = cgi.FieldStorage()
extension = form.getvalue("extension")
dataA = form.getvalue("dataA")
operator = form.getvalue("smiles_operator")
sdf_output = form.getvalue("sdf_output")
if(extension=="smi"):
list_A = process_smiles(dataA)
else:
list_A = read_sdf(dataA)
outputString = ""
if(operator=="UNI"): #if only one file is supplied
outputString = "*".join(set(list_A)) #removes all duplicates using the set() function
else:
dataB = form.getvalue("dataB") #if two files are supplied
if(extension=="smi"):
list_B = process_smiles(dataB)
else:
list_B = read_sdf(dataB)
if(operator=="AND"):
outputString = "*".join(set(list_A) & set(list_B))
elif(operator=="OR"):
outputString = "*".join(set(list_A) | set(list_B))
elif(operator=="NOT"):
outputString = "*".join(set(list_A) - set(list_B))
if(sdf_output=="on"): #if we want the output as sdf
sdfs = write_sdf(outputString.replace("|"," ").split("*"))
outputString = "*".join(sdfs)
outputString = outputString.replace("\n","!").replace(" ","|")
#sends the output to index.html using javascript
print """
<html>
<head>
<input type="text" id="data" value=""" + outputString + """>
<script type="text/javascript">
parent.postMessage(data.value,"*");
</script>
</head>
</html>
"""
| 2.65625 | 3 |
mfem/_par/gridfunc.py | mfem/PyMFEM | 93 | 3267 | <filename>mfem/_par/gridfunc.py
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 4.0.2
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info < (2, 7, 0):
raise RuntimeError("Python 2.7 or later required")
# Import the low-level C/C++ module
if __package__ or "." in __name__:
from . import _gridfunc
else:
import _gridfunc
try:
import builtins as __builtin__
except ImportError:
import __builtin__
_swig_new_instance_method = _gridfunc.SWIG_PyInstanceMethod_New
_swig_new_static_method = _gridfunc.SWIG_PyStaticMethod_New
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
def _swig_setattr_nondynamic_instance_variable(set):
def set_instance_attr(self, name, value):
if name == "thisown":
self.this.own(value)
elif name == "this":
set(self, name, value)
elif hasattr(self, name) and isinstance(getattr(type(self), name), property):
set(self, name, value)
else:
raise AttributeError("You cannot add instance attributes to %s" % self)
return set_instance_attr
def _swig_setattr_nondynamic_class_variable(set):
def set_class_attr(cls, name, value):
if hasattr(cls, name) and not isinstance(getattr(cls, name), property):
set(cls, name, value)
else:
raise AttributeError("You cannot add class attributes to %s" % cls)
return set_class_attr
def _swig_add_metaclass(metaclass):
"""Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass"""
def wrapper(cls):
return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
return wrapper
class _SwigNonDynamicMeta(type):
"""Meta class to enforce nondynamic attributes (no new attributes) for a class"""
__setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)
import weakref
import mfem._par.array
import mfem._par.mem_manager
import mfem._par.vector
import mfem._par.coefficient
import mfem._par.globals
import mfem._par.matrix
import mfem._par.operators
import mfem._par.intrules
import mfem._par.sparsemat
import mfem._par.densemat
import mfem._par.eltrans
import mfem._par.fe
import mfem._par.geom
import mfem._par.fespace
import mfem._par.mesh
import mfem._par.sort_pairs
import mfem._par.ncmesh
import mfem._par.vtk
import mfem._par.element
import mfem._par.table
import mfem._par.hash
import mfem._par.vertex
import mfem._par.fe_coll
import mfem._par.lininteg
import mfem._par.handle
import mfem._par.hypre
import mfem._par.restriction
import mfem._par.bilininteg
import mfem._par.linearform
import mfem._par.nonlininteg
class GridFunction(mfem._par.vector.Vector):
r"""Proxy of C++ mfem::GridFunction class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def MakeOwner(self, fec_):
r"""MakeOwner(GridFunction self, FiniteElementCollection fec_)"""
return _gridfunc.GridFunction_MakeOwner(self, fec_)
MakeOwner = _swig_new_instance_method(_gridfunc.GridFunction_MakeOwner)
def OwnFEC(self):
r"""OwnFEC(GridFunction self) -> FiniteElementCollection"""
return _gridfunc.GridFunction_OwnFEC(self)
OwnFEC = _swig_new_instance_method(_gridfunc.GridFunction_OwnFEC)
def VectorDim(self):
r"""VectorDim(GridFunction self) -> int"""
return _gridfunc.GridFunction_VectorDim(self)
VectorDim = _swig_new_instance_method(_gridfunc.GridFunction_VectorDim)
def GetTrueVector(self, *args):
r"""
GetTrueVector(GridFunction self) -> Vector
GetTrueVector(GridFunction self) -> Vector
"""
return _gridfunc.GridFunction_GetTrueVector(self, *args)
GetTrueVector = _swig_new_instance_method(_gridfunc.GridFunction_GetTrueVector)
def GetTrueDofs(self, tv):
r"""GetTrueDofs(GridFunction self, Vector tv)"""
return _gridfunc.GridFunction_GetTrueDofs(self, tv)
GetTrueDofs = _swig_new_instance_method(_gridfunc.GridFunction_GetTrueDofs)
def SetTrueVector(self):
r"""SetTrueVector(GridFunction self)"""
return _gridfunc.GridFunction_SetTrueVector(self)
SetTrueVector = _swig_new_instance_method(_gridfunc.GridFunction_SetTrueVector)
def SetFromTrueDofs(self, tv):
r"""SetFromTrueDofs(GridFunction self, Vector tv)"""
return _gridfunc.GridFunction_SetFromTrueDofs(self, tv)
SetFromTrueDofs = _swig_new_instance_method(_gridfunc.GridFunction_SetFromTrueDofs)
def SetFromTrueVector(self):
r"""SetFromTrueVector(GridFunction self)"""
return _gridfunc.GridFunction_SetFromTrueVector(self)
SetFromTrueVector = _swig_new_instance_method(_gridfunc.GridFunction_SetFromTrueVector)
def GetValue(self, *args):
r"""
GetValue(GridFunction self, int i, IntegrationPoint ip, int vdim=1) -> double
GetValue(GridFunction self, ElementTransformation T, IntegrationPoint ip, int comp=0, Vector tr=None) -> double
"""
return _gridfunc.GridFunction_GetValue(self, *args)
GetValue = _swig_new_instance_method(_gridfunc.GridFunction_GetValue)
def GetVectorValue(self, *args):
r"""
GetVectorValue(GridFunction self, int i, IntegrationPoint ip, Vector val)
GetVectorValue(GridFunction self, ElementTransformation T, IntegrationPoint ip, Vector val, Vector tr=None)
"""
return _gridfunc.GridFunction_GetVectorValue(self, *args)
GetVectorValue = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorValue)
def GetValues(self, *args):
r"""
GetValues(GridFunction self, int i, IntegrationRule ir, Vector vals, int vdim=1)
GetValues(GridFunction self, int i, IntegrationRule ir, Vector vals, DenseMatrix tr, int vdim=1)
GetValues(GridFunction self, ElementTransformation T, IntegrationRule ir, Vector vals, int comp=0, DenseMatrix tr=None)
"""
return _gridfunc.GridFunction_GetValues(self, *args)
GetValues = _swig_new_instance_method(_gridfunc.GridFunction_GetValues)
def GetVectorValues(self, *args):
r"""
GetVectorValues(GridFunction self, int i, IntegrationRule ir, DenseMatrix vals, DenseMatrix tr)
GetVectorValues(GridFunction self, ElementTransformation T, IntegrationRule ir, DenseMatrix vals, DenseMatrix tr=None)
"""
return _gridfunc.GridFunction_GetVectorValues(self, *args)
GetVectorValues = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorValues)
def GetFaceValues(self, i, side, ir, vals, tr, vdim=1):
r"""GetFaceValues(GridFunction self, int i, int side, IntegrationRule ir, Vector vals, DenseMatrix tr, int vdim=1) -> int"""
return _gridfunc.GridFunction_GetFaceValues(self, i, side, ir, vals, tr, vdim)
GetFaceValues = _swig_new_instance_method(_gridfunc.GridFunction_GetFaceValues)
def GetFaceVectorValues(self, i, side, ir, vals, tr):
r"""GetFaceVectorValues(GridFunction self, int i, int side, IntegrationRule ir, DenseMatrix vals, DenseMatrix tr) -> int"""
return _gridfunc.GridFunction_GetFaceVectorValues(self, i, side, ir, vals, tr)
GetFaceVectorValues = _swig_new_instance_method(_gridfunc.GridFunction_GetFaceVectorValues)
def GetLaplacians(self, *args):
r"""
GetLaplacians(GridFunction self, int i, IntegrationRule ir, Vector laps, int vdim=1)
GetLaplacians(GridFunction self, int i, IntegrationRule ir, Vector laps, DenseMatrix tr, int vdim=1)
"""
return _gridfunc.GridFunction_GetLaplacians(self, *args)
GetLaplacians = _swig_new_instance_method(_gridfunc.GridFunction_GetLaplacians)
def GetHessians(self, *args):
r"""
GetHessians(GridFunction self, int i, IntegrationRule ir, DenseMatrix hess, int vdim=1)
GetHessians(GridFunction self, int i, IntegrationRule ir, DenseMatrix hess, DenseMatrix tr, int vdim=1)
"""
return _gridfunc.GridFunction_GetHessians(self, *args)
GetHessians = _swig_new_instance_method(_gridfunc.GridFunction_GetHessians)
def GetValuesFrom(self, orig_func):
r"""GetValuesFrom(GridFunction self, GridFunction orig_func)"""
return _gridfunc.GridFunction_GetValuesFrom(self, orig_func)
GetValuesFrom = _swig_new_instance_method(_gridfunc.GridFunction_GetValuesFrom)
def GetBdrValuesFrom(self, orig_func):
r"""GetBdrValuesFrom(GridFunction self, GridFunction orig_func)"""
return _gridfunc.GridFunction_GetBdrValuesFrom(self, orig_func)
GetBdrValuesFrom = _swig_new_instance_method(_gridfunc.GridFunction_GetBdrValuesFrom)
def GetVectorFieldValues(self, i, ir, vals, tr, comp=0):
r"""GetVectorFieldValues(GridFunction self, int i, IntegrationRule ir, DenseMatrix vals, DenseMatrix tr, int comp=0)"""
return _gridfunc.GridFunction_GetVectorFieldValues(self, i, ir, vals, tr, comp)
GetVectorFieldValues = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorFieldValues)
def ReorderByNodes(self):
r"""ReorderByNodes(GridFunction self)"""
return _gridfunc.GridFunction_ReorderByNodes(self)
ReorderByNodes = _swig_new_instance_method(_gridfunc.GridFunction_ReorderByNodes)
def GetNodalValues(self, *args):
'''
GetNodalValues(i) -> GetNodalValues(vector, vdim)
        GetNodalValues(i, array<double>, vdim)
'''
from .vector import Vector
if len(args) == 1:
vec = Vector()
_gridfunc.GridFunction_GetNodalValues(self, vec, args[0])
vec.thisown = 0
return vec.GetDataArray()
else:
return _gridfunc.GridFunction_GetNodalValues(self, *args)
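    # Illustrative call (assuming `gf` is a GridFunction built on some mesh):
    # `vals = gf.GetNodalValues(1)` builds a temporary Vector internally and
    # returns the nodal values of the first vector component as a NumPy array.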
def GetVectorFieldNodalValues(self, val, comp):
r"""GetVectorFieldNodalValues(GridFunction self, Vector val, int comp)"""
return _gridfunc.GridFunction_GetVectorFieldNodalValues(self, val, comp)
GetVectorFieldNodalValues = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorFieldNodalValues)
def ProjectVectorFieldOn(self, vec_field, comp=0):
r"""ProjectVectorFieldOn(GridFunction self, GridFunction vec_field, int comp=0)"""
return _gridfunc.GridFunction_ProjectVectorFieldOn(self, vec_field, comp)
ProjectVectorFieldOn = _swig_new_instance_method(_gridfunc.GridFunction_ProjectVectorFieldOn)
def GetDerivative(self, comp, der_comp, der):
r"""GetDerivative(GridFunction self, int comp, int der_comp, GridFunction der)"""
return _gridfunc.GridFunction_GetDerivative(self, comp, der_comp, der)
GetDerivative = _swig_new_instance_method(_gridfunc.GridFunction_GetDerivative)
def GetDivergence(self, tr):
r"""GetDivergence(GridFunction self, ElementTransformation tr) -> double"""
return _gridfunc.GridFunction_GetDivergence(self, tr)
GetDivergence = _swig_new_instance_method(_gridfunc.GridFunction_GetDivergence)
def GetCurl(self, tr, curl):
r"""GetCurl(GridFunction self, ElementTransformation tr, Vector curl)"""
return _gridfunc.GridFunction_GetCurl(self, tr, curl)
GetCurl = _swig_new_instance_method(_gridfunc.GridFunction_GetCurl)
def GetGradient(self, tr, grad):
r"""GetGradient(GridFunction self, ElementTransformation tr, Vector grad)"""
return _gridfunc.GridFunction_GetGradient(self, tr, grad)
GetGradient = _swig_new_instance_method(_gridfunc.GridFunction_GetGradient)
def GetGradients(self, *args):
r"""
GetGradients(GridFunction self, ElementTransformation tr, IntegrationRule ir, DenseMatrix grad)
GetGradients(GridFunction self, int const elem, IntegrationRule ir, DenseMatrix grad)
"""
return _gridfunc.GridFunction_GetGradients(self, *args)
GetGradients = _swig_new_instance_method(_gridfunc.GridFunction_GetGradients)
def GetVectorGradient(self, tr, grad):
r"""GetVectorGradient(GridFunction self, ElementTransformation tr, DenseMatrix grad)"""
return _gridfunc.GridFunction_GetVectorGradient(self, tr, grad)
GetVectorGradient = _swig_new_instance_method(_gridfunc.GridFunction_GetVectorGradient)
def GetElementAverages(self, avgs):
r"""GetElementAverages(GridFunction self, GridFunction avgs)"""
return _gridfunc.GridFunction_GetElementAverages(self, avgs)
GetElementAverages = _swig_new_instance_method(_gridfunc.GridFunction_GetElementAverages)
def GetElementDofValues(self, el, dof_vals):
r"""GetElementDofValues(GridFunction self, int el, Vector dof_vals)"""
return _gridfunc.GridFunction_GetElementDofValues(self, el, dof_vals)
GetElementDofValues = _swig_new_instance_method(_gridfunc.GridFunction_GetElementDofValues)
def ImposeBounds(self, *args):
r"""
ImposeBounds(GridFunction self, int i, Vector weights, Vector lo_, Vector hi_)
ImposeBounds(GridFunction self, int i, Vector weights, double min_=0.0, double max_=mfem::infinity())
"""
return _gridfunc.GridFunction_ImposeBounds(self, *args)
ImposeBounds = _swig_new_instance_method(_gridfunc.GridFunction_ImposeBounds)
def RestrictConforming(self):
r"""RestrictConforming(GridFunction self)"""
return _gridfunc.GridFunction_RestrictConforming(self)
RestrictConforming = _swig_new_instance_method(_gridfunc.GridFunction_RestrictConforming)
def ProjectGridFunction(self, src):
r"""ProjectGridFunction(GridFunction self, GridFunction src)"""
return _gridfunc.GridFunction_ProjectGridFunction(self, src)
ProjectGridFunction = _swig_new_instance_method(_gridfunc.GridFunction_ProjectGridFunction)
def ProjectCoefficient(self, *args):
r"""
ProjectCoefficient(GridFunction self, Coefficient coeff)
ProjectCoefficient(GridFunction self, Coefficient coeff, intArray dofs, int vd=0)
ProjectCoefficient(GridFunction self, VectorCoefficient vcoeff)
ProjectCoefficient(GridFunction self, VectorCoefficient vcoeff, intArray dofs)
ProjectCoefficient(GridFunction self, VectorCoefficient vcoeff, int attribute)
ProjectCoefficient(GridFunction self, mfem::Coefficient *[] coeff)
"""
return _gridfunc.GridFunction_ProjectCoefficient(self, *args)
ProjectCoefficient = _swig_new_instance_method(_gridfunc.GridFunction_ProjectCoefficient)
ARITHMETIC = _gridfunc.GridFunction_ARITHMETIC
HARMONIC = _gridfunc.GridFunction_HARMONIC
def ProjectDiscCoefficient(self, *args):
r"""
ProjectDiscCoefficient(GridFunction self, VectorCoefficient coeff)
ProjectDiscCoefficient(GridFunction self, Coefficient coeff, mfem::GridFunction::AvgType type)
ProjectDiscCoefficient(GridFunction self, VectorCoefficient coeff, mfem::GridFunction::AvgType type)
"""
return _gridfunc.GridFunction_ProjectDiscCoefficient(self, *args)
ProjectDiscCoefficient = _swig_new_instance_method(_gridfunc.GridFunction_ProjectDiscCoefficient)
def ProjectBdrCoefficient(self, *args):
r"""
ProjectBdrCoefficient(GridFunction self, Coefficient coeff, intArray attr)
ProjectBdrCoefficient(GridFunction self, VectorCoefficient vcoeff, intArray attr)
ProjectBdrCoefficient(GridFunction self, mfem::Coefficient *[] coeff, intArray attr)
"""
return _gridfunc.GridFunction_ProjectBdrCoefficient(self, *args)
ProjectBdrCoefficient = _swig_new_instance_method(_gridfunc.GridFunction_ProjectBdrCoefficient)
def ProjectBdrCoefficientNormal(self, vcoeff, bdr_attr):
r"""ProjectBdrCoefficientNormal(GridFunction self, VectorCoefficient vcoeff, intArray bdr_attr)"""
return _gridfunc.GridFunction_ProjectBdrCoefficientNormal(self, vcoeff, bdr_attr)
ProjectBdrCoefficientNormal = _swig_new_instance_method(_gridfunc.GridFunction_ProjectBdrCoefficientNormal)
def ProjectBdrCoefficientTangent(self, vcoeff, bdr_attr):
r"""ProjectBdrCoefficientTangent(GridFunction self, VectorCoefficient vcoeff, intArray bdr_attr)"""
return _gridfunc.GridFunction_ProjectBdrCoefficientTangent(self, vcoeff, bdr_attr)
ProjectBdrCoefficientTangent = _swig_new_instance_method(_gridfunc.GridFunction_ProjectBdrCoefficientTangent)
def ComputeL2Error(self, *args):
r"""
ComputeL2Error(GridFunction self, Coefficient exsol, mfem::IntegrationRule const *[] irs=0) -> double
ComputeL2Error(GridFunction self, mfem::Coefficient *[] exsol, mfem::IntegrationRule const *[] irs=0) -> double
ComputeL2Error(GridFunction self, VectorCoefficient exsol, mfem::IntegrationRule const *[] irs=0, intArray elems=None) -> double
"""
return _gridfunc.GridFunction_ComputeL2Error(self, *args)
ComputeL2Error = _swig_new_instance_method(_gridfunc.GridFunction_ComputeL2Error)
def ComputeGradError(self, exgrad, irs=0):
r"""ComputeGradError(GridFunction self, VectorCoefficient exgrad, mfem::IntegrationRule const *[] irs=0) -> double"""
return _gridfunc.GridFunction_ComputeGradError(self, exgrad, irs)
ComputeGradError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeGradError)
def ComputeCurlError(self, excurl, irs=0):
r"""ComputeCurlError(GridFunction self, VectorCoefficient excurl, mfem::IntegrationRule const *[] irs=0) -> double"""
return _gridfunc.GridFunction_ComputeCurlError(self, excurl, irs)
ComputeCurlError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeCurlError)
def ComputeDivError(self, exdiv, irs=0):
r"""ComputeDivError(GridFunction self, Coefficient exdiv, mfem::IntegrationRule const *[] irs=0) -> double"""
return _gridfunc.GridFunction_ComputeDivError(self, exdiv, irs)
ComputeDivError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeDivError)
def ComputeDGFaceJumpError(self, *args):
r"""
ComputeDGFaceJumpError(GridFunction self, Coefficient exsol, Coefficient ell_coeff, JumpScaling jump_scaling, mfem::IntegrationRule const *[] irs=0) -> double
ComputeDGFaceJumpError(GridFunction self, Coefficient exsol, Coefficient ell_coeff, double Nu, mfem::IntegrationRule const *[] irs=0) -> double
"""
return _gridfunc.GridFunction_ComputeDGFaceJumpError(self, *args)
ComputeDGFaceJumpError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeDGFaceJumpError)
def ComputeH1Error(self, *args):
r"""
ComputeH1Error(GridFunction self, Coefficient exsol, VectorCoefficient exgrad, Coefficient ell_coef, double Nu, int norm_type) -> double
ComputeH1Error(GridFunction self, Coefficient exsol, VectorCoefficient exgrad, mfem::IntegrationRule const *[] irs=0) -> double
"""
return _gridfunc.GridFunction_ComputeH1Error(self, *args)
ComputeH1Error = _swig_new_instance_method(_gridfunc.GridFunction_ComputeH1Error)
def ComputeHDivError(self, exsol, exdiv, irs=0):
r"""ComputeHDivError(GridFunction self, VectorCoefficient exsol, Coefficient exdiv, mfem::IntegrationRule const *[] irs=0) -> double"""
return _gridfunc.GridFunction_ComputeHDivError(self, exsol, exdiv, irs)
ComputeHDivError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeHDivError)
def ComputeHCurlError(self, exsol, excurl, irs=0):
r"""ComputeHCurlError(GridFunction self, VectorCoefficient exsol, VectorCoefficient excurl, mfem::IntegrationRule const *[] irs=0) -> double"""
return _gridfunc.GridFunction_ComputeHCurlError(self, exsol, excurl, irs)
ComputeHCurlError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeHCurlError)
def ComputeMaxError(self, *args):
r"""
ComputeMaxError(GridFunction self, Coefficient exsol, mfem::IntegrationRule const *[] irs=0) -> double
ComputeMaxError(GridFunction self, mfem::Coefficient *[] exsol, mfem::IntegrationRule const *[] irs=0) -> double
ComputeMaxError(GridFunction self, VectorCoefficient exsol, mfem::IntegrationRule const *[] irs=0) -> double
"""
return _gridfunc.GridFunction_ComputeMaxError(self, *args)
ComputeMaxError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeMaxError)
def ComputeW11Error(self, exsol, exgrad, norm_type, elems=None, irs=0):
r"""ComputeW11Error(GridFunction self, Coefficient exsol, VectorCoefficient exgrad, int norm_type, intArray elems=None, mfem::IntegrationRule const *[] irs=0) -> double"""
return _gridfunc.GridFunction_ComputeW11Error(self, exsol, exgrad, norm_type, elems, irs)
ComputeW11Error = _swig_new_instance_method(_gridfunc.GridFunction_ComputeW11Error)
def ComputeL1Error(self, *args):
r"""
ComputeL1Error(GridFunction self, Coefficient exsol, mfem::IntegrationRule const *[] irs=0) -> double
ComputeL1Error(GridFunction self, VectorCoefficient exsol, mfem::IntegrationRule const *[] irs=0) -> double
"""
return _gridfunc.GridFunction_ComputeL1Error(self, *args)
ComputeL1Error = _swig_new_instance_method(_gridfunc.GridFunction_ComputeL1Error)
def ComputeLpError(self, *args):
r"""
ComputeLpError(GridFunction self, double const p, Coefficient exsol, Coefficient weight=None, mfem::IntegrationRule const *[] irs=0) -> double
ComputeLpError(GridFunction self, double const p, VectorCoefficient exsol, Coefficient weight=None, VectorCoefficient v_weight=None, mfem::IntegrationRule const *[] irs=0) -> double
"""
return _gridfunc.GridFunction_ComputeLpError(self, *args)
ComputeLpError = _swig_new_instance_method(_gridfunc.GridFunction_ComputeLpError)
def ComputeElementLpErrors(self, *args):
r"""
ComputeElementLpErrors(GridFunction self, double const p, Coefficient exsol, Vector error, Coefficient weight=None, mfem::IntegrationRule const *[] irs=0)
ComputeElementLpErrors(GridFunction self, double const p, VectorCoefficient exsol, Vector error, Coefficient weight=None, VectorCoefficient v_weight=None, mfem::IntegrationRule const *[] irs=0)
"""
return _gridfunc.GridFunction_ComputeElementLpErrors(self, *args)
ComputeElementLpErrors = _swig_new_instance_method(_gridfunc.GridFunction_ComputeElementLpErrors)
def ComputeElementL1Errors(self, *args):
r"""
ComputeElementL1Errors(GridFunction self, Coefficient exsol, Vector error, mfem::IntegrationRule const *[] irs=0)
ComputeElementL1Errors(GridFunction self, VectorCoefficient exsol, Vector error, mfem::IntegrationRule const *[] irs=0)
"""
return _gridfunc.GridFunction_ComputeElementL1Errors(self, *args)
ComputeElementL1Errors = _swig_new_instance_method(_gridfunc.GridFunction_ComputeElementL1Errors)
def ComputeElementL2Errors(self, *args):
r"""
ComputeElementL2Errors(GridFunction self, Coefficient exsol, Vector error, mfem::IntegrationRule const *[] irs=0)
ComputeElementL2Errors(GridFunction self, VectorCoefficient exsol, Vector error, mfem::IntegrationRule const *[] irs=0)
"""
return _gridfunc.GridFunction_ComputeElementL2Errors(self, *args)
ComputeElementL2Errors = _swig_new_instance_method(_gridfunc.GridFunction_ComputeElementL2Errors)
def ComputeElementMaxErrors(self, *args):
r"""
ComputeElementMaxErrors(GridFunction self, Coefficient exsol, Vector error, mfem::IntegrationRule const *[] irs=0)
ComputeElementMaxErrors(GridFunction self, VectorCoefficient exsol, Vector error, mfem::IntegrationRule const *[] irs=0)
"""
return _gridfunc.GridFunction_ComputeElementMaxErrors(self, *args)
ComputeElementMaxErrors = _swig_new_instance_method(_gridfunc.GridFunction_ComputeElementMaxErrors)
def ComputeFlux(self, blfi, flux, wcoef=True, subdomain=-1):
r"""ComputeFlux(GridFunction self, BilinearFormIntegrator blfi, GridFunction flux, bool wcoef=True, int subdomain=-1)"""
return _gridfunc.GridFunction_ComputeFlux(self, blfi, flux, wcoef, subdomain)
ComputeFlux = _swig_new_instance_method(_gridfunc.GridFunction_ComputeFlux)
def Assign(self, *args):
r"""
Assign(GridFunction self, GridFunction rhs) -> GridFunction
Assign(GridFunction self, double value) -> GridFunction
Assign(GridFunction self, Vector v) -> GridFunction
"""
return _gridfunc.GridFunction_Assign(self, *args)
Assign = _swig_new_instance_method(_gridfunc.GridFunction_Assign)
def Update(self):
r"""Update(GridFunction self)"""
return _gridfunc.GridFunction_Update(self)
Update = _swig_new_instance_method(_gridfunc.GridFunction_Update)
def FESpace(self, *args):
r"""
FESpace(GridFunction self) -> FiniteElementSpace
FESpace(GridFunction self) -> FiniteElementSpace
"""
return _gridfunc.GridFunction_FESpace(self, *args)
FESpace = _swig_new_instance_method(_gridfunc.GridFunction_FESpace)
def SetSpace(self, f):
r"""SetSpace(GridFunction self, FiniteElementSpace f)"""
return _gridfunc.GridFunction_SetSpace(self, f)
SetSpace = _swig_new_instance_method(_gridfunc.GridFunction_SetSpace)
def MakeRef(self, *args):
r"""
MakeRef(GridFunction self, Vector base, int offset, int size)
MakeRef(GridFunction self, Vector base, int offset)
MakeRef(GridFunction self, FiniteElementSpace f, double * v)
MakeRef(GridFunction self, FiniteElementSpace f, Vector v, int v_offset)
"""
return _gridfunc.GridFunction_MakeRef(self, *args)
MakeRef = _swig_new_instance_method(_gridfunc.GridFunction_MakeRef)
def MakeTRef(self, *args):
r"""
MakeTRef(GridFunction self, FiniteElementSpace f, double * tv)
MakeTRef(GridFunction self, FiniteElementSpace f, Vector tv, int tv_offset)
"""
return _gridfunc.GridFunction_MakeTRef(self, *args)
MakeTRef = _swig_new_instance_method(_gridfunc.GridFunction_MakeTRef)
def SaveVTK(self, out, field_name, ref):
r"""SaveVTK(GridFunction self, std::ostream & out, std::string const & field_name, int ref)"""
return _gridfunc.GridFunction_SaveVTK(self, out, field_name, ref)
SaveVTK = _swig_new_instance_method(_gridfunc.GridFunction_SaveVTK)
def SaveSTL(self, out, TimesToRefine=1):
r"""SaveSTL(GridFunction self, std::ostream & out, int TimesToRefine=1)"""
return _gridfunc.GridFunction_SaveSTL(self, out, TimesToRefine)
SaveSTL = _swig_new_instance_method(_gridfunc.GridFunction_SaveSTL)
__swig_destroy__ = _gridfunc.delete_GridFunction
def __init__(self, *args):
r"""
__init__(GridFunction self) -> GridFunction
__init__(GridFunction self, GridFunction orig) -> GridFunction
__init__(GridFunction self, FiniteElementSpace f) -> GridFunction
__init__(GridFunction self, FiniteElementSpace f, double * data) -> GridFunction
__init__(GridFunction self, Mesh m, std::istream & input) -> GridFunction
__init__(GridFunction self, Mesh m, mfem::GridFunction *[] gf_array, int num_pieces) -> GridFunction
__init__(GridFunction self, FiniteElementSpace fes, Vector v, int offset) -> GridFunction
"""
_gridfunc.GridFunction_swiginit(self, _gridfunc.new_GridFunction(*args))
def SaveToFile(self, gf_file, precision):
r"""SaveToFile(GridFunction self, char const * gf_file, int const precision)"""
return _gridfunc.GridFunction_SaveToFile(self, gf_file, precision)
SaveToFile = _swig_new_instance_method(_gridfunc.GridFunction_SaveToFile)
def WriteToStream(self, StringIO):
r"""WriteToStream(GridFunction self, PyObject * StringIO) -> PyObject *"""
return _gridfunc.GridFunction_WriteToStream(self, StringIO)
WriteToStream = _swig_new_instance_method(_gridfunc.GridFunction_WriteToStream)
def iadd(self, c):
r"""iadd(GridFunction self, GridFunction c) -> GridFunction"""
return _gridfunc.GridFunction_iadd(self, c)
iadd = _swig_new_instance_method(_gridfunc.GridFunction_iadd)
def isub(self, *args):
r"""
isub(GridFunction self, GridFunction c) -> GridFunction
isub(GridFunction self, double c) -> GridFunction
"""
return _gridfunc.GridFunction_isub(self, *args)
isub = _swig_new_instance_method(_gridfunc.GridFunction_isub)
def imul(self, c):
r"""imul(GridFunction self, double c) -> GridFunction"""
return _gridfunc.GridFunction_imul(self, c)
imul = _swig_new_instance_method(_gridfunc.GridFunction_imul)
def idiv(self, c):
r"""idiv(GridFunction self, double c) -> GridFunction"""
return _gridfunc.GridFunction_idiv(self, c)
idiv = _swig_new_instance_method(_gridfunc.GridFunction_idiv)
def Save(self, *args):
r"""
Save(GridFunction self, std::ostream & out)
Save(GridFunction self, char const * fname, int precision=16)
Save(GridFunction self, char const * file, int precision=16)
"""
return _gridfunc.GridFunction_Save(self, *args)
Save = _swig_new_instance_method(_gridfunc.GridFunction_Save)
def SaveGZ(self, file, precision=16):
r"""SaveGZ(GridFunction self, char const * file, int precision=16)"""
return _gridfunc.GridFunction_SaveGZ(self, file, precision)
SaveGZ = _swig_new_instance_method(_gridfunc.GridFunction_SaveGZ)
# Register GridFunction in _gridfunc:
_gridfunc.GridFunction_swigregister(GridFunction)
class JumpScaling(object):
r"""Proxy of C++ mfem::JumpScaling class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
CONSTANT = _gridfunc.JumpScaling_CONSTANT
ONE_OVER_H = _gridfunc.JumpScaling_ONE_OVER_H
P_SQUARED_OVER_H = _gridfunc.JumpScaling_P_SQUARED_OVER_H
def __init__(self, *args, **kwargs):
r"""__init__(JumpScaling self, double nu_=1.0, mfem::JumpScaling::JumpScalingType type_=CONSTANT) -> JumpScaling"""
_gridfunc.JumpScaling_swiginit(self, _gridfunc.new_JumpScaling(*args, **kwargs))
def Eval(self, h, p):
r"""Eval(JumpScaling self, double h, int p) -> double"""
return _gridfunc.JumpScaling_Eval(self, h, p)
Eval = _swig_new_instance_method(_gridfunc.JumpScaling_Eval)
__swig_destroy__ = _gridfunc.delete_JumpScaling
# Register JumpScaling in _gridfunc:
_gridfunc.JumpScaling_swigregister(JumpScaling)
class QuadratureFunction(mfem._par.vector.Vector):
r"""Proxy of C++ mfem::QuadratureFunction class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
r"""
__init__(QuadratureFunction self) -> QuadratureFunction
__init__(QuadratureFunction self, QuadratureFunction orig) -> QuadratureFunction
__init__(QuadratureFunction self, QuadratureSpace qspace_, int vdim_=1) -> QuadratureFunction
__init__(QuadratureFunction self, QuadratureSpace qspace_, double * qf_data, int vdim_=1) -> QuadratureFunction
__init__(QuadratureFunction self, Mesh mesh, std::istream & _in) -> QuadratureFunction
"""
_gridfunc.QuadratureFunction_swiginit(self, _gridfunc.new_QuadratureFunction(*args))
__swig_destroy__ = _gridfunc.delete_QuadratureFunction
def GetSpace(self):
r"""GetSpace(QuadratureFunction self) -> QuadratureSpace"""
return _gridfunc.QuadratureFunction_GetSpace(self)
GetSpace = _swig_new_instance_method(_gridfunc.QuadratureFunction_GetSpace)
def SetSpace(self, *args):
r"""
SetSpace(QuadratureFunction self, QuadratureSpace qspace_, int vdim_=-1)
SetSpace(QuadratureFunction self, QuadratureSpace qspace_, double * qf_data, int vdim_=-1)
"""
return _gridfunc.QuadratureFunction_SetSpace(self, *args)
SetSpace = _swig_new_instance_method(_gridfunc.QuadratureFunction_SetSpace)
def GetVDim(self):
r"""GetVDim(QuadratureFunction self) -> int"""
return _gridfunc.QuadratureFunction_GetVDim(self)
GetVDim = _swig_new_instance_method(_gridfunc.QuadratureFunction_GetVDim)
def SetVDim(self, vdim_):
r"""SetVDim(QuadratureFunction self, int vdim_)"""
return _gridfunc.QuadratureFunction_SetVDim(self, vdim_)
SetVDim = _swig_new_instance_method(_gridfunc.QuadratureFunction_SetVDim)
def OwnsSpace(self):
r"""OwnsSpace(QuadratureFunction self) -> bool"""
return _gridfunc.QuadratureFunction_OwnsSpace(self)
OwnsSpace = _swig_new_instance_method(_gridfunc.QuadratureFunction_OwnsSpace)
def SetOwnsSpace(self, own):
r"""SetOwnsSpace(QuadratureFunction self, bool own)"""
return _gridfunc.QuadratureFunction_SetOwnsSpace(self, own)
SetOwnsSpace = _swig_new_instance_method(_gridfunc.QuadratureFunction_SetOwnsSpace)
def GetElementIntRule(self, idx):
r"""GetElementIntRule(QuadratureFunction self, int idx) -> IntegrationRule"""
return _gridfunc.QuadratureFunction_GetElementIntRule(self, idx)
GetElementIntRule = _swig_new_instance_method(_gridfunc.QuadratureFunction_GetElementIntRule)
def GetElementValues(self, *args):
r"""
GetElementValues(QuadratureFunction self, int idx, Vector values)
GetElementValues(QuadratureFunction self, int idx, Vector values)
GetElementValues(QuadratureFunction self, int idx, int const ip_num, Vector values)
GetElementValues(QuadratureFunction self, int idx, int const ip_num, Vector values)
GetElementValues(QuadratureFunction self, int idx, DenseMatrix values)
GetElementValues(QuadratureFunction self, int idx, DenseMatrix values)
"""
return _gridfunc.QuadratureFunction_GetElementValues(self, *args)
GetElementValues = _swig_new_instance_method(_gridfunc.QuadratureFunction_GetElementValues)
def Save(self, *args):
r"""
Save(QuadratureFunction self, std::ostream & out)
Save(QuadratureFunction self, char const * file, int precision=16)
"""
return _gridfunc.QuadratureFunction_Save(self, *args)
Save = _swig_new_instance_method(_gridfunc.QuadratureFunction_Save)
def SaveGZ(self, file, precision=16):
r"""SaveGZ(QuadratureFunction self, char const * file, int precision=16)"""
return _gridfunc.QuadratureFunction_SaveGZ(self, file, precision)
SaveGZ = _swig_new_instance_method(_gridfunc.QuadratureFunction_SaveGZ)
# Register QuadratureFunction in _gridfunc:
_gridfunc.QuadratureFunction_swigregister(QuadratureFunction)
def __lshift__(*args):
r"""
__lshift__(std::ostream & os, SparseMatrix mat) -> std::ostream
__lshift__(std::ostream & out, Mesh mesh) -> std::ostream
__lshift__(std::ostream & out, GridFunction sol) -> std::ostream
__lshift__(std::ostream & out, QuadratureFunction qf) -> std::ostream &
"""
return _gridfunc.__lshift__(*args)
__lshift__ = _gridfunc.__lshift__
def ZZErrorEstimator(blfi, u, flux, error_estimates, aniso_flags=None, with_subdomains=1, with_coeff=False):
r"""ZZErrorEstimator(BilinearFormIntegrator blfi, GridFunction u, GridFunction flux, Vector error_estimates, intArray aniso_flags=None, int with_subdomains=1, bool with_coeff=False) -> double"""
return _gridfunc.ZZErrorEstimator(blfi, u, flux, error_estimates, aniso_flags, with_subdomains, with_coeff)
ZZErrorEstimator = _gridfunc.ZZErrorEstimator
def ComputeElementLpDistance(p, i, gf1, gf2):
r"""ComputeElementLpDistance(double p, int i, GridFunction gf1, GridFunction gf2) -> double"""
return _gridfunc.ComputeElementLpDistance(p, i, gf1, gf2)
ComputeElementLpDistance = _gridfunc.ComputeElementLpDistance
class ExtrudeCoefficient(mfem._par.coefficient.Coefficient):
r"""Proxy of C++ mfem::ExtrudeCoefficient class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, m, s, n_):
r"""__init__(ExtrudeCoefficient self, Mesh m, Coefficient s, int n_) -> ExtrudeCoefficient"""
_gridfunc.ExtrudeCoefficient_swiginit(self, _gridfunc.new_ExtrudeCoefficient(m, s, n_))
def Eval(self, T, ip):
r"""Eval(ExtrudeCoefficient self, ElementTransformation T, IntegrationPoint ip) -> double"""
return _gridfunc.ExtrudeCoefficient_Eval(self, T, ip)
Eval = _swig_new_instance_method(_gridfunc.ExtrudeCoefficient_Eval)
__swig_destroy__ = _gridfunc.delete_ExtrudeCoefficient
# Register ExtrudeCoefficient in _gridfunc:
_gridfunc.ExtrudeCoefficient_swigregister(ExtrudeCoefficient)
def Extrude1DGridFunction(mesh, mesh2d, sol, ny):
r"""Extrude1DGridFunction(Mesh mesh, Mesh mesh2d, GridFunction sol, int const ny) -> GridFunction"""
return _gridfunc.Extrude1DGridFunction(mesh, mesh2d, sol, ny)
Extrude1DGridFunction = _gridfunc.Extrude1DGridFunction
def __iadd__(self, v):
ret = _gridfunc.GridFunction_iadd(self, v)
ret.thisown = 0
return self
def __isub__(self, v):
ret = _gridfunc.GridFunction_isub(self, v)
ret.thisown = 0
return self
def __idiv__(self, v):
ret = _gridfunc.GridFunction_idiv(self, v)
ret.thisown = 0
return self
def __imul__(self, v):
ret = _gridfunc.GridFunction_imul(self, v)
ret.thisown = 0
return self
GridFunction.__iadd__ = __iadd__
GridFunction.__idiv__ = __idiv__
GridFunction.__isub__ = __isub__
GridFunction.__imul__ = __imul__
| 2.15625 | 2 |
src/tracks/settings.py | adcarmichael/tracks | 0 | 3268 | import os
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PWA_SERVICE_WORKER_PATH = os.path.join(
BASE_DIR, 'routes/static/routes/js', 'serviceworker.js')
print(os.path.join(
BASE_DIR, 'routes/static/routes/js', 'serviceworker.js'))
DEBUG = int(os.environ.get("DEBUG", default=0))
SECRET_KEY = os.environ.get("SECRET_KEY", '<KEY>')
# 'DJANGO_ALLOWED_HOSTS' should be a single string of hosts with a space between each.
# For example: 'DJANGO_ALLOWED_HOSTS=localhost 127.0.0.1 [::1]'
ALLOWED_HOSTS = os.environ.get("DJANGO_ALLOWED_HOSTS", 'localhost').split(" ")
# Application definition
INSTALLED_APPS = [
'routes',
'accounts',
'dashboard.apps.DashboardConfig',
'api.apps.ApiConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'widget_tweaks',
'rest_framework',
'pwa',
]
# 'celery',
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tracks.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'tracks.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": os.environ.get("SQL_ENGINE", "django.db.backends.sqlite3"),
"NAME": os.environ.get("SQL_DATABASE", os.path.join(BASE_DIR, "db.sqlite3")),
"USER": os.environ.get("SQL_USER", "user"),
"PASSWORD": os.environ.get("SQL_PASSWORD", "password"),
"HOST": os.environ.get("SQL_HOST", "localhost"),
"PORT": os.environ.get("SQL_PORT", "5432"),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
STATIC_ROOT = './static/'
MEDIA_ROOT = './media/'
LOGIN_REDIRECT_URL = 'home'
LOGOUT_REDIRECT_URL = 'home'
# no email for localhost or staging
EMAIL_USE_TLS = os.environ.get("EMAIL_USE_TLS")
EMAIL_HOST = os.environ.get("EMAIL_HOST")
EMAIL_HOST_USER = os.environ.get("EMAIL_HOST_USER")
EMAIL_HOST_PASSWORD = os.environ.get("EMAIL_HOST_PASSWORD")
EMAIL_PORT = os.environ.get("EMAIL_PORT")
EMAIL_BACKEND = os.environ.get("EMAIL_BACKEND")
DEFAULT_FROM_EMAIL = '<EMAIL>'
# CELERY
# CELERY_BROKER_URL = 'redis://redis:6379/0'
# CELERY_RESULT_BACKEND = 'redis://redis:6379/0'
# BROKER_URL = 'redis://localhost:6379/0'
# CELERY_RESULT_BACKEND = 'redis://localhost:6379/'
# CELERY_ACCEPT_CONTENT = ['application/json']
# CELERY_TASK_SERIALIZER = 'json'
# CELERY_RESULT_SERIALIZER = 'json'
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
],
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE': 10
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'console': {
'format': '%(levelname)s %(asctime)s %(module)s: %(message)s'
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'console'
},
},
'loggers': {
'': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
},
'django': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
},
'django.request': {
'level': 'INFO',
'handlers': ['console']
}
# 'celery': {
# 'handlers': ['console'],
# 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
# },
},
}
# STATICFILES_DIRS = [
# os.path.join(BASE_DIR, 'static'),
# ]
PWA_APP_NAME = 'ChalkTracks'
PWA_APP_DESCRIPTION = "Indoor Climbing Tracker"
PWA_APP_THEME_COLOR = '#000000'
PWA_APP_BACKGROUND_COLOR = '#000000'
PWA_APP_DISPLAY = 'standalone'
PWA_APP_SCOPE = '/'
PWA_APP_ORIENTATION = 'portrait'
PWA_APP_START_URL = '/'
PWA_APP_ICONS = [
{
'src': '/static/routes/favicon_io/favicon-32x32.png',
'sizes': '32x32',
"type": "image/png",
"purpose": "any maskable"
}, {
"src": "/static/routes/favicon_io/android-chrome-192x192.png",
"sizes": "192x192",
"type": "image/png",
"purpose": "any maskable"
}, {
"src": "/static/routes/favicon_io/android-chrome-512x512.png",
"sizes": "512x512",
"type": "image/png",
"purpose": "any maskable"
}
]
PWA_APP_DIR = 'ltr'
PWA_APP_LANG = 'en-US'
sentry_sdk.init(
dsn="https://[email protected]/1878812",
integrations=[DjangoIntegration()],
# If you wish to associate users to errors (assuming you are using
# django.contrib.auth) you may enable sending PII data.
send_default_pii=True
)
| 2.0625 | 2 |
examples/04-lights/plotter_builtins.py | akeshavan/pyvista | 0 | 3269 | <reponame>akeshavan/pyvista
"""
Plotter Lighting Systems
~~~~~~~~~~~~~~~~~~~~~~~~
The :class:`pyvista.Plotter` class comes with three options for the default
lighting system:
* a light kit consisting of a headlight and four camera lights,
* an illumination system containing three lights arranged around the camera,
* no lighting.
With meshes that don't have depth information encoded in their color, an
appropriate lighting setup becomes paramount for accurate visualization.
Light kit
=========
The default ``lighting='light kit'`` option recreates a lighting setup that
corresponds to a ``vtk.vtkLightKit``. We can check what type of lights this
lighting comprises:
"""
# sphinx_gallery_thumbnail_number = 3
import pyvista as pv
from pyvista import examples
# default: light kit
plotter = pv.Plotter()
light_types = [light.light_type for light in plotter.renderer.lights]
# Remove from plotters so output is not produced in docs
pv.plotting._ALL_PLOTTERS.clear()
light_types
###############################################################################
# Add a white terrain to the scene:
mesh = examples.download_st_helens().warp_by_scalar()
plotter = pv.Plotter()
plotter.add_mesh(mesh, color='white')
plotter.show()
###############################################################################
# Three-lights illumination
# =========================
#
# Switching to three-lights illumination gives a different character to the
# figure, in this case showing less contrast when viewing the mountain from
# the top, but having more contrast with views closer to the side. This becomes
# especially clear when exploring the figures interactively.
plotter = pv.Plotter(lighting='three lights')
plotter.add_mesh(mesh, color='white')
plotter.show()
###############################################################################
# Again we can check what kind of lights this setting uses:
plotter = pv.Plotter(lighting='three lights')
light_types = [light.light_type for light in plotter.renderer.lights]
# Remove from plotters so output is not produced in docs
pv.plotting._ALL_PLOTTERS.clear()
light_types
###############################################################################
# Custom lighting
# ===============
#
# We can introduce our own lighting from scratch by disabling any lighting
# on plotter initialization. Adding a single scene light to a scene will
# often result in ominous visuals due to objects having larger regions in
# shadow:
plotter = pv.Plotter(lighting='none')
plotter.add_mesh(mesh, color='white')
light = pv.Light()
light.set_direction_angle(30, 0)
plotter.add_light(light)
plotter.show()
| 2.828125 | 3 |
src/swimport/tests/15_char_arrays/main.py | talos-gis/swimport | 1 | 3270 | <reponame>talos-gis/swimport<gh_stars>1-10
from swimport.all import *
src = FileSource('src.h')
swim = Swim('example')
swim(pools.c_string)
swim(pools.numpy_arrays(r"../resources", allow_char_arrays=True))
swim(pools.include(src))
assert swim(Function.Behaviour()(src)) > 0
swim.write('example.i')
print('ok!') | 1.984375 | 2 |
ipyvolume/astro.py | larsoner/ipyvolume | 1 | 3271 | <gh_stars>1-10
import numpy as np
import PIL.Image
import pythreejs
import ipyvolume as ipv
from .datasets import UrlCached
def _randomSO3():
    """return a random rotation matrix, algo by <NAME>"""
u1 = np.random.random()
u2 = np.random.random()
u3 = np.random.random()
R = np.array([[np.cos(2*np.pi*u1), np.sin(2*np.pi*u1), 0], [-np.sin(2*np.pi*u1), np.cos(2*np.pi*u1), 0], [0, 0, 1]])
v = np.array([np.cos(2*np.pi*u2)*np.sqrt(u3), np.sin(2*np.pi*u2)*np.sqrt(u3), np.sqrt(1-u3)])
H = np.identity(3)-2*v*np.transpose([v])
return - np.dot(H, R)
def spherical_galaxy_orbit(orbit_x, orbit_y, orbit_z, N_stars=100, sigma_r=1, orbit_visible=False, orbit_line_interpolate=5, N_star_orbits=10, color=[255, 220, 200], size_star=1, scatter_kwargs={}):
"""Create a fake galaxy around the points orbit_x/y/z with N_stars around it"""
if orbit_line_interpolate > 1:
import scipy.interpolate
x = np.linspace(0, 1, len(orbit_x))
x_smooth = np.linspace(0, 1, len(orbit_x)*orbit_line_interpolate)
kind = 'quadratic'
orbit_x_line = scipy.interpolate.interp1d(x, orbit_x, kind)(x_smooth)
orbit_y_line = scipy.interpolate.interp1d(x, orbit_y, kind)(x_smooth)
orbit_z_line = scipy.interpolate.interp1d(x, orbit_z, kind)(x_smooth)
else:
orbit_x_line = orbit_x
orbit_y_line = orbit_y
orbit_z_line = orbit_z
line = ipv.plot(orbit_x_line, orbit_y_line, orbit_z_line, visible=orbit_visible)
x = np.repeat(orbit_x, N_stars).reshape((-1, N_stars))
y = np.repeat(orbit_y, N_stars).reshape((-1, N_stars))
z = np.repeat(orbit_z, N_stars).reshape((-1, N_stars))
xr, yr, zr = np.random.normal(0, scale=sigma_r, size=(3, N_stars))# +
r = np.sqrt(xr**2 + yr**2 + zr**2)
for i in range(N_stars):
a = np.linspace(0, 1, x.shape[0]) * 2 * np.pi * N_star_orbits
xo = r[i] * np.sin(a)
yo = r[i] * np.cos(a)
zo = a * 0
xo, yo, zo = np.dot(_randomSO3(), [xo, yo, zo])
#print(x.shape, xo.shape)
x[:, i] += xo
y[:, i] += yo
z[:, i] += zo
sprite = ipv.scatter(x, y, z, texture=radial_sprite((64, 64), color), marker='square_2d', size=size_star, **scatter_kwargs)
with sprite.material.hold_sync():
sprite.material.blending = pythreejs.BlendingMode.CustomBlending
sprite.material.blendSrc = pythreejs.BlendFactors.SrcColorFactor
sprite.material.blendDst = pythreejs.BlendFactors.OneFactor
sprite.material.blendEquation = 'AddEquation'
sprite.material.transparent = True
sprite.material.depthWrite = False
sprite.material.alphaTest = 0.1
return sprite, line
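# Illustrative usage (the circular orbit below is made up): place a fake galaxy
# of 200 stars along an orbit inside a freshly created ipyvolume figure.
def _example_spherical_galaxy():
    ipv.figure()
    a = np.linspace(0, 2 * np.pi, 50)
    return spherical_galaxy_orbit(10 * np.cos(a), 10 * np.sin(a), np.zeros_like(a),
                                  N_stars=200, sigma_r=1.5)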
def radial_sprite(shape, color):
color = np.array(color)
ara = np.zeros(shape[:2] + (4,), dtype=np.uint8)
x = np.linspace(-1, 1, shape[0])
y = np.linspace(-1, 1, shape[1])
x, y = np.meshgrid(x, y)
s = 0.5
radius = np.sqrt(x**2+y**2)
amplitude = np.maximum(0, np.exp(-radius**2/s**2)).T
ara[...,3] = (amplitude * 255)
ara[...,:3] = color * amplitude.reshape(shape + (1,))
im = PIL.Image.fromarray(ara, 'RGBA')
return im
def stars(N=1000, radius=100000, thickness=3, seed=42, color=[255, 240, 240]):
import ipyvolume as ipv
rng = np.random.RandomState(seed)
x, y, z = rng.normal(size=(3, N))
r = np.sqrt(x**2 + y**2 + z**2)/(radius + thickness * radius * np.random.random(N))
x /= r
y /= r
z /= r
return ipv.scatter(x, y, z, texture=radial_sprite((64, 64), color), marker='square_2d', grow_limits=False, size=radius*0.7/100)
milkyway_url = 'https://www.nasa.gov/sites/default/files/images/620057main_milkyway_full.jpg'
milkyway_image = UrlCached(milkyway_url)
def plot_milkyway(R_sun=8, size=100):
mw_image = PIL.Image.open(milkyway_image.fetch())
rescale = 40
t = np.linspace(0, 1, 100)
xmw = np.linspace(0, 1, 10)
ymw = np.linspace(0, 1, 10)
xmw, ymw = np.meshgrid(xmw, ymw)
zmw = xmw * 0 + 0.01
mw = mesh = ipv.plot_mesh((xmw-0.5)*rescale, (ymw-0.5)*rescale+R_sun, zmw, u=xmw, v=ymw, texture=mw_image, wireframe=False)
mw.material.blending = pythreejs.BlendingMode.CustomBlending
mw.material.blendSrc = pythreejs.BlendFactors.SrcColorFactor
mw.material.blendDst = pythreejs.BlendFactors.OneFactor
mw.material.blendEquation = 'AddEquation'
mw.material.transparent = True
mw.material.depthWrite = False
mw.material.alphaTest = 0.1
ipv.xyzlim(size)
return mesh | 3 | 3 |
deepfunning/function.py | Zrealshadow/DeepFunning | 0 | 3272 | '''
* @author Waldinsamkeit
* @email <EMAIL>
* @create date 2020-09-25 14:33:38
* @desc
'''
import torch
'''--------------------- Weighted Binary cross Entropy ----------------------'''
'''
In Torch BCELoss, weight is set to every element of input instead of to every class
'''
def weighted_binary_cross_entropy(output, target, weights=None):
if weights is not None:
assert len(weights) == 2
loss = weights[1] * (target * torch.log(output)) + \
weights[0] * ((1 - target) * torch.log(1 - output))
else:
loss = target * torch.log(output) + (1 - target) * torch.log(1 - output)
return torch.neg(torch.mean(loss))
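'''
Minimal usage sketch (the tensors below are made up): weights[0] scales the
negative-class term and weights[1] the positive-class term, so this call
penalises missed positives twice as hard as false positives.
'''
def _example_weighted_bce():
    probs = torch.tensor([0.9, 0.2, 0.7])   # model outputs already squashed to (0, 1)
    targets = torch.tensor([1.0, 0.0, 1.0])
    return weighted_binary_cross_entropy(probs, targets, weights=[1.0, 2.0])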
''' ---------------------- Binary focal loss function -------------------------- '''
'''
To some degree, it can reduce the influence of an imbalanced dataset
'''
def focal_loss(y_true,y_pred,device):
alpha,gamma = torch.tensor(0.25).to(device) , torch.tensor(2.0).to(device)
y_pred=torch.clamp(y_pred,1e-7,1-1e-7)
return - alpha * y_true * torch.log(y_pred) * (1 - y_pred) ** gamma\
- (1 - alpha) * (1 - y_true) * torch.log(1 - y_pred) * y_pred
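'''
Minimal usage sketch (the tensors below are made up): focal_loss returns
per-element losses, so reduce with .mean() or .sum() before backpropagation.
'''
def _example_focal_loss(device=torch.device("cpu")):
    y_pred = torch.tensor([0.9, 0.1, 0.6], device=device)
    y_true = torch.tensor([1.0, 0.0, 1.0], device=device)
    return focal_loss(y_true, y_pred, device).mean()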
| 2.71875 | 3 |
dlms_cosem/hdlc/address.py | pwitab/dlms-cosem | 35 | 3273 | from typing import *
import attr
from dlms_cosem.hdlc import validators
@attr.s(auto_attribs=True)
class HdlcAddress:
"""
A client address shall always be expressed on one byte.
To enable addressing more than one logical device within a single physical device
    and to support the multi-drop configuration, the server address may be
    divided into two parts:
    The logical address, which addresses a logical device (a separately
    addressable entity within a physical device), makes up the upper HDLC address.
    The logical address must always be present.
    The physical address is used to address a physical device (a physical device
    on a multi-drop line).
    The physical address can be omitted if not used.
    A usage sketch follows the class definition.
"""
logical_address: int = attr.ib(validator=[validators.validate_hdlc_address])
physical_address: Optional[int] = attr.ib(
default=None, validator=[validators.validate_hdlc_address]
)
address_type: str = attr.ib(
default="client", validator=[validators.validate_hdlc_address_type]
)
@property
def length(self):
"""
The number of bytes the address makes up.
:return:
"""
return len(self.to_bytes())
def to_bytes(self):
out: List[Optional[int]] = list()
if self.address_type == "client":
# shift left 1 bit and set the lsb to mark end of address.
out.append(((self.logical_address << 1) | 0b00000001))
else:
# server address type
logical_higher, logical_lower = self._split_address(self.logical_address)
if self.physical_address:
physical_higher, physical_lower = self._split_address(
self.physical_address
)
# mark physical lower as end
physical_lower = physical_lower | 0b00000001
out.extend(
[logical_higher, logical_lower, physical_higher, physical_lower]
)
else:
                # no physical address so mark the logical as end.
logical_lower = logical_lower | 0b00000001
out.extend([logical_higher, logical_lower])
out_bytes = list()
for address in out:
if address:
out_bytes.append(address.to_bytes(1, "big"))
return b"".join(out_bytes)
@staticmethod
def _split_address(address: int) -> Tuple[Optional[int], int]:
higher: Optional[int]
lower: int
if address > 0b01111111:
lower = (address & 0b0000000001111111) << 1
higher = (address & 0b0011111110000000) >> 6
else:
lower = address << 1
higher = None
return higher, lower
@staticmethod
def _address_to_byte(address: int) -> bytes:
return address.to_bytes(1, "big")
@classmethod
def destination_from_bytes(cls, frame_bytes: bytes, address_type: str):
destination_address_data, _ = HdlcAddress.find_address_in_frame_bytes(
frame_bytes
)
(
destination_logical,
destination_physical,
destination_length,
) = destination_address_data
return cls(destination_logical, destination_physical, address_type)
@classmethod
def source_from_bytes(cls, frame_bytes: bytes, address_type: str):
_, source_address_data = HdlcAddress.find_address_in_frame_bytes(frame_bytes)
source_logical, source_physical, source_length = source_address_data
return cls(source_logical, source_physical, address_type)
@staticmethod
def find_address_in_frame_bytes(
hdlc_frame_bytes: bytes,
) -> Tuple[Tuple[int, Optional[int], int], Tuple[int, Optional[int], int]]:
"""
        An address can be 1, 2 or 4 bytes long. The end of the address is
        indicated by the LSB of the last byte being set to 1.
        The first address is the destination address and the second is the
        source address.
        :param hdlc_frame_bytes:
:return:
"""
# Find destination address.
destination_length: int = 1
destination_logical: int = 0
destination_physical: Optional[int] = 0
destination_positions_list: List[Tuple[int, int]] = [(3, 1), (4, 2), (6, 4)]
address_bytes: bytes
for pos, _length in destination_positions_list:
end_byte = hdlc_frame_bytes[pos]
if bool(end_byte & 0b00000001):
# Found end byte:
destination_length = _length
break
continue
if destination_length == 1:
address_bytes = hdlc_frame_bytes[3].to_bytes(1, "big")
destination_logical = address_bytes[0] >> 1
destination_physical = None
elif destination_length == 2:
address_bytes = hdlc_frame_bytes[3:5]
destination_logical = address_bytes[0] >> 1
destination_physical = address_bytes[1] >> 1
elif destination_length == 4:
address_bytes = hdlc_frame_bytes[3:7]
destination_logical = HdlcAddress.parse_two_byte_address(address_bytes[:2])
            destination_physical = HdlcAddress.parse_two_byte_address(address_bytes[2:])
# Find source address
source_length: int = 1
source_logical: int = 0
source_physical: Optional[int] = 0
source_position_list: List[Tuple[int, int]] = [
(item[0] + destination_length, item[1])
for item in destination_positions_list
]
for pos, _length in source_position_list:
end_byte = hdlc_frame_bytes[pos]
if bool(end_byte & 0b00000001):
# Found end byte:
source_length = _length
break
continue
if source_length == 1:
address_bytes = hdlc_frame_bytes[3 + destination_length].to_bytes(1, "big")
source_logical = address_bytes[0] >> 1
source_physical = None
elif source_length == 2:
address_bytes = hdlc_frame_bytes[3 + destination_length : 5 + source_length]
source_logical = address_bytes[0] >> 1
source_physical = address_bytes[1] >> 1
        elif source_length == 4:
address_bytes = hdlc_frame_bytes[3 + destination_length : 7 + source_length]
source_logical = HdlcAddress.parse_two_byte_address(address_bytes[:2])
            source_physical = HdlcAddress.parse_two_byte_address(address_bytes[2:])
return (
(destination_logical, destination_physical, destination_length),
(source_logical, source_physical, source_length),
)
@staticmethod
def parse_two_byte_address(address_bytes: bytes):
        if len(address_bytes) != 2:
            raise ValueError("Can only parse 2 bytes for address")
upper = address_bytes[0] >> 1
lower = address_bytes[1] >> 1
return lower + (upper << 7)
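# Minimal usage sketch (illustrative DLMS-style addresses; the exact accepted
# ranges depend on the validators module, which is not shown here):
if __name__ == "__main__":
    client = HdlcAddress(logical_address=16, address_type="client")
    print(client.length, client.to_bytes().hex())
    server = HdlcAddress(logical_address=1, physical_address=17, address_type="server")
    print(server.length, server.to_bytes().hex())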
| 3.15625 | 3 |
benchmarks/benchmarks/stats.py | RasmusSemmle/scipy | 1 | 3274 | from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
try:
import scipy.stats as stats
except ImportError:
pass
from .common import Benchmark
class Anderson_KSamp(Benchmark):
def setup(self, *args):
self.rand = [np.random.normal(loc=i, size=1000) for i in range(3)]
def time_anderson_ksamp(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
stats.anderson_ksamp(self.rand)
class CorrelationFunctions(Benchmark):
param_names = ['alternative']
params = [
['two-sided', 'less', 'greater']
]
def setup(self, mode):
a = np.random.rand(2,2) * 10
self.a = a
def time_fisher_exact(self, alternative):
oddsratio, pvalue = stats.fisher_exact(self.a, alternative=alternative)
class InferentialStats(Benchmark):
def setup(self):
np.random.seed(12345678)
self.a = stats.norm.rvs(loc=5, scale=10, size=500)
self.b = stats.norm.rvs(loc=8, scale=10, size=20)
self.c = stats.norm.rvs(loc=8, scale=20, size=20)
def time_ttest_ind_same_var(self):
        # test different sized samples with the same variance
stats.ttest_ind(self.a, self.b)
stats.ttest_ind(self.a, self.b, equal_var=False)
def time_ttest_ind_diff_var(self):
        # test different sized samples with different variances
stats.ttest_ind(self.a, self.c)
stats.ttest_ind(self.a, self.c, equal_var=False)
class Distribution(Benchmark):
param_names = ['distribution', 'properties']
params = [
['cauchy', 'gamma', 'beta'],
['pdf', 'cdf', 'rvs', 'fit']
]
def setup(self, distribution, properties):
np.random.seed(12345678)
self.x = np.random.rand(100)
def time_distribution(self, distribution, properties):
if distribution == 'gamma':
if properties == 'pdf':
stats.gamma.pdf(self.x, a=5, loc=4, scale=10)
elif properties == 'cdf':
stats.gamma.cdf(self.x, a=5, loc=4, scale=10)
elif properties == 'rvs':
stats.gamma.rvs(size=1000, a=5, loc=4, scale=10)
elif properties == 'fit':
stats.gamma.fit(self.x, loc=4, scale=10)
elif distribution == 'cauchy':
if properties == 'pdf':
stats.cauchy.pdf(self.x, loc=4, scale=10)
elif properties == 'cdf':
stats.cauchy.cdf(self.x, loc=4, scale=10)
elif properties == 'rvs':
stats.cauchy.rvs(size=1000, loc=4, scale=10)
elif properties == 'fit':
stats.cauchy.fit(self.x, loc=4, scale=10)
elif distribution == 'beta':
if properties == 'pdf':
stats.beta.pdf(self.x, a=5, b=3, loc=4, scale=10)
elif properties == 'cdf':
stats.beta.cdf(self.x, a=5, b=3, loc=4, scale=10)
elif properties == 'rvs':
stats.beta.rvs(size=1000, a=5, b=3, loc=4, scale=10)
elif properties == 'fit':
stats.beta.fit(self.x, loc=4, scale=10)
# Retain old benchmark results (remove this if changing the benchmark)
time_distribution.version = "fb22ae5386501008d945783921fe44aef3f82c1dafc40cddfaccaeec38b792b0"
class DescriptiveStats(Benchmark):
param_names = ['n_levels']
params = [
[10, 1000]
]
def setup(self, n_levels):
np.random.seed(12345678)
self.levels = np.random.randint(n_levels, size=(1000, 10))
def time_mode(self, n_levels):
stats.mode(self.levels, axis=0)
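# Note: these classes follow the airspeed velocity (asv) conventions used by the
# scipy benchmark suite (a shared Benchmark base class plus setup/time_* methods
# and params/param_names); they are collected and timed by asv rather than being
# executed directly.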
| 2.21875 | 2 |
src/production_ecommerce/account/models.py | sheriffbarrow/production-ecommerce | 1 | 3275 | <gh_stars>1-10
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
class MyAccountManager(BaseUserManager):
def create_user(self, email, username, password=None):
if not email:
raise ValueError('Users must have an email address')
if not username:
raise ValueError('Users must have a username')
user = self.model(
email=self.normalize_email(email),
username=username,
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, username, password):
user = self.create_user(
email=self.normalize_email(email),
password=password,
username=username,
)
user.is_admin = True
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class UserVendor(AbstractBaseUser):
email = models.EmailField(verbose_name='email address',max_length=255,unique=True)
contact = models.IntegerField(null=True, blank=True)
username = models.CharField(max_length=30)
location = models.CharField(max_length=30)
profession = models.CharField(max_length=30)
experience = models.CharField(max_length=30)
verified_id = models.CharField(max_length=255)
date_joined = models.DateTimeField(verbose_name='date joined', auto_now_add=True)
is_active = models.BooleanField(default=True)
is_admin = models.BooleanField(default=False)
is_staff = models.BooleanField(default=False)
image = models.ImageField(default='profile1.png' ,upload_to='profiles/images/', null=True, blank=True)
# notice the absence of a "Password field", that is built in.
objects = MyAccountManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['username'] # Email & Password are required by default.
def __str__(self):
return self.email
    # For checking permissions. To keep it simple, all admins have ALL permissions.
def has_perm(self, perm, obj=None):
return self.is_admin
# Does this user have permission to view this app? (ALWAYS YES FOR SIMPLICITY)
def has_module_perms(self, app_label):
return True
class Client(AbstractBaseUser):
email = models.EmailField(verbose_name='email address',max_length=255,unique=True)
contact = models.IntegerField(null=True, blank=True)
username = models.CharField(max_length=30)
location = models.CharField(max_length=30)
date_joined = models.DateTimeField(verbose_name='date joined', auto_now_add=True)
is_active = models.BooleanField(default=True)
is_admin = models.BooleanField(default=False)
is_staff = models.BooleanField(default=False)
# notice the absence of a "Password field", that is built in.
objects = MyAccountManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['username'] # Email & Password are required by default.
def __str__(self):
return self.email
# For checking permissions. to keep it simple all admin have ALL permissons
def has_perm(self, perm, obj=None):
return self.is_admin
# Does this user have permission to view this app? (ALWAYS YES FOR SIMPLICITY)
def has_module_perms(self, app_label):
return True
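# Illustrative usage of the custom manager (assumes a configured Django project
# with this app installed and migrations applied; values are placeholders):
#
#   vendor = UserVendor.objects.create_user(
#       email="vendor@example.com", username="vendor1", password="<PASSWORD>"
#   )
#   admin = UserVendor.objects.create_superuser(
#       email="admin@example.com", username="admin", password="<PASSWORD>"
#   )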
| 2.546875 | 3 |
dbtmetabase/models/config.py | fernandobrito/dbt-metabase | 0 | 3276 | from dataclasses import dataclass, field
from typing import Optional, Iterable, Union
@dataclass
class MetabaseConfig:
# Metabase Client
database: str
host: str
user: str
password: str
# Metabase additional connection opts
use_http: bool = False
verify: Union[str, bool] = True
# Metabase Sync
sync_skip: bool = False
sync_timeout: Optional[int] = None
@dataclass
class DbtConfig:
# dbt Reader
database: str
manifest_path: Optional[str] = None
path: Optional[str] = None
# dbt Target Models
schema: Optional[str] = None
schema_excludes: Iterable = field(default_factory=list)
includes: Iterable = field(default_factory=list)
excludes: Iterable = field(default_factory=list)
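# Illustrative instantiation (all values below are placeholders, not defaults of
# the package):
if __name__ == "__main__":
    metabase = MetabaseConfig(
        database="analytics",
        host="metabase.example.com",
        user="bot@example.com",
        password="<PASSWORD>",
    )
    dbt = DbtConfig(database="analytics", path="./models", schema="public")
    print(metabase.host, dbt.schema)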
| 2.21875 | 2 |
src/plot_timeseries_outstanding_bytes.py | arunksaha/heap_tracker | 1 | 3277 | <reponame>arunksaha/heap_tracker<filename>src/plot_timeseries_outstanding_bytes.py<gh_stars>1-10
#
# Copyright 2018, <NAME> <<EMAIL>>
#
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as md
import datetime as dt
import sys
import os
# Open the file, read the string contents into a list,
# and return the list.
def GetLinesListFromFile(filename):
with open(filename) as f:
content = f.readlines()
return content
# Convert usecs (numeric) to datetime
# >>> ts = 1520189090755278 / 1000000.0
# >>> x = datetime.datetime.fromtimestamp(ts)
# >>> x.strftime('%Y-%m-%d %H:%M:%S.%f')
# '2018-03-04 10:44:50.755278'
def ConvertUsecsEpochToDateTime(usecs):
secs = usecs / 1000000.0
# Attempt to parse usecs throws:
# ValueError: year is out of range
# So, using secs instead. REVISIT.
# datetimeObj = dt.datetime.fromtimestamp(usecs)
datetimeObj = dt.datetime.fromtimestamp(secs)
# print usecs, secs, datetimeObj
return datetimeObj
# Take a list of string tuples (timestamp, metric),
# parses them into numerical values and returns
# separate lists.
def GetTxListFromFile(filename):
lineList = GetLinesListFromFile(filename)
datetimeList = []
outBytesList = []
for line in lineList:
tokens = line.split()
# print tokens
assert(len(tokens) >= 2)
usecs = int(tokens[0])
bytes = int(tokens[1])
datetimeObj = ConvertUsecsEpochToDateTime(usecs)
datetimeList.append(datetimeObj)
outBytesList.append(bytes)
return datetimeList, outBytesList
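# Expected input format: each line holds two whitespace-separated columns,
# "<epoch_usecs> <outstanding_bytes>", e.g. "1520189090755278 1048576"
# (the byte count in this example is illustrative).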
# Plotting driver program.
def driver(dataFile):
datetimeList, outBytesList = GetTxListFromFile(dataFile)
plt.subplots_adjust(bottom = 0.2)
plt.xticks(rotation = 25)
ax = plt.gca()
# Intended to show micro-seconds, but facing some problem,
# see REVISIT above.
# xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S.%f')
xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S')
ax.xaxis.set_major_formatter(xfmt)
    # Avoid scientific notation, use plain numbers.
ax.get_yaxis().get_major_formatter().set_scientific(False)
# Make the numbers comma separated.
ax.get_yaxis().set_major_formatter(
matplotlib.ticker.FuncFormatter(lambda bytes, p: format(int(bytes), ',')))
    # Intended to put the y-axis numbers on both sides, but it is not working.
ax.yaxis.set_ticks_position('both')
plt.plot(datetimeList, outBytesList)
plt.title('Outstanding Bytes Timeseries')
plt.ylabel('bytes')
plt.xlabel('timestamp')
plt.grid(True)
plt.show()
# main
if len(sys.argv) == 1:
print "usage: {} <input-text-file>".format(sys.argv[0])
sys.exit(1)
driver(sys.argv[1])
| 2.671875 | 3 |
import.py | vmariano/meme-classifier | 0 | 3278 | from dotenv import load_dotenv
load_dotenv()
import sys
import os
import re
import json
import psycopg2
from meme_classifier.images import process_image
path = sys.argv[1]
data = json.load(open(os.path.join(path, 'result.json'), 'r'))
chat_id = data['id']
conn = psycopg2.connect(os.getenv('POSTGRES_CREDENTIALS'))
for m in data['messages']:
if 'photo' in m:
template, text = process_image(open(os.path.join(path, m['photo']), 'rb'))
message_id = m['id']
print(f'processing message {message_id}')
cur = conn.cursor()
cur.execute("INSERT INTO meme (template, text, chat_id, message_id) VALUES (%s, %s, %s, %s)", (template, text, chat_id, message_id))
conn.commit()
| 2.390625 | 2 |
nps/migrations/0013_auto_20180314_1805.py | jak0203/nps-dash | 0 | 3279 | # Generated by Django 2.0.3 on 2018-03-15 01:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('nps', '0012_auto_20180314_1600'),
]
operations = [
migrations.CreateModel(
name='ClientAggregations',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('client', models.CharField(max_length=30)),
('survey', models.CharField(max_length=30)),
('user_type', models.CharField(blank=True, default=None, max_length=30, null=True)),
('nps_score', models.FloatField()),
('total_responses', models.IntegerField()),
('promoters', models.IntegerField()),
('detractors', models.IntegerField()),
('neutral', models.IntegerField()),
('percent_detractors', models.FloatField(blank=True, default=None, null=True)),
('percent_promoters', models.FloatField(blank=True, default=None, null=True)),
('percent_neutral', models.FloatField(blank=True, default=None, null=True)),
('statistically_significant', models.BooleanField(default=False)),
],
),
migrations.DeleteModel(
name='AggregatedResults',
),
migrations.DeleteModel(
name='ProductUsers',
),
migrations.RenameField(
model_name='productaggregations',
old_name='total_detractors',
new_name='detractors',
),
migrations.RenameField(
model_name='productaggregations',
old_name='total_neutral',
new_name='neutral',
),
migrations.RenameField(
model_name='productaggregations',
old_name='number_clients_negative',
new_name='num_clients_negative',
),
migrations.RenameField(
model_name='productaggregations',
old_name='number_clients_positive',
new_name='num_clients_positive',
),
migrations.RenameField(
model_name='productaggregations',
old_name='total_promoters',
new_name='promoters',
),
migrations.RenameField(
model_name='surveyaggregations',
old_name='total_detractors',
new_name='detractors',
),
migrations.RenameField(
model_name='surveyaggregations',
old_name='total_neutral',
new_name='neutral',
),
migrations.RenameField(
model_name='surveyaggregations',
old_name='number_clients_negative',
new_name='num_clients_negative',
),
migrations.RenameField(
model_name='surveyaggregations',
old_name='number_clients_positive',
new_name='num_clients_positive',
),
migrations.RenameField(
model_name='surveyaggregations',
old_name='total_promoters',
new_name='promoters',
),
migrations.RemoveField(
model_name='productaggregations',
name='number_clients_neutral',
),
migrations.RemoveField(
model_name='productaggregations',
name='percent_clients_neutral',
),
migrations.RemoveField(
model_name='surveyaggregations',
name='number_clients_neutral',
),
migrations.RemoveField(
model_name='surveyaggregations',
name='percent_clients_neutral',
),
migrations.AddField(
model_name='productaggregations',
name='user_type',
field=models.CharField(blank=True, default=None, max_length=30, null=True),
),
migrations.AddField(
model_name='surveyaggregations',
name='user_type',
field=models.CharField(blank=True, default=None, max_length=30, null=True),
),
migrations.AlterUniqueTogether(
name='clientaggregations',
unique_together={('client', 'survey', 'user_type')},
),
]
| 1.679688 | 2 |
docs/schema_mapping.py | NoAnyLove/pydantic | 1 | 3280 | <reponame>NoAnyLove/pydantic
#!/usr/bin/env python3
"""
Build a table of Python / Pydantic to JSON Schema mappings.
Done like this rather than as a raw rst table to make future edits easier.
Please edit this file directly, not .tmp_schema_mappings.rst
"""
table = [
[
'bool',
'boolean',
'',
'JSON Schema Core',
''
],
[
'str',
'string',
'',
'JSON Schema Core',
''
],
[
'float',
'number',
'',
'JSON Schema Core',
''
],
[
'int',
'integer',
'',
'JSON Schema Validation',
''
],
[
'dict',
'object',
'',
'JSON Schema Core',
''
],
[
'list',
'array',
'',
'JSON Schema Core',
''
],
[
'tuple',
'array',
'',
'JSON Schema Core',
''
],
[
'set',
'array',
'{"uniqueItems": true}',
'JSON Schema Validation',
''
],
[
'List[str]',
'array',
'{"items": {"type": "string"}}',
'JSON Schema Validation',
'And equivalently for any other sub type, e.g. List[int].'
],
[
'Tuple[str, int]',
'array',
'{"items": [{"type": "string"}, {"type": "integer"}]}',
'JSON Schema Validation',
(
'And equivalently for any other set of subtypes. Note: If using schemas for OpenAPI, '
'you shouldn\'t use this declaration, as it would not be valid in OpenAPI (although it is '
'valid in JSON Schema).'
)
],
[
'Dict[str, int]',
'object',
'{"additionalProperties": {"type": "integer"}}',
'JSON Schema Validation',
(
            'And equivalently for any other subfields for dicts. Keep in mind that although you can use other types as '
'keys for dicts with Pydantic, only strings are valid keys for JSON, and so, only str is valid as '
'JSON Schema key types.'
)
],
[
'Union[str, int]',
'anyOf',
'{"anyOf": [{"type": "string"}, {"type": "integer"}]}',
'JSON Schema Validation',
'And equivalently for any other subfields for unions.'
],
[
'Enum',
'enum',
'{"enum": [...]}',
'JSON Schema Validation',
'All the literal values in the enum are included in the definition.'
],
[
'SecretStr',
'string',
'{"writeOnly": true}',
'JSON Schema Validation',
''
],
[
'SecretBytes',
'string',
'{"writeOnly": true}',
'JSON Schema Validation',
''
],
[
'EmailStr',
'string',
'{"format": "email"}',
'JSON Schema Validation',
''
],
[
'NameEmail',
'string',
'{"format": "name-email"}',
'Pydantic standard "format" extension',
''
],
[
'UrlStr',
'string',
'{"format": "uri"}',
'JSON Schema Validation',
''
],
[
'DSN',
'string',
'{"format": "dsn"}',
'Pydantic standard "format" extension',
''
],
[
'bytes',
'string',
'{"format": "binary"}',
'OpenAPI',
''
],
[
'Decimal',
'number',
'',
'JSON Schema Core',
''
],
[
'UUID1',
'string',
'{"format": "uuid1"}',
'Pydantic standard "format" extension',
''
],
[
'UUID3',
'string',
'{"format": "uuid3"}',
'Pydantic standard "format" extension',
''
],
[
'UUID4',
'string',
'{"format": "uuid4"}',
'Pydantic standard "format" extension',
''
],
[
'UUID5',
'string',
'{"format": "uuid5"}',
'Pydantic standard "format" extension',
''
],
[
'UUID',
'string',
'{"format": "uuid"}',
'Pydantic standard "format" extension',
'Suggested in OpenAPI.'
],
[
'FilePath',
'string',
'{"format": "file-path"}',
'Pydantic standard "format" extension',
''
],
[
'DirectoryPath',
'string',
'{"format": "directory-path"}',
'Pydantic standard "format" extension',
''
],
[
'Path',
'string',
'{"format": "path"}',
'Pydantic standard "format" extension',
''
],
[
'datetime',
'string',
'{"format": "date-time"}',
'JSON Schema Validation',
''
],
[
'date',
'string',
'{"format": "date"}',
'JSON Schema Validation',
''
],
[
'time',
'string',
'{"format": "time"}',
'JSON Schema Validation',
''
],
[
'timedelta',
'number',
'{"format": "time-delta"}',
'Difference in seconds (a ``float``), with Pydantic standard "format" extension',
'Suggested in JSON Schema repository\'s issues by maintainer.'
],
[
'Json',
'string',
'{"format": "json-string"}',
'Pydantic standard "format" extension',
''
],
[
'IPvAnyAddress',
'string',
'{"format": "ipvanyaddress"}',
'Pydantic standard "format" extension',
'IPv4 or IPv6 address as used in ``ipaddress`` module',
],
[
'IPvAnyInterface',
'string',
'{"format": "ipvanyinterface"}',
'Pydantic standard "format" extension',
'IPv4 or IPv6 interface as used in ``ipaddress`` module',
],
[
'IPvAnyNetwork',
'string',
'{"format": "ipvanynetwork"}',
'Pydantic standard "format" extension',
'IPv4 or IPv6 network as used in ``ipaddress`` module',
],
[
'StrictStr',
'string',
'',
'JSON Schema Core',
''
],
[
'ConstrainedStr',
'string',
'',
'JSON Schema Core',
(
'If the type has values declared for the constraints, they are included as validations. '
'See the mapping for ``constr`` below.'
)
],
[
'constr(regex=\'^text$\', min_length=2, max_length=10)',
'string',
'{"pattern": "^text$", "minLength": 2, "maxLength": 10}',
'JSON Schema Validation',
'Any argument not passed to the function (not defined) will not be included in the schema.'
],
[
'ConstrainedInt',
'integer',
'',
'JSON Schema Core',
(
'If the type has values declared for the constraints, they are included as validations. '
'See the mapping for ``conint`` below.'
)
],
[
'conint(gt=1, ge=2, lt=6, le=5, multiple_of=2)',
'integer',
'{"maximum": 5, "exclusiveMaximum": 6, "minimum": 2, "exclusiveMinimum": 1, "multipleOf": 2}',
'',
'Any argument not passed to the function (not defined) will not be included in the schema.'
],
[
'PositiveInt',
'integer',
'{"exclusiveMinimum": 0}',
'JSON Schema Validation',
''
],
[
'NegativeInt',
'integer',
'{"exclusiveMaximum": 0}',
'JSON Schema Validation',
''
],
[
'ConstrainedFloat',
'number',
'',
'JSON Schema Core',
(
'If the type has values declared for the constraints, they are included as validations.'
'See the mapping for ``confloat`` below.'
)
],
[
'confloat(gt=1, ge=2, lt=6, le=5, multiple_of=2)',
'number',
'{"maximum": 5, "exclusiveMaximum": 6, "minimum": 2, "exclusiveMinimum": 1, "multipleOf": 2}',
'JSON Schema Validation',
'Any argument not passed to the function (not defined) will not be included in the schema.'
],
[
'PositiveFloat',
'number',
'{"exclusiveMinimum": 0}',
'JSON Schema Validation',
''
],
[
'NegativeFloat',
'number',
'{"exclusiveMaximum": 0}',
'JSON Schema Validation',
''
],
[
'ConstrainedDecimal',
'number',
'',
'JSON Schema Core',
(
'If the type has values declared for the constraints, they are included as validations. '
'See the mapping for ``condecimal`` below.'
)
],
[
'condecimal(gt=1, ge=2, lt=6, le=5, multiple_of=2)',
'number',
'{"maximum": 5, "exclusiveMaximum": 6, "minimum": 2, "exclusiveMinimum": 1, "multipleOf": 2}',
'JSON Schema Validation',
'Any argument not passed to the function (not defined) will not be included in the schema.'
],
[
'BaseModel',
'object',
'',
'JSON Schema Core',
'All the properties defined will be defined with standard JSON Schema, including submodels.'
]
]
headings = [
'Python type',
'JSON Schema Type',
'Additional JSON Schema',
'Defined in',
'Notes',
]
v = ''
col_width = 300
for _ in range(5):
v += '+' + '-' * col_width
v += '+\n|'
for heading in headings:
v += f' {heading:{col_width - 2}} |'
v += '\n'
for _ in range(5):
v += '+' + '=' * col_width
v += '+'
for row in table:
v += '\n|'
for i, text in enumerate(row):
text = f'``{text}``' if i < 3 and text else text
v += f' {text:{col_width - 2}} |'
v += '\n'
for _ in range(5):
v += '+' + '-' * col_width
v += '+'
with open('.tmp_schema_mappings.rst', 'w') as f:
f.write(v)
| 2.34375 | 2 |
hubspot3/test/test_broadcast.py | kevin2357/hubspot3 | 1 | 3281 | import time
import unittest
from nose.plugins.attrib import attr
from hubspot3.test import helper
from hubspot3.broadcast import Broadcast, BroadcastClient
class BroadcastClientTest(unittest.TestCase):
""" Unit tests for the HubSpot Broadcast API Python client.
This file contains some unittest tests for the Broadcast API.
Questions, comments: http://docs.hubapi.com/wiki/Discussion_Group
"""
def setUp(self):
self.client = BroadcastClient(**helper.get_options())
self.broadcast_guids = None
def tearDown(self):
# Cancel any broadcasts created as part of the tests
if self.broadcast_guids:
list(map(self.client.cancel_broadcast, self.broadcast_guids))
@attr("api")
def test_get_broadcasts(self):
# Should fetch at least 1 broadcast on the test portal 62515
broadcasts = self.client.get_broadcasts(limit=1)
self.assertTrue(len(broadcasts) > 0)
broadcast = broadcasts[0].to_dict()
self.assertIsNotNone(broadcast["channelGuid"])
print("\n\nFetched some broadcasts")
broadcast_guid = broadcast["broadcastGuid"]
# Re-fetch the broadcast using different call
bcast = self.client.get_broadcast(broadcast_guid)
# Should have expected fields
self.assertIsNotNone(bcast.broadcast_guid)
self.assertIsNotNone(bcast.channel_guid)
self.assertIsNotNone(bcast.status)
@attr("api")
def test_get_channels(self):
# Fetch older channels ensured to exist
channels = self.client.get_channels(current=True)
self.assertTrue(len(channels) > 0)
@attr("api")
def test_create_broadcast(self):
content = dict(body="Test hubspot3 unit tests http://www.hubspot.com")
channels = self.client.get_channels(current=True, publish_only=True)
if len(channels) == 0:
self.fail("Failed to find a publishable channel")
channel = channels[0]
# Get a trigger in the future
trigger_at = int(time.time() + 6000) * 1000
bcast = Broadcast(
{
"content": content,
"triggerAt": trigger_at,
"channelGuid": channel.channel_guid,
}
)
try:
resp = self.client.create_broadcast(bcast)
broadcast = Broadcast(resp)
self.assertIsNotNone(broadcast.broadcast_guid)
self.assertEqual(channel.channel_guid, broadcast.channel_guid)
# Ensure it is canceled
self.broadcast_guids = []
self.broadcast_guids.append(broadcast.broadcast_guid)
except Exception as e:
self.fail("Should not have raised exception: {}".format(e))
if __name__ == "__main__":
unittest.main()
| 2.6875 | 3 |
benchmark/benchmarks/testdata.py | theroggy/geofile_ops | 0 | 3282 | <filename>benchmark/benchmarks/testdata.py
# -*- coding: utf-8 -*-
"""
Module to prepare test data for benchmarking geo operations.
"""
import enum
import logging
from pathlib import Path
import pprint
import shutil
import sys
import tempfile
from typing import Optional
import urllib.request
import zipfile
# Add path so the benchmark packages are found
sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent))
import geofileops as gfo
################################################################################
# Some inits
################################################################################
logger = logging.getLogger(__name__)
################################################################################
# The real work
################################################################################
class TestFile(enum.Enum):
AGRIPRC_2018 = (
0,
"https://downloadagiv.blob.core.windows.net/landbouwgebruikspercelen/2018/Landbouwgebruikspercelen_LV_2018_GewVLA_Shape.zip",
"agriprc_2018.gpkg",
)
AGRIPRC_2019 = (
1,
"https://downloadagiv.blob.core.windows.net/landbouwgebruikspercelen/2019/Landbouwgebruikspercelen_LV_2019_GewVLA_Shapefile.zip",
"agriprc_2019.gpkg",
)
COMMUNES = (
2,
"https://downloadagiv.blob.core.windows.net/referentiebestand-gemeenten/VoorlopigRefBestandGemeentegrenzen_2019-01-01/VRBG_toestand_16_05_2018_(geldend_vanaf_01_01_2019)_GewVLA_Shape.zip",
"communes.gpkg",
)
def __init__(self, value, url, filename):
self._value_ = value
self.url = url
self.filename = filename
def get_file(self, tmp_dir: Path) -> Path:
testfile_path = download_samplefile(
url=self.url, dst_name=self.filename, dst_dir=tmp_dir
)
testfile_info = gfo.get_layerinfo(testfile_path)
logger.debug(
f"TestFile {self.name} contains {testfile_info.featurecount} rows."
)
return testfile_path
def download_samplefile(
url: str, dst_name: str, dst_dir: Optional[Path] = None
) -> Path:
"""
Download a sample file to dest_path.
If it is zipped, it will be unzipped. If needed, it will be converted to
the file type as determined by the suffix of dst_name.
Args:
        url (str): the url of the file to download
        dst_name (str): the name to give the downloaded file; its suffix
            determines the file type it is converted to if needed.
        dst_dir (Path): the dir to download the sample file to.
If it is None, a dir in the default tmp location will be
used. Defaults to None.
Returns:
Path: the path to the downloaded sample file.
"""
# If the destination path is a directory, use the default file name
dst_path = prepare_dst_path(dst_name, dst_dir)
# If the sample file already exists, return
if dst_path.exists():
return dst_path
# Make sure the destination directory exists
dst_path.parent.mkdir(parents=True, exist_ok=True)
# If the url points to a file with the same suffix as the dst_path,
# just download
url_path = Path(url)
if url_path.suffix.lower() == dst_path.suffix.lower():
logger.info(f"Download to {dst_path}")
urllib.request.urlretrieve(url, dst_path)
else:
        # The downloaded file is different than the destination wanted, so some
        # converting will need to be done
tmp_dir = dst_path.parent / "tmp"
try:
# Remove tmp dir if it exists already
if tmp_dir.exists():
shutil.rmtree(tmp_dir)
tmp_dir.mkdir(parents=True, exist_ok=True)
# Download file
tmp_path = tmp_dir / f"{dst_path.stem}{url_path.suffix.lower()}"
logger.info(f"Download tmp data to {tmp_path}")
urllib.request.urlretrieve(url, tmp_path)
# If the temp file is a .zip file, unzip to dir
if tmp_path.suffix == ".zip":
# Unzip
unzippedzip_dir = dst_path.parent / tmp_path.stem
logger.info(f"Unzip to {unzippedzip_dir}")
with zipfile.ZipFile(tmp_path, "r") as zip_ref:
zip_ref.extractall(unzippedzip_dir)
# Look for the file
tmp_paths = []
for suffix in [".shp", ".gpkg"]:
tmp_paths.extend(list(unzippedzip_dir.rglob(f"*{suffix}")))
if len(tmp_paths) == 1:
tmp_path = tmp_paths[0]
else:
raise Exception(
f"Should find 1 geofile, found {len(tmp_paths)}: \n{pprint.pformat(tmp_paths)}"
)
if dst_path.suffix == tmp_path.suffix:
gfo.move(tmp_path, dst_path)
else:
logger.info(f"Convert tmp file to {dst_path}")
gfo.makevalid(tmp_path, dst_path)
finally:
if tmp_dir.exists():
shutil.rmtree(tmp_dir)
return dst_path
def prepare_dst_path(dst_name: str, dst_dir: Optional[Path] = None):
if dst_dir is None:
return Path(tempfile.gettempdir()) / "geofileops_sampledata" / dst_name
else:
return dst_dir / dst_name
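# Illustrative usage (note: this triggers a real download of the sample data and
# requires the geofileops dependencies to be installed):
if __name__ == "__main__":
    sample_dir = Path(tempfile.gettempdir()) / "geofileops_benchmark_data"
    communes_path = TestFile.COMMUNES.get_file(sample_dir)
    print(f"sample data ready at {communes_path}")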
| 2.34375 | 2 |
relocation/depth/setup_relocation_dir.py | ziyixi/SeisScripts | 0 | 3283 | <reponame>ziyixi/SeisScripts
"""
Set up the earthquake depth relocation directory.
"""
import obspy
import sh
import numpy as np
import click
from os.path import join
from glob import glob
import copy
def generate_new_cmtsolution_files(cmts_dir, generated_cmts_dir, depth_perturbation_list):
cmt_names = glob(join(cmts_dir, "*"))
for cmt_file in cmt_names:
event = obspy.read_events(cmt_file)[0]
# gcmt_id = event.resource_id.id.split("/")[-2]
# there are some problems in changing names
gcmt_id = cmt_file.split("/")[-1]
# assume dirs like f"{generated_cmts_dir}/d-3" have already been created
for depth_per in depth_perturbation_list:
generated_name = join(generated_cmts_dir, f"d{depth_per}", gcmt_id)
            # there are always problems copying the event, so read the event in again here
event_this_depth = obspy.read_events(cmt_file)[0]
# event_this_depth = event.copy()
event_this_depth.origins[0].depth += 1000.0*depth_per
# print(generated_name, generated_cmts_dir, f"d{depth_per}", gcmt_id)
event_this_depth.write(generated_name, format="CMTSOLUTION")
def setup_basic_structure(main_dir, ref_dir, cmts_dir, depth_perturbation_list):
# main
sh.mkdir("-p", main_dir)
# ref
sh.cp("-r", ref_dir, join(main_dir, "ref"))
# refine the structure in ref
sh.rm("-rf", join(main_dir, "ref", "DATABASES_MPI"))
sh.rm("-rf", join(main_dir, "ref", "EXAMPLES"))
sh.rm("-rf", join(main_dir, "ref", "OUTPUT_FILES"))
sh.rm("-rf", join(main_dir, "ref", "doc"))
sh.rm("-rf", join(main_dir, "ref", "tests"))
# mv DATA and utils to upper level
sh.mv(join(main_dir, "ref", "DATA"), main_dir)
sh.mv(join(main_dir, "ref", "utils"), main_dir)
# cmts
sh.mkdir("-p", join(main_dir, "cmts"))
sh.cp("-r", cmts_dir, join(main_dir, "cmts", "cmts_raw"))
sh.mkdir("-p", join(main_dir, "cmts", "cmts_generated"))
for depth_per in depth_perturbation_list:
sh.mkdir("-p", join(main_dir, "cmts",
"cmts_generated", f"d{depth_per}"))
# working directory
sh.mkdir("-p", join(main_dir, "work"))
def setup_structure_after_generat_cmts(main_dir, output_dir, depth_perturbation_list):
# get cmts names
cmt_dirs = glob(join(main_dir, "cmts", "cmts_raw", "*"))
cmt_names = [item.split("/")[-1] for item in cmt_dirs]
# mkdirs
for cmt_name in cmt_names:
sh.mkdir(join(main_dir, "work", cmt_name))
for depth_per in depth_perturbation_list:
# sh.mkdir(join(main_dir, "work", cmt_name, f"d{depth_per}"))
# cp ref to working dirs
sh.cp("-r", join(main_dir, "ref"),
join(main_dir, "work", cmt_name, f"d{depth_per}"))
# mv DATA and utils back to ref
sh.mv(join(main_dir, "DATA"), join(main_dir, "ref", "DATA"))
sh.mv(join(main_dir, "utils"), join(main_dir, "ref", "utils"))
# mkdir DATA in work directory
for cmt_name in cmt_names:
for depth_per in depth_perturbation_list:
sh.mkdir(join(main_dir, "work", cmt_name, f"d{depth_per}", "DATA"))
# cp and ln files in DATA
toln = ["cemRequest", "crust1.0", "crust2.0",
"crustmap", "epcrust", "eucrust-07", "GLL", "heterogen", "Lebedev_sea99", "Montagner_model", "old", "PPM", "QRFSI12", "s20rts", "s362ani", "s40rts", "Simons_model", "topo_bathy", "Zhao_JP_model"]
for cmt_name in cmt_names:
for depth_per in depth_perturbation_list:
sh.cp(join(main_dir, "cmts", "cmts_generated",
f"d{depth_per}", cmt_name), join(main_dir, "work", cmt_name, f"d{depth_per}", "DATA", "CMTSOLUTION"))
sh.cp(join(main_dir, "ref", "DATA", "Par_file"), join(
main_dir, "work", cmt_name, f"d{depth_per}", "DATA", "Par_file"))
sh.cp(join(main_dir, "ref", "DATA", "STATIONS"), join(
main_dir, "work", cmt_name, f"d{depth_per}", "DATA", "STATIONS"))
for lnfile in toln:
sh.ln("-s", join(main_dir, "ref", "DATA", lnfile), join(
main_dir, "work", cmt_name, f"d{depth_per}", "DATA", lnfile))
# ln in work files
toln_work = ["utils"]
for lnfile in toln_work:
sh.ln("-s", join(main_dir, "ref", lnfile), join(
main_dir, "work", cmt_name, f"d{depth_per}", lnfile))
# mkdir and ln DATABASE_MPI and OUTPUT_FILES
sh.mkdir("-p", output_dir)
sh.mkdir("-p", join(output_dir, "DATABASES_MPI"))
sh.mkdir("-p", join(output_dir, "OUTPUT_FILES"))
for cmt_name in cmt_names:
for depth_per in depth_perturbation_list:
sh.mkdir("-p", join(output_dir, "DATABASES_MPI",
cmt_name, f"d{depth_per}"))
sh.mkdir("-p", join(output_dir, "OUTPUT_FILES",
cmt_name, f"d{depth_per}"))
sh.ln("-s", join(output_dir, "DATABASES_MPI",
cmt_name, f"d{depth_per}"), join(main_dir, "work", cmt_name, f"d{depth_per}", "DATABASES_MPI"))
sh.ln("-s", join(output_dir, "OUTPUT_FILES",
cmt_name, f"d{depth_per}"), join(main_dir, "work", cmt_name, f"d{depth_per}", "OUTPUT_FILES"))
@click.command()
@click.option('--main_dir', required=True, help="the main working directory", type=str)
@click.option('--output_dir', required=True, help="the output directory in scratch", type=str)
@click.option('--ref_dir', required=True, help="the reference specfem directory", type=str)
@click.option('--cmts_dir', required=True, help="the cmt solution directory", type=str)
@click.option('--depth_perturbation', required=True, help="the depth perturbation, use something like -3,-1,5 (in km)", type=str)
def main(main_dir, output_dir, ref_dir, cmts_dir, depth_perturbation):
depth_perturbation_list = [float(item)
for item in depth_perturbation.split(",")]
setup_basic_structure(main_dir, ref_dir, cmts_dir, depth_perturbation_list)
generated_cmts_dir = join(main_dir, "cmts", "cmts_generated")
working_cmts_dir = join(main_dir, "cmts", "cmts_raw")
generate_new_cmtsolution_files(
working_cmts_dir, generated_cmts_dir, depth_perturbation_list)
setup_structure_after_generat_cmts(
main_dir, output_dir, depth_perturbation_list)
if __name__ == "__main__":
main()
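# Example invocation (all paths and values below are placeholders):
#   python setup_relocation_dir.py \
#       --main_dir /scratch/user/relocation \
#       --output_dir /scratch/user/relocation_output \
#       --ref_dir /path/to/specfem3d_globe \
#       --cmts_dir /path/to/cmtsolutions \
#       --depth_perturbation -3,-1,1,3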
| 2.046875 | 2 |
python-client/trustedanalytics/core/atktypes.py | blbarker/atk | 1 | 3284 | <reponame>blbarker/atk
# vim: set encoding=utf-8
#
# Copyright (c) 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
trusted_analytics definitions for Data Types
"""
# TODO - consider server providing types, similar to commands
__all__ = ['valid_data_types', 'ignore', 'unknown', 'float32', 'float64', 'int32', 'int64', 'vector', 'unit', 'datetime']
import numpy as np
import json
import re
# alias numpy types
float32 = np.float32
float64 = np.float64
int32 = np.int32
int64 = np.int64
from datetime import datetime
import dateutil.parser as datetime_parser
# Chose python's datetime over numpy.datetime64 because of time zone support and string serialization
# Here's a long thread discussing numpy's datetime64 timezone problem:
# http://mail.scipy.org/pipermail/numpy-discussion/2013-April/066038.html
# If need be, UDFs can create numpy objects from x using: numpy.datatime64(x.isoformat())
class _Vector(object):
base_type = np.ndarray
re_pattern = re.compile(r"^vector\((\d+)\)$")
def __init__(self, length):
self.length = int(length)
self.is_complex_type = True
self.constructor = self._get_constructor()
def _get_constructor(self):
length = self.length
def constructor(value):
"""
Creates a numpy array from a value, which can be one of many types
"""
if value is None:
return None
try:
# first try numpy's constructor
array = np.array(value, dtype=np.float64) # ensures the array is entirely made of doubles
except:
# also support json or comma-sep string
if valid_data_types.value_is_string(value):
try:
value = json.loads(value)
except:
value = [np.float64(item.strip()) for item in value.split(',') if item]
array = np.array(value, dtype=np.float64) # ensures the array is entirely made of doubles
else:
raise
array = np.atleast_1d(array) # numpy thing, so that vectors of size 1 will still have dimension and length
if len(array) != length:
raise ValueError("Could not construct vector in Python Client. Expected vector of length %s, but received length %d" % (length, len(array)))
return array
return constructor
@staticmethod
def get_from_string(data_type_str):
return _Vector(_Vector.re_pattern.match(data_type_str).group(1))
def __repr__(self):
return "vector(%d)" % self.length
vector = _Vector
class _Unit(object):
"""Ignore type used for schemas during file import"""
pass
unit = _Unit
class _Ignore(object):
"""Ignore type used for schemas during file import"""
pass
ignore = _Ignore
class _Unknown(object):
"""Unknown type used when type is indeterminate"""
pass
unknown = _Unknown
# map types to their string identifier
_primitive_type_to_str_table = {
#bool: "bool", TODO
#bytearray: "bytearray", TODO
#dict: "dict", TODO
float32: "float32",
float64: "float64",
int32: "int32",
int64: "int64",
#list: "list", TODO
unicode: "unicode",
ignore: "ignore",
datetime: "datetime",
}
# build reverse map string -> type
_primitive_str_to_type_table = dict([(s, t) for t, s in _primitive_type_to_str_table.iteritems()])
_primitive_alias_type_to_type_table = {
float: float64,
int: int32,
long: int64,
str: unicode,
#list: vector,
}
_primitive_alias_str_to_type_table = dict([(alias.__name__, t) for alias, t in _primitive_alias_type_to_type_table.iteritems()])
_primitive_type_to_default_value = {
#bool: False, TODO
float32: 0.0,
float64: 0.0,
int32: 0,
int64: 0,
unicode: "",
#datetime: "datetime",
}
def get_float_constructor(float_type):
"""Creates special constructor for floating point types which handles nan, inf, -inf"""
ft = float_type
def float_constructor(value):
result = ft(value)
if np.isnan(result) or result == np.inf or result == -np.inf: # this is 5x faster than calling np.isfinite()
return None
return ft(value)
return float_constructor
def datetime_constructor(value):
"""Creates special constructor for datetime parsing"""
if valid_data_types.value_is_string(value):
return datetime_parser.parse(value)
else:
try:
return datetime(*value)
except:
raise TypeError("cannot convert type to the datetime")
class _DataTypes(object):
"""
    Provides functions which define and operate on supported data types.
"""
def __contains__(self, item):
try:
self.validate(item)
return True
except ValueError:
return False
def __repr__(self):
aliases = "\n(and aliases: %s)" % (", ".join(sorted(["%s->%s" % (alias.__name__, self.to_string(data_type)) for alias, data_type in _primitive_alias_type_to_type_table.iteritems()])))
return ", ".join(sorted(_primitive_str_to_type_table.keys() + ["vector(n)"])) + aliases
@staticmethod
def value_is_string(value):
"""get bool indication that value is a string, whether str or unicode"""
return isinstance(value, basestring)
@staticmethod
def value_is_missing_value(value):
return value is None or (type(value) in [float32, float64, float] and (np.isnan(value) or value in [np.inf, -np.inf]))
@staticmethod
def get_primitive_data_types():
return _primitive_type_to_str_table.keys()
@staticmethod
def to_string(data_type):
"""
Returns the string representation of the given type
Parameters
----------
data_type : type
valid data type; if invalid, a ValueError is raised
Returns
-------
result : str
string representation
Examples
--------
>>> valid_data_types.to_string(float32)
'float32'
"""
valid_data_type = _DataTypes.get_from_type(data_type)
try:
return _primitive_type_to_str_table[valid_data_type]
except KeyError:
# complex data types should use their repr
return repr(valid_data_type)
@staticmethod
def get_from_string(data_type_str):
"""
Returns the data type for the given type string representation
Parameters
----------
data_type_str : str
valid data type str; if invalid, a ValueError is raised
Returns
-------
result : type
type represented by the string
Examples
--------
>>> valid_data_types.get_from_string('unicode')
unicode
"""
try:
return _primitive_str_to_type_table[data_type_str]
except KeyError:
try:
return _primitive_alias_str_to_type_table[data_type_str]
except KeyError:
try:
return vector.get_from_string(data_type_str)
except:
raise ValueError("Unsupported type string '%s' " % data_type_str)
@staticmethod
def is_primitive_type(data_type):
return data_type in _primitive_type_to_str_table or data_type in _primitive_alias_type_to_type_table
@staticmethod
def is_complex_type(data_type):
try:
return data_type.is_complex_type
except AttributeError:
return False
@staticmethod
def is_primitive_alias_type(data_type):
return data_type in _primitive_alias_type_to_type_table
@staticmethod
def get_from_type(data_type):
"""
Returns the data type for the given type (often it will return the same type)
Parameters
----------
data_type : type
valid data type or type that may be aliased for a valid data type;
if invalid, a ValueError is raised
Returns
-------
result : type
valid data type for given type
Examples
--------
>>> valid_data_types.get_from_type(int)
numpy.int32
"""
if _DataTypes.is_primitive_alias_type(data_type):
return _primitive_alias_type_to_type_table[data_type]
if _DataTypes.is_primitive_type(data_type) or _DataTypes.is_complex_type(data_type):
return data_type
raise ValueError("Unsupported type %s" % data_type)
@staticmethod
def validate(data_type):
"""Raises a ValueError if data_type is not a valid data_type"""
_DataTypes.get_from_type(data_type)
@staticmethod
def get_constructor(to_type):
"""gets the constructor for the to_type"""
try:
return to_type.constructor
except AttributeError:
if to_type == float64 or to_type == float32:
return get_float_constructor(to_type)
if to_type == datetime:
return datetime_constructor
def constructor(value):
if value is None:
return None
return to_type(value)
return constructor
@staticmethod
def standardize_schema(schema):
return [(name, _DataTypes.get_from_type(t)) for name, t in schema]
@staticmethod
def validate_data(schema, data):
return [_DataTypes.cast(value, data_type) for value, data_type in zip(data, map(lambda t: t[1], schema))]
@staticmethod
def get_default_data_for_schema(schema):
return [_DataTypes.get_default_type_value(data_type) for name, data_type in schema]
@staticmethod
def get_default_type_value(data_type):
try:
return _primitive_type_to_default_value[data_type]
except KeyError:
if data_type == vector:
return []
if data_type == datetime:
return datetime.now()
raise ValueError("Unable to find default value for data type %s (invalid data type)" % data_type)
@staticmethod
def cast(value, to_type):
"""
Returns the given value cast to the given type. None is always returned as None
Parameters
----------
value : object
value to convert by casting
to_type : type
valid data type to use for the cast
Returns
-------
results : object
the value cast to the to_type
Examples
--------
>>> valid_data_types.cast(3, float64)
3.0
>>> valid_data_types.cast(4.5, str)
'4.5'
>>> valid_data_types.cast(None, str)
None
>>> valid_data_types.cast(np.inf, float32)
None
"""
if _DataTypes.value_is_missing_value(value): # Special handling for missing values
return None
elif _DataTypes.is_primitive_type(to_type) and type(value) is to_type: # Optimization
return value
try:
constructor = _DataTypes.get_constructor(to_type)
result = constructor(value)
return None if _DataTypes.value_is_missing_value(result) else result
except Exception as e:
raise ValueError(("Unable to cast to type %s\n" % to_type) + str(e))
@staticmethod
def datetime_from_iso(iso_string):
"""create datetime object from ISO 8601 string"""
return datetime_parser.parse(iso_string)
valid_data_types = _DataTypes()
def numpy_to_bson_friendly(obj):
"""take an object and convert it to a type that can be serialized to bson if neccessary."""
if isinstance(obj, float32) or isinstance(obj, float64):
return float(obj)
if isinstance(obj, int32):
return int(obj)
if isinstance(obj, vector.base_type):
return obj.tolist()
if isinstance(obj, datetime):
return obj.isoformat()
if isinstance(obj, dict):
return dict([(numpy_to_bson_friendly(key), numpy_to_bson_friendly(value)) for key, value in obj.items()])
if isinstance(obj, list):
return [numpy_to_bson_friendly(item) for item in obj]
# Let the base class default method raise the TypeError
return obj
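# Minimal sketch of the conversion helper (sample values are illustrative):
if __name__ == "__main__":
    sample = {
        "score": float32(0.5),
        "count": int32(3),
        "vec": np.array([1.0, 2.0]),
        "when": datetime(2015, 1, 1),
    }
    print(numpy_to_bson_friendly(sample))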
| 1.953125 | 2 |
srd/pageaggregator.py | poikilos/tabletopManualMiner | 0 | 3285 | #!/usr/bin/env python3
import math
try:
# from PDFPageDetailedAggregator:
from pdfminer.pdfdocument import PDFDocument, PDFNoOutlines
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import PDFPageAggregator
from pdfminer.layout import LTPage, LTChar, LTAnno, LAParams, LTTextBox, LTTextLine
except ModuleNotFoundError:
    import sys
    sys.stderr.write(
        "To use the aggregator (required for generating chunks.json)"
        " you must first install the following module for Python:\n"
    )
    sys.stderr.write("    pdfminer\n")
    sys.exit(1)
try:
input = raw_input
except NameError:
# Python 3
pass
# TODO:
from srd import (
objDict,
BBox,
DocChunk,
clean_frag_text,
clean_frag,
same_style,
frag_dict,
)
def ltannoDict(ltanno):
return objDict(ltanno)
'''
class DocFragment:
def __init__(self, text, fontname, size):
self.text = text
self.fontname = fontname
self.size = size
def sameStyle(self, fragment):
"""
Is same fontname and size.
"""
ffn = fragment.fontname
ffs = fragment.size
return (ffs == self.size) and (ffn == self.fontname)
def clean(self):
self.text = clean_frag_text(self.text)
'''
class PDFPageDetailedAggregator(PDFPageAggregator):
"""
This class is based on PDFPageDetailedAggregator from
lindblandro's Oct 4 '13 at 10:33 answer
edited by slushy Feb 4 '14 at 23:41
at <https://stackoverflow.com/a/19179114>
on <https://stackoverflow.com/questions/15737806/extract-text-using-
pdfminer-and-pypdf2-merges-columns>.
"""
def __init__(self, rsrcmgr, pageno=1, laparams=None,
colStarts=None):
PDFPageAggregator.__init__(self, rsrcmgr, pageno=pageno, laparams=laparams)
self.chunks = []
self.colStarts = colStarts
if self.colStarts is not None:
print("columns: {}".format(len(self.colStarts)))
self.page_number = 0
def receive_layout(self, ltpage):
def render(item, page_number):
if isinstance(item, LTPage) or isinstance(item, LTTextBox):
for child in item:
render(child, page_number)
elif isinstance(item, LTTextLine):
child_str = ''
fontSize = None
fontName = None
fontSizes = []
fontNames = []
warnings = []
parts = []
fragments = []
annotations = []
for child in item:
strp = None
if isinstance(child, LTChar):
child_str += child.get_text()
strp = child.get_text().strip()
# and (len(strp) > 0)
if fontName is not None:
if fontName != child.fontname:
warnings.append("mixed fontName")
if fontSize is not None:
if fontSize != child.size:
warnings.append("mixed fontSize")
fontName = child.fontname
fontSize = child.size
frag = frag_dict(
child.get_text(),
child.fontname,
child.size,
)
fragments.append(frag)
# fontNames.append(fontName)
# fontSizes.append(fontSize)
parts.append(strp)
elif isinstance(child, LTAnno):
child_str += child.get_text()
strp = child.get_text().strip()
annotations.append(ltannoDict(child))
child_str = ' '.join(child_str.split()).strip()
if child_str:
if len(warnings) > 0:
"""
print("Warnings in \"{}\":"
" {}: fonts {} sizes {} parts {}"
"".format(child_str, warnings, fontNames,
fontSizes, parts))
input("Press enter to continue...")
"""
fontSize = None
fontName = None
col = None
cols = 0
if self.colStarts is not None:
cols = len(self.colStarts)
if (cols is None) or (cols == 1):
col = 0
elif (cols == 2):
col = 0
col2Min = math.floor(self.colStarts[1])
if item.bbox[0] >= col2Min:
col = 1 # Index [1] is column 2.
else:
raise ValueError("Only a list of length 1 (same as None) or 2"
" is implemented for \"colStarts\".")
# if isinstance(child, LTChar):
'''
try:
fontName = child.fontname
fontSize = child.size
# Avoid "AttributeError:
# 'LTAnno' object has no attribute 'fontname'"
except AttributeError as ex:
print("dir(LTTextLine): {}".format(dir(LTTextLine)))
print("dir(child): {}".format(dir(child)))
raise ex
'''
chunk = DocChunk(
page_number,
col,
item.bbox,
child_str,
fontName=fontName,
fontSize=fontSize,
fragments=fragments,
annotations=annotations,
)
chunk.groupFragments()
self.chunks.append(chunk)
for child in item:
render(child, page_number)
return
render(ltpage, self.page_number)
self.page_number += 1
self.chunks = sorted(self.chunks, key = lambda f: (f.pageid, f.column, -f.bbox.y1))
self.result = ltpage
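# Minimal driver sketch (the PDF file name is a placeholder; this follows the
# usual pdfminer pattern of wiring an aggregator device to a page interpreter):
if __name__ == "__main__":
    from pdfminer.pdfpage import PDFPage

    rsrcmgr = PDFResourceManager()
    device = PDFPageDetailedAggregator(rsrcmgr, laparams=LAParams())
    interpreter = PDFPageInterpreter(rsrcmgr, device)
    with open("manual.pdf", "rb") as fp:
        for page in PDFPage.get_pages(fp):
            interpreter.process_page(page)
    for chunk in device.chunks[:5]:
        print(chunk)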
| 2.4375 | 2 |
ctrltest.py | dkim286/cpsc454-proj | 0 | 3286 | from pox.core import core
import pox.openflow.libopenflow_01 as of
from forwarding.l2_learning import *
from tkinter import *
from project.firewall import TestFW
from project.ui import UI
def setup():
top = Toplevel()
# quit POX when window is killed
top.protocol("WM_DELETE_WINDOW", core.quit)
top.title("firewall thing")
frame = Frame(top, padding="3")
frame.grid()
disp = Label(frame, text="hmm").grid(column=0, row=0)
def reload():
conn = core.openflow.getConnection(1)
        disp.configure(text=str(dir(conn)))
b_reload = Button(frame, text="reload", command=reload).grid(column=0, row=1)
b_quit = Button(frame, text="quit", command=top.destroy).grid(column=0, row=2)
def launch():
fw_list_dpid = [51, 52]
srv_list = {"web" : ['10.0.0.100']}
# register firewall
core.registerNew(TestFW, fw_list_dpid[0], srv_list)
# just use L2 learning switch for others
core.registerNew(l2_learning, False)
#core.registerNew(UI)
def start_ui():
core.tk.do(setup)
core.call_when_ready(start_ui, ['openflow', 'tk'])
| 2.5 | 2 |
virtual/lib/python3.6/site-packages/mako/__init__.py | kenmutuma001/Blog | 1 | 3287 | # mako/__init__.py
# Copyright (C) 2006-2016 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
__version__ = '1.0.9'
| 0.933594 | 1 |
image_classification/T2T_ViT/load_pytorch_weights.py | RangeKing/PaddleViT | 0 | 3288 | <reponame>RangeKing/PaddleViT<filename>image_classification/T2T_ViT/load_pytorch_weights.py
# Copyright (c) 2021 PPViT Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""convert pytorch model weights to paddle pdparams"""
import os
import numpy as np
import paddle
import torch
import timm
from config import get_config
from t2t_vit import build_t2t_vit as build_model
from T2T_ViT_torch.models.t2t_vit import *
from T2T_ViT_torch.utils import load_for_transfer_learning
def print_model_named_params(model):
print('----------------------------------')
for name, param in model.named_parameters():
print(name, param.shape)
print('----------------------------------')
def print_model_named_buffers(model):
print('----------------------------------')
for name, param in model.named_buffers():
print(name, param.shape)
print('----------------------------------')
def torch_to_paddle_mapping(model_name, config):
# (torch_param_name, paddle_param_name)
mapping = [
('cls_token', 'cls_token'),
('pos_embed', 'pos_embed'),
]
for idx in range(1, 3):
th_prefix = f'tokens_to_token.attention{idx}'
pp_prefix = f'patch_embed.attn{idx}'
if '_t_' in model_name:
layer_mapping = [
(f'{th_prefix}.attn.qkv', f'{pp_prefix}.attn.qkv'),
(f'{th_prefix}.attn.proj', f'{pp_prefix}.attn.proj'),
(f'{th_prefix}.norm1', f'{pp_prefix}.norm1'),
(f'{th_prefix}.norm2', f'{pp_prefix}.norm2'),
(f'{th_prefix}.mlp.fc1', f'{pp_prefix}.mlp.fc1'),
(f'{th_prefix}.mlp.fc2', f'{pp_prefix}.mlp.fc2'),
]
else:
layer_mapping = [
(f'{th_prefix}.w', f'{pp_prefix}.w'),
(f'{th_prefix}.kqv', f'{pp_prefix}.kqv'),
(f'{th_prefix}.proj', f'{pp_prefix}.proj'),
(f'{th_prefix}.norm1', f'{pp_prefix}.norm1'),
(f'{th_prefix}.norm2', f'{pp_prefix}.norm2'),
(f'{th_prefix}.mlp.0', f'{pp_prefix}.mlp.0'),
(f'{th_prefix}.mlp.2', f'{pp_prefix}.mlp.2'),
]
mapping.extend(layer_mapping)
mapping.append(('tokens_to_token.project','patch_embed.proj'))
num_layers = config.MODEL.DEPTH
for idx in range(num_layers):
th_prefix = f'blocks.{idx}'
pp_prefix = f'blocks.{idx}'
layer_mapping = [
(f'{th_prefix}.norm1', f'{pp_prefix}.norm1'),
(f'{th_prefix}.attn.qkv', f'{pp_prefix}.attn.qkv'),
(f'{th_prefix}.attn.proj', f'{pp_prefix}.attn.proj'),
(f'{th_prefix}.norm2', f'{pp_prefix}.norm2'),
(f'{th_prefix}.mlp.fc1', f'{pp_prefix}.mlp.fc1'),
(f'{th_prefix}.mlp.fc2', f'{pp_prefix}.mlp.fc2'),
]
mapping.extend(layer_mapping)
head_mapping = [
('norm', 'norm'),
('head', 'head'),
]
mapping.extend(head_mapping)
return mapping
def convert(torch_model, paddle_model, model_name, config):
def _set_value(th_name, pd_name, transpose=True):
th_shape = th_params[th_name].shape
pd_shape = tuple(pd_params[pd_name].shape) # paddle shape default type is list
#assert th_shape == pd_shape, f'{th_shape} != {pd_shape}'
print(f'**SET** {th_name} {th_shape} **TO** {pd_name} {pd_shape}')
if isinstance(th_params[th_name], torch.nn.parameter.Parameter):
value = th_params[th_name].data.numpy()
else:
value = th_params[th_name].numpy()
if len(value.shape) == 2 and transpose:
value = value.transpose((1, 0))
pd_params[pd_name].set_value(value)
# 1. get paddle and torch model parameters
pd_params = {}
th_params = {}
for name, param in paddle_model.named_parameters():
pd_params[name] = param
for name, param in torch_model.named_parameters():
th_params[name] = param
for name, param in paddle_model.named_buffers():
pd_params[name] = param
for name, param in torch_model.named_buffers():
th_params[name] = param
# 2. get name mapping pairs
mapping = torch_to_paddle_mapping(model_name, config)
missing_keys_th = []
missing_keys_pd = []
zip_map = list(zip(*mapping))
th_keys = list(zip_map[0])
pd_keys = list(zip_map[1])
for key in th_params:
missing = False
if key not in th_keys:
missing = True
if key.endswith('.weight'):
if key[:-7] in th_keys:
missing = False
if key.endswith('.bias'):
if key[:-5] in th_keys:
missing = False
if missing:
missing_keys_th.append(key)
for key in pd_params:
missing = False
if key not in pd_keys:
missing = True
if key.endswith('.weight'):
if key[:-7] in pd_keys:
missing = False
if key.endswith('.bias'):
if key[:-5] in pd_keys:
missing = False
if missing:
missing_keys_pd.append(key)
print('====================================')
print('missing_keys_pytorch:')
print(missing_keys_th)
print('missing_keys_paddle:')
print(missing_keys_pd)
print('====================================')
    # 3. set torch param values to paddle params: weights may need a transpose
for th_name, pd_name in mapping:
if th_name in th_params and pd_name in pd_params: # nn.Parameters
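            # Parameters named 'w' (the token_performer projection matrices) are copied
            # as-is and skip the Linear-weight transpose.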
if th_name.endswith('w'):
_set_value(th_name, pd_name, transpose=False)
else:
_set_value(th_name, pd_name)
else:
if f'{th_name}.weight' in th_params and f'{pd_name}.weight' in pd_params:
th_name_w = f'{th_name}.weight'
pd_name_w = f'{pd_name}.weight'
_set_value(th_name_w, pd_name_w)
if f'{th_name}.bias' in th_params and f'{pd_name}.bias' in pd_params:
th_name_b = f'{th_name}.bias'
pd_name_b = f'{pd_name}.bias'
_set_value(th_name_b, pd_name_b)
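            # BatchNorm-style buffers: torch running_mean/running_var correspond to
            # paddle's _mean/_variance buffers.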
if f'{th_name}.running_mean' in th_params and f'{pd_name}._mean' in pd_params:
th_name_b = f'{th_name}.running_mean'
pd_name_b = f'{pd_name}._mean'
_set_value(th_name_b, pd_name_b)
if f'{th_name}.running_var' in th_params and f'{pd_name}._variance' in pd_params:
th_name_b = f'{th_name}.running_var'
pd_name_b = f'{pd_name}._variance'
_set_value(th_name_b, pd_name_b)
return paddle_model
def main():
paddle.set_device('cpu')
model_name_list = ['t2t_vit_7',
't2t_vit_10',
't2t_vit_12',
't2t_vit_14',
't2t_vit_14_384',
't2t_vit_19',
't2t_vit_24',
't2t_vit_24_token_labeling',
't2t_vit_t_14',
't2t_vit_t_19',
't2t_vit_t_24']
pth_model_path_list = ['./T2T_ViT_torch/t2t-vit-pth-models/71.7_T2T_ViT_7.pth.tar',
'./T2T_ViT_torch/t2t-vit-pth-models/75.2_T2T_ViT_10.pth.tar',
'./T2T_ViT_torch/t2t-vit-pth-models/76.5_T2T_ViT_12.pth.tar',
'./T2T_ViT_torch/t2t-vit-pth-models/81.5_T2T_ViT_14.pth.tar',
'./T2T_ViT_torch/t2t-vit-pth-models/83.3_T2T_ViT_14.pth.tar',
'./T2T_ViT_torch/t2t-vit-pth-models/81.9_T2T_ViT_19.pth.tar',
'./T2T_ViT_torch/t2t-vit-pth-models/82.3_T2T_ViT_24.pth.tar',
'./T2T_ViT_torch/t2t-vit-pth-models/84.2_T2T_ViT_24.pth.tar',
'./T2T_ViT_torch/t2t-vit-pth-models/81.7_T2T_ViTt_14.pth.tar',
'./T2T_ViT_torch/t2t-vit-pth-models/82.4_T2T_ViTt_19.pth.tar',
'./T2T_ViT_torch/t2t-vit-pth-models/82.6_T2T_ViTt_24.pth.tar']
for model_name, pth_model_path in zip(model_name_list, pth_model_path_list):
print(f'============= NOW: {model_name} =============')
sz = 384 if '384' in model_name else 224
if 'token_labeling' in model_name:
config = get_config(f'./configs/{model_name[:-15]}.yaml')
else:
config = get_config(f'./configs/{model_name}.yaml')
paddle_model = build_model(config)
paddle_model.eval()
print_model_named_params(paddle_model)
print_model_named_buffers(paddle_model)
print('+++++++++++++++++++++++++++++++++++')
device = torch.device('cpu')
if 'token_labeling' in model_name:
torch_model = eval(f'{model_name[:-15]}(img_size={sz})')
else:
if '384' in model_name:
torch_model = eval(f'{model_name[:-4]}(img_size={sz})')
else:
torch_model = eval(f'{model_name}(img_size={sz})')
load_for_transfer_learning(torch_model,
pth_model_path,
use_ema=True,
strict=False,
num_classes=1000)
torch_model = torch_model.to(device)
torch_model.eval()
print_model_named_params(torch_model)
print_model_named_buffers(torch_model)
# convert weights
paddle_model = convert(torch_model, paddle_model, model_name, config)
# check correctness
x = np.random.randn(2, 3, sz, sz).astype('float32')
x_paddle = paddle.to_tensor(x)
x_torch = torch.Tensor(x).to(device)
out_torch = torch_model(x_torch)
out_paddle = paddle_model(x_paddle)
out_torch = out_torch.data.cpu().numpy()
out_paddle = out_paddle.cpu().numpy()
print(out_torch.shape, out_paddle.shape)
print(out_torch[0, 0:100])
print('========================================================')
print(out_paddle[0, 0:100])
        assert np.allclose(out_torch, out_paddle, atol=1e-2)
# save weights for paddle model
        model_path = os.path.join('.', f'{model_name}.pdparams')
paddle.save(paddle_model.state_dict(), model_path)
print(f'{model_name} done')
print('all done')
if __name__ == "__main__":
main()
| 2.484375 | 2 |
estimators/__init__.py | j-bac/id-concentration | 0 | 3289 | <reponame>j-bac/id-concentration
from ._FisherS import randsphere, preprocessing, SeparabilityAnalysis, point_inseparability_to_pointID
from ._call_estimators import TwoNN, run_singleGMST, run_singleCorrDim, runDANCo, runDANCoStats, runDANColoop, runANOVAglobal, runANOVAlocal, radovanovic_estimators_matlab, Hidalgo
from ._DANCo import dancoDimEst as danco_py
from ._TwoNN import twonn as twonn_py
from ._ESS import essLocalDimEst as ess_py
from ._mada import mada as mada_py
from ._corint import corint as corint_py
| 0.863281 | 1 |
examples/get_message.py | NeroAsmarr/fz-api | 71 | 3290 | # Example: fetch class rescheduling / course-change notifications
from zfnew import GetInfo, Login
base_url = 'the homepage url of the school educational administration system'
lgn = Login(base_url=base_url)
lgn.login('username', 'password')
cookies = lgn.cookies  # how to obtain the cookies
person = GetInfo(base_url=base_url, cookies=cookies)
message = person.get_message()
print(message)
| 2.5 | 2 |
input/gera_entradas.py | AtilioA/Sort-merge-join | 0 | 3291 | import sys
import random
from faker import Faker
def gera(nLinhas=100, nCampos=None):
with open(f"{path}/file{nLinhas}-{nCampos}_python.txt", "w+", encoding="utf8") as file:
if not nCampos:
nCampos = random.randint(2, 10)
camposFuncs = [
fake.name,
fake.date,
fake.ssn,
fake.ascii_email,
fake.job,
fake.phone_number,
fake.coordinate,
fake.license_plate,
fake.credit_card_expire,
][:nCampos]
for _ in range(nLinhas):
file.write(f"{random.randint(0, 999999)},")
for funcao in camposFuncs[:-1]:
file.write(f"{funcao()},")
file.write(camposFuncs[-1]())
file.write("\n")
if __name__ == "__main__":
fake = Faker("pt_BR")
path = "python/"
try:
nLinhas = int(sys.argv[1])
nCampos = int(sys.argv[2])
    except (IndexError, ValueError):
nLinhas = 1000
nCampos = 10
gera(nLinhas, nCampos)
| 2.875 | 3 |
lessons 20/HomeWork/task9.py | zainllw0w/skillbox | 0 | 3292 | <reponame>zainllw0w/skillbox
def sort(data, time):
tt = False
ft = True
st = False
is_find = True
winers_name = set()
index = 0
while is_find:
index += 1
for key, values in data.items():
if time[0 - index] == int(values[1]) and ft and values[0] not in winers_name:
first_id = key
ft = False
st = True
winers_name.add(values[0])
first_i = index
            elif time[0 - index] == int(values[1]) and st and values[0] not in winers_name:
second_id = key
st = False
tt = True
winers_name.add(values[0])
second_i = index
            elif time[0 - index] == int(values[1]) and tt and values[0] not in winers_name:
three_id = key
winers_name.add(values[0])
is_find = False
three_i = index
break
return first_id, second_id, three_id, first_i, second_i, three_i
n = int(input('Enter the number of lines: '))
data = dict()
time_list = list()
for i in range(1, n+1):
    print(f'Enter line {i}: ', end='')
text = input().split()
time = text[0]
time_list.append(int(time))
name = text[1]
obj = [name, time]
data[i] = tuple(obj)
f, s, t, fi, si, ti = sort(data, sorted(time_list))
time_list = sorted(time_list)
print('1st place: {0}, with {1} points'.format(data[f][0], time_list[-fi]))
print('2nd place: {0}, with {1} points'.format(data[s][0], time_list[-si]))
print('3rd place: {0}, with {1} points'.format(data[t][0], time_list[-ti])) | 3.484375 | 3 |
src/test-apps/happy/test-templates/WeaveInetDNS.py | aiw-google/openweave-core | 1 | 3293 | #!/usr/bin/env python
#
# Copyright (c) 2016-2017 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @file
# Implements WeaveInet class that tests Weave Inet Layer among Weave Nodes.
#
import os
import sys
import time
from happy.ReturnMsg import ReturnMsg
from happy.Utils import *
from happy.HappyNode import HappyNode
from happy.HappyNetwork import HappyNetwork
from WeaveTest import WeaveTest
# Q: which parameters need to be specified?
options = {}
options["quiet"] = False
options["node_id"] = None
options["tap_if"] = None
options["node_ip"] = None
options["ipv4_gateway"] = None
options["dns"] = None
options["use_lwip"] = False
def option():
return options.copy()
class WeaveInetDNS(HappyNode, HappyNetwork, WeaveTest):
def __init__(self, opts = options):
HappyNode.__init__(self)
HappyNetwork.__init__(self)
WeaveTest.__init__(self)
self.quiet = opts["quiet"]
self.node_id = opts["node_id"]
self.tap_if = opts["tap_if"]
self.prefix = opts["prefix"]
self.ipv4_gateway =opts["ipv4_gateway"]
self.dns = opts["dns"]
self.use_lwip = opts["use_lwip"]
self.node_process_tag = "WEAVE-INET-NODE"
def __log_error_and_exit(self, error):
self.logger.error("[localhost] WeaveInetDNS: %s" % (error))
sys.exit(1)
def __checkNodeExists(self, node, description):
if not self._nodeExists(node):
emsg = "The %s '%s' does not exist in the test topology." % (description, node)
self.__log_error_and_exit(emsg)
def __pre_check(self):
# Check if the name of the new node is given
if not self.node_id:
emsg = "Missing name of the virtual node that should start shell."
self.__log_error_and_exit(emsg)
# Check if virtual node exists
if not self._nodeExists():
emsg = "virtual node %s does not exist." % (self.node_id)
self.__log_error_and_exit(emsg)
        # check if prefix is set
        if self.prefix is None:
            emsg = "prefix is None. Please specify a valid prefix."
self.__log_error_and_exit(emsg)
def __gather_results(self):
"""
gather result from get_test_output()
"""
quiet = True
results = {}
results['status'], results['output'] = self.get_test_output(self.node_id, self.node_process_tag, quiet)
return (results)
def __process_results(self, results):
"""
process results from gather_results()
"""
status = False
output = ""
status = (results['status'] == 0)
output = results['output']
return (status, output)
def __start_node_dnscheck(self):
"""
lwip and socket use different command for now
"""
cmd = "sudo "
cmd += self.getWeaveInetLayerDNSPath()
node_ip = self.getNodeAddressesOnPrefix(self.prefix, self.node_id)[0]
        if node_ip is None:
emsg = "Could not find IP address of the node, %s" % (self.node_id)
self.__log_error_and_exit(emsg)
if self.use_lwip:
cmd += " --tap-device " + self.tap_if + " -a " + node_ip + " --ipv4-gateway " + self.ipv4_gateway + \
" --dns-server " + self.dns
print "dns check command : {}".format(cmd)
self.start_weave_process(self.node_id, cmd, self.node_process_tag, sync_on_output=self.ready_to_service_events_str)
def __stop_node(self):
self.stop_weave_process(self.node_id, self.node_process_tag)
def run(self):
self.logger.debug("[localhost] WeaveInetDNS: Run.")
self.__pre_check()
self.__start_node_dnscheck()
emsg = "WeaveInet %s should be running." % (self.node_process_tag)
self.logger.debug("[%s] WeaveInet: %s" % (self.node_id, emsg))
self.__stop_node()
node_output_value, node_output_data = \
self.get_test_output(self.node_id, self.node_process_tag, True)
node_strace_value, node_strace_data = \
self.get_test_strace(self.node_id, self.node_process_tag, True)
results = self.__gather_results()
result, output = self.__process_results(results)
data = {}
data["node_output"] = node_output_data
data["node_strace"] = node_strace_data
self.logger.debug("[localhost] WeaveInetDNSTest: Done.")
return ReturnMsg(result, data)
| 2.125 | 2 |
funcoes.py | ZezaoDev/Circtrigo | 0 | 3294 | <reponame>ZezaoDev/Circtrigo
import turtle as t
import math
class circTrigo:
def __init__(self):
self.raio = 0
self.grau = 0
self.seno = 0
self.cosseno = 0
self.tangente = 0
self.quadrante = 0
self.tema = ''
t.bgcolor("black")
t.pencolor("white")
def seta(self):
        # DRAWS AN ARROW
t.left(90)
t.forward(5)
t.right(120)
t.forward(10)
t.right(120)
t.forward(10)
t.right(120)
t.forward(5)
t.right(90)
def linha(self, pxls):
        # DRAWS A DOTTED LINE
pixels = int(pxls//1)
if pixels % 2 == 0:
pixels = pixels + 1
for x in range(0, pixels//10):
t.pendown()
t.forward(5)
t.penup()
t.forward(5)
t.pendown()
t.forward(pixels%10)
def reset(self):
        # RETURNS TO THE STARTING POSITION
t.penup()
t.home()
t.pendown()
t.speed(0)
t.pensize(2)
t.pencolor("white")
def circulo(self, raio):
        # DRAWS THE CIRCLE
self.raio = raio
t.right(90)
t.penup()
t.forward(self.raio)
t.left(90)
t.pendown()
t.circle(self.raio)
self.reset()
def eixos(self):
        # X AXIS
t.penup()
t.backward(self.raio + 50)
t.pendown()
self.linha((self.raio*2)+100)
self.seta()
self.reset()
        # Y AXIS
t.left(90)
t.penup()
t.backward(self.raio + 50)
t.pendown()
self.linha((self.raio*2)+100)
self.seta()
self.reset()
def angulo(self, grau):
        # DRAWS THE ANGLE
self.grau = grau % 360
t.left(self.grau)
t.forward(self.raio)
self.reset()
        # SETS THE VALUES OF SINE, COSINE AND TANGENT.
self.seno = math.sin(math.radians(self.grau))
self.cosseno = math.cos(math.radians(self.grau))
self.tangente = math.tan(math.radians(self.grau))
        # DETERMINES THE QUADRANT OF THE ANGLE
vquad = self.grau
if 0 < vquad < 90:
self.quadrante = 1
elif 90 < vquad < 180:
self.quadrante = 2
elif 180 < vquad < 270:
self.quadrante = 3
elif 270 < vquad < 360:
self.quadrante = 4
        if vquad == 0 or vquad == 90 or vquad == 180 or vquad == 270 or vquad == 360:  # quadrant 0 represents angles with undefined results
self.quadrante = 0
def sen(self):
        # DRAWS THE SINE
t.left(self.grau)
t.forward(self.raio)
t.pencolor("red")
if self.quadrante == 1:
t.left(180 - self.grau)
self.linha(self.cosseno * self.raio)
t.left(90)
t.forward(self.seno * self.raio)
print (self.seno)
elif self.quadrante == 2:
t.right(self.grau)
self.linha((self.cosseno * self.raio) * -1)
t.right(90)
t.forward(self.seno * self.raio)
print (self.seno)
elif self.quadrante == 3:
t.right(self.grau)
self.linha(self.cosseno * self.raio * -1)
t.left(90)
t.forward(self.seno * self.raio * -1)
print (self.seno)
elif self.quadrante == 4:
t.left(180 - self.grau)
self.linha(self.cosseno * self.raio)
t.left(90)
t.forward(self.seno * self.raio)
print (self.seno)
else:
print("Erro: angulo invalido")
self.reset()
def csen(self):
        # DRAWS THE COSINE
t.left(self.grau)
t.forward(self.raio)
t.pencolor("green")
if self.quadrante == 1:
t.right(self.grau + 90)
self.linha(self.seno * self.raio)
t.right(90)
t.forward(self.cosseno * self.raio)
print (self.cosseno)
elif self.quadrante == 2:
t.right(self.grau + 90)
self.linha(self.seno * self.raio)
t.right(90)
t.forward(self.cosseno * self.raio)
print (self.cosseno)
elif self.quadrante == 3:
t.right(self.grau - 90)
self.linha(self.seno * self.raio * -1)
t.right(90)
t.forward(self.cosseno * self.raio * -1)
print (self.cosseno)
elif self.quadrante == 4:
t.right(self.grau - 90)
self.linha(self.seno * self.raio * -1)
t.left(90)
t.forward(self.cosseno * self.raio)
print (self.cosseno)
else:
print("Erro: angulo invalido")
self.reset()
def tan(self):
        # DRAWS THE TANGENT
t.left(self.grau)
t.penup()
t.pencolor("blue")
if self.quadrante == 1:
t.forward(self.raio)
t.pendown()
self.linha(math.sqrt(((self.tangente*self.raio)**2) + (self.raio**2)) - self.raio)
t.right(self.grau + 90)
t.forward(self.tangente * self.raio)
print (self.tangente)
elif self.quadrante == 2:
t.left(180)
t.forward(self.raio)
t.pendown()
self.linha(math.sqrt(((self.tangente*self.raio)**2) + (self.raio**2)) - self.raio)
t.left(90 - self.grau)
t.forward(self.tangente * self.raio)
print (self.tangente)
elif self.quadrante == 3:
t.left(180)
t.forward(self.raio)
t.pendown()
self.linha(math.sqrt(((self.tangente*self.raio)**2) + (self.raio**2)) - self.raio)
t.right(self.grau - 90)
t.forward(self.tangente * self.raio)
print (self.tangente)
elif self.quadrante == 4:
t.forward(self.raio)
t.pendown()
self.linha(math.sqrt(((self.tangente*self.raio)**2) + (self.raio**2)) - self.raio)
t.right(90 + self.grau)
t.forward(self.tangente * self.raio)
print (self.tangente)
else:
print("Erro: angulo invalido")
self.reset()
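# Minimal usage sketch (not part of the original class; radius and angle are
# arbitrary illustrative values): draws the circle, the dotted axes, a 60-degree
# angle and its sine (red), cosine (green) and tangent (blue) segments.
if __name__ == '__main__':
    circ = circTrigo()
    circ.circulo(150)   # circle of radius 150 px
    circ.eixos()        # dotted x and y axes
    circ.angulo(60)     # 60-degree angle (first quadrant)
    circ.sen()
    circ.csen()
    circ.tan()
    t.done()            # keep the turtle window open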
| 3.640625 | 4 |
examples/catapi/feeder.py | IniZio/py-skygear | 8 | 3295 | <gh_stars>1-10
def pick_food(name):
if name == "chima":
return "chicken"
else:
return "dry food"
| 2.96875 | 3 |
esm/model.py | crochereau/esm | 1 | 3296 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from .modules import (
TransformerLayer,
LearnedPositionalEmbedding,
SinusoidalPositionalEmbedding,
RobertaLMHead,
ESM1bLayerNorm,
ContactPredictionHead,
)
class ProteinBertModel(nn.Module):
@classmethod
def add_args(cls, parser):
parser.add_argument(
"--num_layers", default=36, type=int, metavar="N", help="number of layers"
)
parser.add_argument(
"--embed_dim", default=1280, type=int, metavar="N", help="embedding dimension"
)
parser.add_argument(
"--logit_bias", action="store_true", help="whether to apply bias to logits"
)
parser.add_argument(
"--ffn_embed_dim",
default=5120,
type=int,
metavar="N",
help="embedding dimension for FFN",
)
parser.add_argument(
"--attention_heads",
default=20,
type=int,
metavar="N",
help="number of attention heads",
)
def __init__(self, args, alphabet):
super().__init__()
self.args = args
self.alphabet_size = len(alphabet)
self.padding_idx = alphabet.padding_idx
self.mask_idx = alphabet.mask_idx
self.cls_idx = alphabet.cls_idx
self.eos_idx = alphabet.eos_idx
if self.args.arch == 'roberta_large':
self.model_version = 'ESM-1b'
self._init_submodules_esm1b()
else:
self.model_version = 'ESM-1'
self._init_submodules_esm1()
def _init_submodules_common(self):
self.embed_tokens = nn.Embedding(
self.alphabet_size, self.args.embed_dim, padding_idx=self.padding_idx
)
self.layers = nn.ModuleList(
[
TransformerLayer(
self.args.embed_dim, self.args.ffn_embed_dim, self.args.attention_heads,
add_bias_kv=(self.model_version != 'ESM-1b'),
use_esm1b_layer_norm=(self.model_version == 'ESM-1b'),
)
for _ in range(self.args.layers)
]
)
self.contact_head = ContactPredictionHead(self.args.layers * self.args.attention_heads)
def _init_submodules_esm1b(self):
self._init_submodules_common()
self.embed_scale = 1
self.embed_positions = LearnedPositionalEmbedding(self.args.max_positions, self.args.embed_dim, self.padding_idx)
self.emb_layer_norm_before = ESM1bLayerNorm(self.args.embed_dim)
self.emb_layer_norm_after = ESM1bLayerNorm(self.args.embed_dim)
self.lm_head = RobertaLMHead(
embed_dim=self.args.embed_dim,
output_dim=self.alphabet_size,
weight=self.embed_tokens.weight
)
def _init_submodules_esm1(self):
self._init_submodules_common()
self.embed_scale = math.sqrt(self.args.embed_dim)
self.embed_positions = SinusoidalPositionalEmbedding(self.args.embed_dim, self.padding_idx)
self.embed_out = nn.Parameter(
torch.zeros((self.alphabet_size, self.args.embed_dim))
)
self.embed_out_bias = None
if self.args.final_bias:
self.embed_out_bias = nn.Parameter(torch.zeros(self.alphabet_size))
def forward(self, tokens, repr_layers=[], need_head_weights=False, return_contacts=False):
if return_contacts:
need_head_weights = True
assert tokens.ndim == 2
padding_mask = tokens.eq(self.padding_idx) # B, T
x = self.embed_scale * self.embed_tokens(tokens)
if getattr(self.args, 'token_dropout', False):
x.masked_fill_((tokens == self.mask_idx).unsqueeze(-1), 0.0)
# x: B x T x C
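            # Rescale the surviving embeddings so their expected magnitude matches training,
            # where 15% of tokens are picked for masking and 80% of those are zeroed
            # (hence mask_ratio_train = 0.12), analogous to inverted-dropout scaling.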
mask_ratio_train = 0.15 * 0.8
src_lengths = (~padding_mask).sum(-1)
mask_ratio_observed = (tokens == self.mask_idx).sum(-1).float() / src_lengths
x = x * (1 - mask_ratio_train) / (1 - mask_ratio_observed)[:, None, None]
x = x + self.embed_positions(tokens)
if self.model_version == 'ESM-1b':
x = self.emb_layer_norm_before(x)
if padding_mask is not None:
x = x * (1 - padding_mask.unsqueeze(-1).type_as(x))
repr_layers = set(repr_layers)
hidden_representations = {}
if 0 in repr_layers:
hidden_representations[0] = x
if need_head_weights:
attn_weights = []
# (B, T, E) => (T, B, E)
x = x.transpose(0, 1)
if not padding_mask.any():
padding_mask = None
for layer_idx, layer in enumerate(self.layers):
x, attn = layer(x, self_attn_padding_mask=padding_mask, need_head_weights=need_head_weights)
if (layer_idx + 1) in repr_layers:
hidden_representations[layer_idx + 1] = x.transpose(0, 1)
if need_head_weights:
# (H, B, T, T) => (B, H, T, T)
attn_weights.append(attn.transpose(1, 0))
if self.model_version == 'ESM-1b':
x = self.emb_layer_norm_after(x)
x = x.transpose(0, 1) # (T, B, E) => (B, T, E)
# last hidden representation should have layer norm applied
if (layer_idx + 1) in repr_layers:
hidden_representations[layer_idx + 1] = x
x = self.lm_head(x)
else:
x = F.linear(x, self.embed_out, bias=self.embed_out_bias)
x = x.transpose(0, 1) # (T, B, E) => (B, T, E)
result = {"logits": x, "representations": hidden_representations}
if need_head_weights:
# attentions: B x L x H x T x T
attentions = torch.stack(attn_weights, 1)
if self.model_version == "ESM-1":
# ESM-1 models have an additional null-token for attention, which we remove
attentions = attentions[..., :-1]
if padding_mask is not None:
attention_mask = (1 - padding_mask.type_as(attentions))
attention_mask = attention_mask.unsqueeze(1) * attention_mask.unsqueeze(2)
attentions = attentions * attention_mask[:, None, None, :, :]
result["attentions"] = attentions
if return_contacts:
contacts = self._predict_contacts_from_token_attentions(tokens, attentions)
result["contacts"] = contacts
return result
def _predict_contacts_from_token_attentions(self, tokens, attentions):
# remove eos token attentions
if tokens[:, -1].eq(self.eos_idx).any():
eos_mask = tokens.ne(self.eos_idx).to(attentions)
eos_mask = eos_mask.unsqueeze(1) * eos_mask.unsqueeze(2)
attentions = attentions * eos_mask[:, None, None, :, :]
attentions = attentions[..., :-1, :-1]
# remove cls token attentions
if tokens[:, 0].eq(self.cls_idx).all():
attentions = attentions[..., 1:, 1:]
batch_size, layers, heads, seqlen, _ = attentions.size()
attentions = attentions.view(batch_size, layers * heads, seqlen, seqlen)
return self.contact_head(attentions)
def predict_contacts(self, tokens):
return self(tokens, return_contacts=True)["contacts"]
@property
def num_layers(self):
return self.args.layers
| 1.84375 | 2 |
python/tink/aead/kms_envelope_aead.py | bfloch/tink | 0 | 3297 | <reponame>bfloch/tink
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for envelope encryption with KMS."""
from __future__ import absolute_import
from __future__ import division
# Placeholder for import for type annotations
from __future__ import print_function
import struct
from tink.proto import tink_pb2
from tink import aead
from tink import core
# Defines in how many bytes the DEK length will be encoded.
DEK_LEN_BYTES = 4
class KmsEnvelopeAead(aead.Aead):
"""Implements envelope encryption.
Envelope encryption generates a data encryption key (DEK) which is used
  to encrypt the payload. The DEK is then sent to a KMS to be encrypted and
the encrypted DEK is attached to the ciphertext. In order to decrypt the
ciphertext, the DEK first has to be decrypted by the KMS, and then the DEK
can be used to decrypt the ciphertext. For further information see
https://cloud.google.com/kms/docs/envelope-encryption.
The ciphertext structure is as follows:
* Length of the encrypted DEK: 4 bytes (big endian)
* Encrypted DEK: variable length, specified by the previous 4 bytes
* AEAD payload: variable length
"""
def __init__(self, key_template: tink_pb2.KeyTemplate, remote: aead.Aead):
self.key_template = key_template
self.remote_aead = remote
def encrypt(self, plaintext: bytes, associated_data: bytes) -> bytes:
# Get new key from template
dek = core.Registry.new_key_data(self.key_template)
dek_aead = core.Registry.primitive(dek, aead.Aead)
# Encrypt plaintext
ciphertext = dek_aead.encrypt(plaintext, associated_data)
# Wrap DEK key values with remote
encrypted_dek = self.remote_aead.encrypt(dek.value, b'')
# Construct ciphertext, DEK length encoded as big endian
enc_dek_len = struct.pack('>I', len(encrypted_dek))
return enc_dek_len + encrypted_dek + ciphertext
def decrypt(self, ciphertext: bytes, associated_data: bytes) -> bytes:
ct_len = len(ciphertext)
# Recover DEK length
if ct_len < DEK_LEN_BYTES:
raise core.TinkError
dek_len = struct.unpack('>I', ciphertext[0:DEK_LEN_BYTES])[0]
# Basic check if DEK length can be valid.
if dek_len > (ct_len - DEK_LEN_BYTES) or dek_len < 0:
raise core.TinkError
# Decrypt DEK with remote AEAD
encrypted_dek_bytes = ciphertext[DEK_LEN_BYTES:DEK_LEN_BYTES + dek_len]
dek_bytes = self.remote_aead.decrypt(encrypted_dek_bytes, b'')
# Get AEAD primitive based on DEK
dek = tink_pb2.KeyData()
dek.type_url = self.key_template.type_url
dek.value = dek_bytes
dek.key_material_type = tink_pb2.KeyData.KeyMaterialType.SYMMETRIC
dek_aead = core.Registry.primitive(dek, aead.Aead)
# Extract ciphertext payload and decrypt
ct_bytes = ciphertext[DEK_LEN_BYTES + dek_len:]
return dek_aead.decrypt(ct_bytes, associated_data)
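# Minimal usage sketch (not part of the original module). It shows how
# KmsEnvelopeAead is wired together: in production `remote_aead` would come from
# a KMS client (e.g. the gcp_kms/aws_kms extensions); here a locally generated
# keyset stands in for the KMS key. The helpers below assume a tink-py version
# exposing aead.register(), aead.aead_key_templates and tink.new_keyset_handle();
# older versions may use tink_config.register() instead.
if __name__ == '__main__':
    import tink

    aead.register()
    # Stand-in for the KMS-held key-encryption key.
    remote_handle = tink.new_keyset_handle(aead.aead_key_templates.AES256_GCM)
    remote_aead = remote_handle.primitive(aead.Aead)
    # Each encrypt() generates a fresh AES128-GCM DEK and wraps it with remote_aead.
    envelope = KmsEnvelopeAead(aead.aead_key_templates.AES128_GCM, remote_aead)
    ct = envelope.encrypt(b'payload', b'associated data')
    assert envelope.decrypt(ct, b'associated data') == b'payload'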
| 2.34375 | 2 |
tests/pyb/can.py | LabAixBidouille/micropython | 0 | 3298 | from pyb import CAN
CAN.initfilterbanks(14)
can = CAN(1)
print(can)
can.init(CAN.LOOPBACK)
print(can)
print(can.any(0))
# Catch all filter
can.setfilter(0, CAN.MASK16, 0, (0, 0, 0, 0))
can.send('abcd', 123)
print(can.any(0))
print(can.recv(0))
can.send('abcd', -1)
print(can.recv(0))
can.send('abcd', 0x7FF + 1)
print(can.recv(0))
# Test too long message
try:
can.send('abcdefghi', 0x7FF)
except ValueError:
print('passed')
else:
print('failed')
del can
# Testing extended IDs
can = CAN(1, CAN.LOOPBACK, extframe = True)
# Catch all filter
can.setfilter(0, CAN.MASK32, 0, (0, 0))
print(can)
try:
can.send('abcde', 0x7FF + 1)
except ValueError:
print('failed')
else:
r = can.recv(0)
if r[0] == 0x7FF+1 and r[3] == b'abcde':
print('passed')
else:
print('failed, wrong data received')
del can
# Test RxCallbacks
can = CAN(1, CAN.LOOPBACK)
can.setfilter(0, CAN.LIST16, 0, (1, 2, 3, 4))
can.setfilter(1, CAN.LIST16, 1, (5, 6, 7, 8))
def cb0(bus, reason):
print('cb0')
if reason == 0:
print('pending')
if reason == 1:
print('full')
if reason == 2:
print('overflow')
def cb1(bus, reason):
print('cb1')
if reason == 0:
print('pending')
if reason == 1:
print('full')
if reason == 2:
print('overflow')
def cb0a(bus, reason):
print('cb0a')
if reason == 0:
print('pending')
if reason == 1:
print('full')
if reason == 2:
print('overflow')
def cb1a(bus, reason):
print('cb1a')
if reason == 0:
print('pending')
if reason == 1:
print('full')
if reason == 2:
print('overflow')
can.rxcallback(0, cb0)
can.rxcallback(1, cb1)
can.send('11111111',1)
can.send('22222222',2)
can.send('33333333',3)
can.rxcallback(0, cb0a)
can.send('44444444',4)
can.send('55555555',5)
can.send('66666666',6)
can.send('77777777',7)
can.rxcallback(1, cb1a)
can.send('88888888',8)
print(can.recv(0))
print(can.recv(0))
print(can.recv(0))
print(can.recv(1))
print(can.recv(1))
print(can.recv(1))
can.send('11111111',1)
can.send('55555555',5)
print(can.recv(0))
print(can.recv(1))
| 2.28125 | 2 |
quarkchain/cluster/tests/test_miner.py | TahiG/pyquarkchain | 17 | 3299 | <filename>quarkchain/cluster/tests/test_miner.py
import asyncio
import time
import unittest
from typing import Optional
from quarkchain.cluster.miner import DoubleSHA256, Miner, MiningWork, validate_seal
from quarkchain.config import ConsensusType
from quarkchain.core import RootBlock, RootBlockHeader
from quarkchain.p2p import ecies
from quarkchain.utils import sha3_256
class TestMiner(unittest.TestCase):
def setUp(self):
super().setUp()
def miner_gen(consensus, create_func, add_func, **kwargs):
m = Miner(
consensus, create_func, add_func, self.get_mining_params, **kwargs
)
m.enabled = True
return m
self.miner_gen = miner_gen
self.added_blocks = []
@staticmethod
def get_mining_params(rounds: Optional[int] = None):
# guarantee target time is hit
ret = {"target_block_time": 0.0, "is_test": True}
if rounds is not None:
ret["rounds"] = rounds
return ret
def test_mine_new_block_normal_case(self):
async def create(retry=True):
if len(self.added_blocks) >= 5:
return None # stop the game
return RootBlock(
RootBlockHeader(create_time=int(time.time())),
tracking_data="{}".encode("utf-8"),
)
async def add(block):
nonlocal miner
self.added_blocks.append(block)
for consensus in (
ConsensusType.POW_SIMULATE,
ConsensusType.POW_ETHASH,
ConsensusType.POW_SHA3SHA3,
):
miner = self.miner_gen(consensus, create, add)
# should generate 5 blocks and then end
loop = asyncio.get_event_loop()
loop.run_until_complete(miner._mine_new_block_async())
self.assertEqual(len(self.added_blocks), 5)
def test_simulate_mine_handle_block_exception(self):
i = 0
async def create(retry=True):
nonlocal i
if i >= 5:
return None
return RootBlock(
RootBlockHeader(create_time=int(time.time())),
tracking_data="{}".encode("utf-8"),
)
async def add(block):
nonlocal i, miner
try:
if i % 2 == 0:
raise Exception("(╯°□°)╯︵ ┻━┻")
else:
self.added_blocks.append(block)
finally:
i += 1
miner = self.miner_gen(ConsensusType.POW_SIMULATE, create, add)
# only 2 blocks can be added
loop = asyncio.get_event_loop()
loop.run_until_complete(miner._mine_new_block_async())
self.assertEqual(len(self.added_blocks), 2)
def test_sha3sha3(self):
miner = self.miner_gen(ConsensusType.POW_SHA3SHA3, None, None)
block = RootBlock(
RootBlockHeader(create_time=42, difficulty=5),
tracking_data="{}".encode("utf-8"),
)
work = MiningWork(block.header.get_hash_for_mining(), 42, 5)
# only process one block, which is passed in. `None` means termination right after
miner.input_q.put((None, {}))
miner.mine_loop(
work,
{"consensus_type": ConsensusType.POW_SHA3SHA3},
miner.input_q,
miner.output_q,
)
mined_res = miner.output_q.get()
block.header.nonce = mined_res.nonce
validate_seal(block.header, ConsensusType.POW_SHA3SHA3)
def test_qkchash(self):
miner = self.miner_gen(ConsensusType.POW_QKCHASH, None, None)
block = RootBlock(
RootBlockHeader(create_time=42, difficulty=5),
tracking_data="{}".encode("utf-8"),
)
work = MiningWork(block.header.get_hash_for_mining(), 42, 5)
# only process one block, which is passed in. `None` means termination right after
miner.input_q.put((None, {}))
miner.mine_loop(
work,
{"consensus_type": ConsensusType.POW_QKCHASH},
miner.input_q,
miner.output_q,
)
mined_res = miner.output_q.get()
block.header.nonce = mined_res.nonce
block.header.mixhash = mined_res.mixhash
validate_seal(block.header, ConsensusType.POW_QKCHASH)
def test_only_remote(self):
async def go():
miner = self.miner_gen(ConsensusType.POW_SHA3SHA3, None, None)
with self.assertRaises(ValueError):
await miner.get_work()
with self.assertRaises(ValueError):
await miner.submit_work(b"", 42, b"")
loop = asyncio.get_event_loop()
loop.run_until_complete(go())
def test_get_work(self):
now = 42
async def create(retry=True):
nonlocal now
return RootBlock(RootBlockHeader(create_time=now, extra_data=b"{}"))
miner = self.miner_gen(ConsensusType.POW_SHA3SHA3, create, None, remote=True)
async def go():
nonlocal now
# no current work, will generate a new one
work = await miner.get_work(now=now)
self.assertEqual(len(work), 3)
self.assertEqual(len(miner.work_map), 1)
h = list(miner.work_map.keys())[0]
self.assertEqual(work.hash, h)
# cache hit
now += 1
work = await miner.get_work(now=now)
self.assertEqual(work.hash, h)
self.assertEqual(len(miner.work_map), 1)
# new work if interval passed
now += 10
work = await miner.get_work(now=now)
self.assertEqual(len(miner.work_map), 2)
self.assertNotEqual(work.hash, h)
# work map cleaned up if too much time passed
now += 100
await miner.get_work(now=now)
self.assertEqual(len(miner.work_map), 1) # only new work itself
loop = asyncio.get_event_loop()
loop.run_until_complete(go())
def test_submit_work(self):
now = 42
block = RootBlock(
RootBlockHeader(create_time=42, extra_data=b"{}", difficulty=5)
)
async def create(retry=True):
return block
async def add(block_to_add):
self.added_blocks.append(block_to_add)
miner = self.miner_gen(ConsensusType.POW_SHA3SHA3, create, add, remote=True)
async def go():
work = await miner.get_work(now=now)
self.assertEqual(work.height, 0)
self.assertEqual(work.difficulty, 5)
# submitted block doesn't exist
res = await miner.submit_work(b"lolwut", 0, sha3_256(b""))
self.assertFalse(res)
solver = DoubleSHA256(work)
sol = solver.mine(100, 200).nonce
self.assertGreater(sol, 100) # ensure non-solution is tried
non_sol = sol - 1
# invalid pow proof
res = await miner.submit_work(work.hash, non_sol, sha3_256(b""))
self.assertFalse(res)
# valid submission, also check internal state afterwards
res = await miner.submit_work(work.hash, sol, sha3_256(b""))
self.assertTrue(res)
self.assertEqual(miner.work_map, {})
self.assertEqual(len(self.added_blocks), 1)
self.assertIsNone(miner.current_work)
loop = asyncio.get_event_loop()
loop.run_until_complete(go())
def test_submit_work_with_guardian(self):
now = 42
block = RootBlock(
RootBlockHeader(create_time=42, extra_data=b"{}", difficulty=1000)
)
async def create(retry=True):
return block
async def add(_):
pass
miner = self.miner_gen(
ConsensusType.POW_SHA3SHA3,
create,
add,
remote=True,
# fake pk, will succeed in test but fail in real world when
# adding the block to the root chain
guardian_private_key=ecies.generate_privkey(),
)
async def go():
for i in range(42, 100):
work = await miner.get_work(now=now)
self.assertEqual(work.height, 0)
# guardian: diff 1000 -> 1, any number should work
res = await miner.submit_work(work.hash, i, sha3_256(b""))
self.assertTrue(res)
loop = asyncio.get_event_loop()
loop.run_until_complete(go())
def test_validate_seal_with_adjusted_diff(self):
diff = 1000
block = RootBlock(
RootBlockHeader(create_time=42, difficulty=diff),
tracking_data="{}".encode("utf-8"),
)
block.header.nonce = 0
with self.assertRaises(ValueError):
validate_seal(block.header, ConsensusType.POW_SHA3SHA3)
# significantly lowering the diff should pass
validate_seal(block.header, ConsensusType.POW_SHA3SHA3, adjusted_diff=1)
| 2.34375 | 2 |