max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---
generator/database.py | Neotrinost/Neotrinost.ir | 4 | 2000 | import sqlite3
class Database:
def get_connection(self):
return sqlite3.connect("./db.sqlite")
def add_card(self, card_title, card_text, card_link_text, card_link_url):
con = self.get_connection()
cur = con.cursor()
create_table_query = "CREATE TABLE IF NOT EXISTS cards('card_title' VARCHAR," + \
" 'card_text' TEXT, 'card_link_text' VARCHAR, 'card_link_url' VARCHAR )"
insert_data_query = f"INSERT INTO " + \
f"cards VALUES ({card_title}, {card_text}, {card_link_text}, {card_link_url})"
try:
cur.execute(create_table_query)
cur.execute(insert_data_query)
con.commit()
except:
print("an error has been occurred !")
| 3.828125 | 4 |
crits/backdoors/forms.py | frbapolkosnik/crits | 22 | 2001 | <gh_stars>10-100
from django import forms
from django.forms.utils import ErrorList
from crits.campaigns.campaign import Campaign
from crits.core.forms import add_bucketlist_to_form, add_ticket_to_form
from crits.core.handlers import get_item_names, get_source_names
from crits.core.user_tools import get_user_organization
from crits.core import form_consts
from crits.vocabulary.relationships import RelationshipTypes
relationship_choices = [(c, c) for c in RelationshipTypes.values(sort=True)]
class AddBackdoorForm(forms.Form):
"""
Django form for adding a Backdoor to CRITs.
"""
error_css_class = 'error'
required_css_class = 'required'
name = forms.CharField(label=form_consts.Backdoor.NAME, required=True)
aliases = forms.CharField(label=form_consts.Backdoor.ALIASES,
required=False)
version = forms.CharField(label=form_consts.Backdoor.VERSION,
required=False)
description = forms.CharField(label=form_consts.Backdoor.DESCRIPTION,
required=False)
campaign = forms.ChoiceField(widget=forms.Select,
label=form_consts.Backdoor.CAMPAIGN,
required=False)
confidence = forms.ChoiceField(label=form_consts.Backdoor.CAMPAIGN_CONFIDENCE,
required=False)
source = forms.ChoiceField(widget=forms.Select(attrs={'class': 'bulknoinitial'}),
label=form_consts.Backdoor.SOURCE,
required=True)
source_method = forms.CharField(label=form_consts.Backdoor.SOURCE_METHOD,
required=False)
source_reference = forms.CharField(widget=forms.TextInput(attrs={'size': '90'}),
label=form_consts.Backdoor.SOURCE_REFERENCE,
required=False)
related_id = forms.CharField(widget=forms.HiddenInput(), required=False, label=form_consts.Common.RELATED_ID)
related_type = forms.CharField(widget=forms.HiddenInput(), required=False, label=form_consts.Common.RELATED_TYPE)
relationship_type = forms.ChoiceField(required=False,
label=form_consts.Common.RELATIONSHIP_TYPE,
widget=forms.Select(attrs={'id':'relationship_type'}))
def __init__(self, username, *args, **kwargs):
super(AddBackdoorForm, self).__init__(*args, **kwargs)
self.fields['campaign'].choices = [('', '')] + [
(c.name, c.name) for c in get_item_names(Campaign, True)]
self.fields['confidence'].choices = [
('', ''),
('low', 'low'),
('medium', 'medium'),
('high', 'high')]
self.fields['source'].choices = [
(c.name, c.name) for c in get_source_names(True, True, username)]
self.fields['source'].initial = get_user_organization(username)
self.fields['relationship_type'].choices = relationship_choices
self.fields['relationship_type'].initial = RelationshipTypes.RELATED_TO
add_bucketlist_to_form(self)
add_ticket_to_form(self)
def clean(self):
cleaned_data = super(AddBackdoorForm, self).clean()
campaign = cleaned_data.get('campaign')
if campaign:
confidence = cleaned_data.get('confidence')
if not confidence or confidence == '':
self._errors.setdefault('confidence', ErrorList())
self._errors['confidence'].append(u'This field is required if campaign is specified.')
return cleaned_data
| 2.078125 | 2 |
src/aprl/agents/monte_carlo.py | fkamrani/adversarial-policies | 211 | 2002 | <filename>src/aprl/agents/monte_carlo.py
"""Monte Carlo receding horizon control."""
from abc import ABC, abstractmethod
from multiprocessing import Pipe, Process
import gym
from stable_baselines.common.vec_env import CloudpickleWrapper
from aprl.common.mujoco import MujocoState, ResettableEnv
class MujocoResettableWrapper(ResettableEnv, gym.Wrapper):
"""Converts a MujocoEnv into a ResettableEnv.
Note all MuJoCo environments are resettable."""
def __init__(self, env):
"""Wraps a MujocoEnv, adding get_state and set_state methods.
:param env: a MujocoEnv. NOTE: it must not be wrapped in a TimeLimit."""
if hasattr(env, "_max_episode_steps"):
raise TypeError(
"Environment must not have a time limit " "(try passing in env.unwrapped instead)."
)
gym.Wrapper.__init__(self, env)
self.sim = env.unwrapped.sim
def get_state(self):
"""Serializes the qpos and qvel state of the MuJoCo emulator."""
return MujocoState.from_mjdata(self.sim.data).flatten()
def set_state(self, x):
"""Restores qpos and qvel, calling forward() to derive other values."""
state = MujocoState.from_flattened(x, self.sim)
state.set_mjdata(self.sim.data)
self.sim.forward() # put mjData in consistent state
def reset(self):
"""See base class."""
return self.env.reset()
def step(self, a):
"""See base class."""
return self.env.step(a)
class MonteCarlo(ABC):
"""Selects an action for a ResettableEnv by random search. Randomly samples
fixed-length sequences of actions. Evaluates each trajectory in the
environment, resetting the state to the original after each trajectory."""
@abstractmethod
def __init__(self, horizon, trajectories):
"""Constructs a MonteCarlo instance for env.
:param horizon: the length of the trajectories to search over.
:param trajectories: the number of trajectories to evaluate."""
self.horizon = horizon
self.trajectories = trajectories
@abstractmethod
def seed(self, seed):
"""Sets a seed for the PRNG for the action sequences.
:param seed (int): a seed."""
pass
@abstractmethod
def best_action(self, state):
"""Returns the best action out of a random search of action sequences.
Generates self.trajectories action sequences, each of length
self.horizon. The cumulative reward of each action sequence is computed,
starting from state. The function returns the first action and the
cumulative reward of the action sequences with the largest cumulative
reward.
:param state: a value returned by env.get_state().
:return (action, reward): the best action found and associated reward."""
pass
class MonteCarloSingle(MonteCarlo):
"""Selects an action for a ResettableEnv by random search.
See base class for details. This implementation is not parallelized."""
def __init__(self, env, horizon, trajectories):
"""See base class."""
super().__init__(horizon, trajectories)
self.env = env
def seed(self, seed):
"""Sets a seed for the PRNG for the action sequences.
:param seed (int): a seed."""
self.env.action_space.np_random.seed(seed)
def best_action(self, state):
"""Returns the best action out of a random search of action sequences.
See base class for details.
Search takes place in a single environment, which is reset to state
before evaluating each action sequence."""
res = []
for _ in range(self.trajectories):
self.env.set_state(state)
us = [self.env.action_space.sample() for _ in range(self.horizon)]
total_rew = 0
for u in us:
_ob, rew, done, _info = self.env.step(u)
total_rew += rew
if done:
break
res.append((us[0], total_rew))
self.env.set_state(state)
best = max(res, key=lambda x: x[1])
return best
def _worker(remote, parent_remote, dynamic_fn_wrapper, horizon, trajectories):
parent_remote.close()
dynamics = dynamic_fn_wrapper.var()
dynamics.reset()
mc = MonteCarloSingle(dynamics, horizon, trajectories)
try:
while True:
cmd, x = remote.recv()
if cmd == "seed":
mc.seed(x)
elif cmd == "search":
best_u, best_r = mc.best_action(x)
remote.send((best_u, best_r))
elif cmd == "close":
remote.close()
break
else:
raise NotImplementedError
except KeyboardInterrupt:
print("MonteCarloParallel worker: got KeyboardInterrupt")
finally:
dynamics.close()
class MonteCarloParallel(MonteCarlo):
"""Like MonteCarlo, but performs the random search in parallel."""
# This implementation is inspired by Baselines SubprocVecEnv.
def __init__(self, env_fns, horizon, trajectories, seed=0):
"""Launch subprocess workers and store configuration parameters.
:param env_fns (list<()->ResettableEnv>): list of thunks.
:param horizon (int): length of trajectories to search over.
:param trajectories (int): minimum number of trajectories to evaluate.
It will be rounded up to the nearest multiple of len(env_fns)."""
super().__init__(horizon, trajectories)
nremotes = len(env_fns)
# Integer ceiling of self.trajectories / nworkers
traj_per_worker = (self.trajectories - 1) // nremotes + 1
pipes = [Pipe() for _ in range(nremotes)]
self.remotes, self.work_remotes = zip(*pipes)
worker_cfgs = zip(self.work_remotes, self.remotes, env_fns)
self.ps = []
for i, (work_remote, remote, dynamic_fn) in enumerate(worker_cfgs):
args = (work_remote, remote, CloudpickleWrapper(dynamic_fn), horizon, traj_per_worker)
process = Process(target=_worker, args=args)
process.daemon = True
# If the main process crashes, we should not cause things to hang
process.start()
self.ps.append(process)
for remote in self.work_remotes:
remote.close()
def seed(self, seed):
"""See base class."""
for i, remote in enumerate(self.remotes):
remote.send(("seed", seed + i))
def best_action(self, state):
"""Returns the best action out of a random search of action sequences."""
for remote in self.remotes:
remote.send(("search", state))
results = [remote.recv() for remote in self.remotes]
best = max(results, key=lambda x: x[1])
return best
def close(self):
"""Shuts down parallel workers."""
for remote in self.remotes:
remote.send(("close", None))
for p in self.ps:
p.join()
def receding_horizon(monte_carlo, env):
"""Receding horizon control
:param monte_carlo(MonteCarlo): a Monte Carlo controller for env or a clone of env.
:param env(ResettableEnv): a resettable environment."""
while True:
state = env.get_state()
a, _seq_rew = monte_carlo.best_action(state)
ob, rew, done, info = env.step(a)
yield a, ob, rew, done, info
if done:
break
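# A minimal wiring sketch (assumes `make_env` is a user-supplied thunk that
# builds a ResettableEnv, e.g. a MuJoCo env wrapped in MujocoResettableWrapper;
# the horizon/trajectory counts below are illustrative only).
def run_episode(make_env, horizon=20, trajectories=100, seed=0):
    env = make_env()
    mc = MonteCarloSingle(env, horizon=horizon, trajectories=trajectories)
    mc.seed(seed)
    env.reset()
    total_reward = 0.0
    for _action, _ob, rew, _done, _info in receding_horizon(mc, env):
        total_reward += rew
    return total_reward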
| 2.5 | 2 |
machineLearnInAction/bayes.py | xuwening/tensorflowDemo | 0 | 2003 | <filename>machineLearnInAction/bayes.py
import numpy as np
def loadDataSet():
postingList = [['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'], #[0,0,1,1,1......]
['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
['stop', 'posting', 'stupid', 'worthless', 'garbage'],
['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]
classVec = [0, 1, 0, 1, 0, 1] # 1 is abusive, 0 not
return postingList, classVec
def createVocabList(dataSet):
vocabSet = set([])
for document in dataSet:
vocabSet = vocabSet | set(document)
return list(vocabSet)
def setOfWords2Vec(vocabList, inputSet):
returnVec = [0] * len(vocabList)
for word in inputSet:
if word in vocabList:
returnVec[vocabList.index(word)] = 1
else:
print('the word: %s is not in my vocabulary' % word)
return returnVec
def trainNB0(trainMatrix, trainCategory):
numTrainDocs = len(trainMatrix)
numWords = len(trainMatrix[0])
pAbusive = sum(trainCategory) / float(numTrainDocs)
p0Num = np.zeros(numWords)
p1Num = np.zeros(numWords)
p0Denom = 0.0
p1Denom = 0.0
for i in range(numTrainDocs):
if trainCategory[i] == 1:
p1Num += trainMatrix[i]
p1Denom += sum(trainMatrix[i])
else:
p0Num += trainMatrix[i]
p0Denom += sum(trainMatrix[i])
p1Vect = p1Num / p1Denom
p0Vect = p0Num / p0Denom
return p0Vect, p1Vect, pAbusive
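# A hedged classifier sketch built on trainNB0's output (naive Bayes rule:
# pick the class with the larger P(words | class) * P(class)). Note that
# trainNB0 above uses raw counts without Laplace smoothing, so any unseen word
# zeroes a product; a common refinement is add-one smoothing and log space.
def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
    v = np.asarray(vec2Classify)
    # multiply P(word | class) over the words present in the document
    p1 = np.prod(np.where(v == 1, p1Vec, 1.0)) * pClass1
    p0 = np.prod(np.where(v == 1, p0Vec, 1.0)) * (1.0 - pClass1)
    return 1 if p1 > p0 else 0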
if __name__ == '__main__':
postinList, classVec = loadDataSet()
myVocabList = createVocabList(postinList)
# print(setOfWords2Vec(myVocabList, postinList[0]))
trainMat = []
for postinDoc in postinList:
trainMat.append(setOfWords2Vec(myVocabList, postinDoc))
print(trainMat)
p0V, p1V, pAb = trainNB0(trainMat, classVec)
print(p0V, p1V, pAb)
| 3.140625 | 3 |
py/debug/__init__.py | segrids/arduino_due | 3 | 2004 | <reponame>segrids/arduino_due<filename>py/debug/__init__.py
from .swd import SWD
from .ahb import AHB
from .debugger import Debugger, HaltError, NotHaltedError
try:
from .dwarf import ELFDebugger
except ImportError:
pass
| 1.265625 | 1 |
HAP-NodeJS/Switch3_1.py | cbdunc2/pi-kit | 0 | 2005 | import subprocess
subprocess.Popen(['sh', '../Switches/Switch3_On.sh'])
| 1.53125 | 2 |
src/cicd_sim/artifact/__init__.py | Software-Natives-OSS/cicd_sim | 0 | 2006 | from . artifactory import Artifactory
__all__ = ['Artifactory']
| 1.070313 | 1 |
mandoline/line_segment3d.py | Spiritdude/mandoline-py | 5 | 2007 |
class LineSegment3D(object):
"""A class to represent a 3D line segment."""
def __init__(self, p1, p2):
"""Initialize with two endpoints."""
if p1 > p2:
p1, p2 = (p2, p1)
self.p1 = p1
self.p2 = p2
self.count = 1
def __len__(self):
"""Line segment always has two endpoints."""
return 2
def __iter__(self):
"""Iterator generator for endpoints."""
yield self.p1
yield self.p2
def __getitem__(self, idx):
"""Given a vertex number, returns a vertex coordinate vector."""
if idx == 0:
return self.p1
if idx == 1:
return self.p2
raise LookupError()
def __hash__(self):
"""Returns hash value for endpoints"""
return hash((self.p1, self.p2))
def __lt__(self, p):
    # Compare endpoints lexicographically (the original `self < p` recursed forever).
    return (self.p1, self.p2) < (p[0], p[1])
def __cmp__(self, p):
"""Compare points for sort ordering in an arbitrary heirarchy."""
val = self[0].__cmp__(p[0])
if val != 0:
return val
return self[1].__cmp__(p[1])
def __format__(self, fmt):
"""Provides .format() support."""
pfx = ""
sep = " - "
sfx = ""
if "a" in fmt:
pfx = "["
sep = ", "
sfx = "]"
elif "s" in fmt:
pfx = ""
sep = " "
sfx = ""
p1 = self.p1.__format__(fmt)
p2 = self.p2.__format__(fmt)
return pfx + p1 + sep + p2 + sfx
def __repr__(self):
"""Standard string representation."""
return "<LineSegment3D: {0}>".format(self)
def __str__(self):
"""Returns a human readable coordinate string."""
return "{0:a}".format(self)
def translate(self, offset):
    """Translate the endpoint's vertices."""
    self.p1 = tuple(self.p1[a] + offset[a] for a in range(3))
    self.p2 = tuple(self.p2[a] + offset[a] for a in range(3))
def scale(self, scale):
    """Scale the endpoint's vertices."""
    self.p1 = tuple(self.p1[a] * scale[a] for a in range(3))
    self.p2 = tuple(self.p2[a] * scale[a] for a in range(3))
def length(self):
"""Returns the length of the line."""
return self.p1.distFromPoint(self.p2)
class LineSegment3DCache(object):
"""Cache class for 3D Line Segments."""
def __init__(self):
"""Initialize as an empty cache."""
self.endhash = {}
self.seghash = {}
def _add_endpoint(self, p, seg):
"""Remember that this segment has a given endpoint"""
if p not in self.endhash:
self.endhash[p] = []
self.endhash[p].append(seg)
def rehash(self):
"""Reset the hashes for changed edge vertices"""
oldseghash = self.seghash
self.seghash = {
(v[0], v[1]): v
for v in oldseghash.values()
}
# Rebuild the endpoint index from the re-keyed segments.
self.endhash = {}
for seg in self.seghash.values():
    self._add_endpoint(seg.p1, seg)
    self._add_endpoint(seg.p2, seg)
def translate(self,offset):
"""Translate vertices of all edges."""
for v in self.seghash.values():
v.translate(offset)
self.rehash()
def scale(self,scale):
"""Scale vertices of all edges."""
for v in self.seghash.values():
v.scale(scale)
self.rehash()
def endpoint_segments(self, p):
"""get list of edges that end at point p"""
if p not in self.endhash:
return []
return self.endhash[p]
def get(self, p1, p2):
"""Given 2 endpoints, return the cached LineSegment3D inst, if any."""
key = (p1, p2) if p1 < p2 else (p2, p1)
if key not in self.seghash:
return None
return self.seghash[key]
def add(self, p1, p2):
"""Given 2 endpoints, return the (new or cached) LineSegment3D inst."""
key = (p1, p2) if p1 < p2 else (p2, p1)
if key in self.seghash:
seg = self.seghash[key]
seg.count += 1
return seg
seg = LineSegment3D(p1, p2)
self.seghash[key] = seg
self._add_endpoint(p1, seg)
self._add_endpoint(p2, seg)
return seg
def __iter__(self):
"""Creates an iterator for the line segments in the cache."""
for pt in self.seghash.values():
yield pt
def __len__(self):
"""Length of sequence."""
return len(self.seghash)
# vim: expandtab tabstop=4 shiftwidth=4 softtabstop=4 nowrap
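# A minimal usage sketch (plain 3-tuples stand in for the point type here;
# note that length() additionally expects points with a distFromPoint method).
if __name__ == "__main__":
    cache = LineSegment3DCache()
    e1 = cache.add((0, 0, 0), (1, 0, 0))
    e2 = cache.add((1, 0, 0), (0, 0, 0))  # same endpoints in either order
    assert e1 is e2 and e1.count == 2
    print(len(cache), cache.endpoint_segments((1, 0, 0)))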
| 3.6875 | 4 |
cacheable/adapter/PeeweeAdapter.py | d1hotpep/cacheable | 0 | 2008 | <reponame>d1hotpep/cacheable
import peewee
import playhouse.kv
from time import time
from . import CacheableAdapter
class PeeweeAdapter(CacheableAdapter, peewee.Model):
key = peewee.CharField(max_length=256, unique=True)
value = playhouse.kv.JSONField()
mtime = peewee.IntegerField(default=time)
ttl = peewee.IntegerField(default=0)
class Meta:
database = peewee.Proxy()
def __init__(self, db_connection, table_name=None):
if table_name:
self._meta.db_table = table_name
self._meta.database.initialize(db_connection)
def multiget(self, keys):
cls = self.__class__
res = self.select(cls.key, cls.value) \
.where(cls.key << keys & self.__ttl_filter()) \
.tuples()
return { x[0] : x[1] for x in res }
@classmethod
def multiset(cls, data, ttl=None):
ts = int(time())
ttl = ttl or 0
kvs = []
for key, value in data.items():
kvs.append({
cls.key : key,
cls.value : value,
cls.mtime : ts,
cls.ttl : ttl,
})
cls.insert_many(kvs).upsert().execute()
def delete(self, key_or_keys):
if list == type(key_or_keys):
keys = key_or_keys
else:
keys = [ key_or_keys ]
cls = self.__class__
peewee.DeleteQuery(cls).where(cls.key << keys).execute()
def list(self, prefix=None, limit=None):
cls = self.__class__
q = self.select(cls.key, cls.value)
if prefix:
if self.__db_type() == peewee.SqliteDatabase:
wildcard = '*'
else:
wildcard = '%'
q = q.where(cls.key % ('%s%s' % (prefix, wildcard)))
q = q.where(self.__ttl_filter())
if limit:
q = q.limit(limit)
res = { x[0] : x[1] for x in q.tuples() }
if prefix:
res = { k[len(prefix):] : v for k, v in res.items() }
return res
def __ttl_filter(self):
"""
Add the TTL where clause to a query, to filter out stale results
"""
ts = int(time())
cls = self.__class__
# Parenthesize the equality test: '|' binds tighter than '==' in Python.
return (cls.ttl == 0) | (cls.mtime + cls.ttl > ts)
def __db_type(self):
return type(self._meta.database.obj)
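# A hedged usage sketch, assuming a peewee/playhouse version that matches the
# imports above (the in-memory database and 'kv_cache' table name are
# illustrative only).
if __name__ == "__main__":
    db = peewee.SqliteDatabase(":memory:")
    adapter = PeeweeAdapter(db, table_name="kv_cache")
    PeeweeAdapter.create_table()
    PeeweeAdapter.multiset({"greeting": "hello"}, ttl=300)
    print(adapter.multiget(["greeting"]))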
| 2.4375 | 2 |
mmgen/models/architectures/arcface/helpers.py | plutoyuxie/mmgeneration | 0 | 2009 | from collections import namedtuple
import torch
from torch.nn import (AdaptiveAvgPool2d, BatchNorm2d, Conv2d, MaxPool2d,
Module, PReLU, ReLU, Sequential, Sigmoid)
# yapf: disable
"""
ArcFace implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch) # isort:skip # noqa
"""
# yapf: enable
class Flatten(Module):
"""Flatten Module."""
def forward(self, input):
return input.view(input.size(0), -1)
def l2_norm(input, axis=1):
"""l2 normalization.
Args:
input (torch.Tensor): The input tensor.
axis (int, optional): Specifies which axis of input to calculate the
norm across. Defaults to 1.
Returns:
Tensor: Tensor after L2 normalization per-instance.
"""
norm = torch.norm(input, 2, axis, True)
output = torch.div(input, norm)
return output
class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])):
"""A named tuple describing a ResNet block."""
def get_block(in_channel, depth, num_units, stride=2):
"""Get a single block config.
Args:
in_channel (int): Input channels.
depth (int): Output channels.
num_units (int): Number of unit modules.
stride (int, optional): Conv2d stride. Defaults to 2.
Returns:
list: A list of unit modules' config.
"""
return [Bottleneck(in_channel, depth, stride)
] + [Bottleneck(depth, depth, 1) for i in range(num_units - 1)]
def get_blocks(num_layers):
"""Get block configs of backbone.
Args:
num_layers (int): Number of ConvBlock layers in backbone.
Raises:
ValueError: `num_layers` must be one of [50, 100, 152].
Returns:
list: A list of block configs.
"""
if num_layers == 50:
blocks = [
get_block(in_channel=64, depth=64, num_units=3),
get_block(in_channel=64, depth=128, num_units=4),
get_block(in_channel=128, depth=256, num_units=14),
get_block(in_channel=256, depth=512, num_units=3)
]
elif num_layers == 100:
blocks = [
get_block(in_channel=64, depth=64, num_units=3),
get_block(in_channel=64, depth=128, num_units=13),
get_block(in_channel=128, depth=256, num_units=30),
get_block(in_channel=256, depth=512, num_units=3)
]
elif num_layers == 152:
blocks = [
get_block(in_channel=64, depth=64, num_units=3),
get_block(in_channel=64, depth=128, num_units=8),
get_block(in_channel=128, depth=256, num_units=36),
get_block(in_channel=256, depth=512, num_units=3)
]
else:
raise ValueError(
'Invalid number of layers: {}. Must be one of [50, 100, 152]'.
format(num_layers))
return blocks
class SEModule(Module):
"""Squeeze-and-Excitation Modules.
Args:
channels (int): Input channels.
reduction (int): Intermediate channels reduction ratio.
"""
def __init__(self, channels, reduction):
super(SEModule, self).__init__()
self.avg_pool = AdaptiveAvgPool2d(1)
self.fc1 = Conv2d(
channels,
channels // reduction,
kernel_size=1,
padding=0,
bias=False)
self.relu = ReLU(inplace=True)
self.fc2 = Conv2d(
channels // reduction,
channels,
kernel_size=1,
padding=0,
bias=False)
self.sigmoid = Sigmoid()
def forward(self, x):
"""Forward Function."""
module_input = x
x = self.avg_pool(x)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.sigmoid(x)
return module_input * x
class bottleneck_IR(Module):
"""Intermediate Resblock of bottleneck.
Args:
in_channel (int): Input channels.
depth (int): Output channels.
stride (int): Conv2d stride.
"""
def __init__(self, in_channel, depth, stride):
"""Intermediate Resblock of bottleneck.
Args:
in_channel (int): Input channels.
depth (int): Output channels.
stride (int): Conv2d stride.
"""
super(bottleneck_IR, self).__init__()
if in_channel == depth:
self.shortcut_layer = MaxPool2d(1, stride)
else:
self.shortcut_layer = Sequential(
Conv2d(in_channel, depth, (1, 1), stride, bias=False),
BatchNorm2d(depth))
self.res_layer = Sequential(
BatchNorm2d(in_channel),
Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
PReLU(depth), Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
BatchNorm2d(depth))
def forward(self, x):
"""Forward function."""
shortcut = self.shortcut_layer(x)
res = self.res_layer(x)
return res + shortcut
class bottleneck_IR_SE(Module):
"""Intermediate Resblock of bottleneck with SEModule.
Args:
in_channel (int): Input channels.
depth (int): Output channels.
stride (int): Conv2d stride.
"""
def __init__(self, in_channel, depth, stride):
super(bottleneck_IR_SE, self).__init__()
if in_channel == depth:
self.shortcut_layer = MaxPool2d(1, stride)
else:
self.shortcut_layer = Sequential(
Conv2d(in_channel, depth, (1, 1), stride, bias=False),
BatchNorm2d(depth))
self.res_layer = Sequential(
BatchNorm2d(in_channel),
Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
PReLU(depth), Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
BatchNorm2d(depth), SEModule(depth, 16))
def forward(self, x):
"""Forward function."""
shortcut = self.shortcut_layer(x)
res = self.res_layer(x)
return res + shortcut
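# A small sanity-check sketch (the input shape and layer sizes below are
# illustrative): run one SE residual unit and inspect the 50-layer configs.
if __name__ == '__main__':
    x = torch.randn(2, 64, 56, 56)
    unit = bottleneck_IR_SE(in_channel=64, depth=128, stride=2)
    print(unit(x).shape)  # torch.Size([2, 128, 28, 28])
    print(len(get_blocks(50)))  # 4 stages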
| 3.109375 | 3 |
createplaylist.py | mahi0601/SpotifyPlaylist | 47 | 2010 | <reponame>mahi0601/SpotifyPlaylist
import os
from spotifyclient import SpotifyClient
def main():
spotify_client = SpotifyClient(os.getenv("SPOTIFY_AUTHORIZATION_TOKEN"),
os.getenv("SPOTIFY_USER_ID"))
# get last played tracks
num_tracks_to_visualise = int(input("How many tracks would you like to visualise? "))
last_played_tracks = spotify_client.get_last_played_tracks(num_tracks_to_visualise)
print(f"\nHere are the last {num_tracks_to_visualise} tracks you listened to on Spotify:")
for index, track in enumerate(last_played_tracks):
print(f"{index+1}- {track}")
# choose which tracks to use as a seed to generate a playlist
indexes = input("\nEnter a list of up to 5 tracks you'd like to use as seeds. Use indexes separated by a space: ")
indexes = indexes.split()
seed_tracks = [last_played_tracks[int(index)-1] for index in indexes]
# get recommended tracks based off seed tracks
recommended_tracks = spotify_client.get_track_recommendations(seed_tracks)
print("\nHere are the recommended tracks which will be included in your new playlist:")
for index, track in enumerate(recommended_tracks):
print(f"{index+1}- {track}")
# get playlist name from user and create playlist
playlist_name = input("\nWhat's the playlist name? ")
playlist = spotify_client.create_playlist(playlist_name)
print(f"\nPlaylist '{playlist.name}' was created successfully.")
# populate playlist with recommended tracks
spotify_client.populate_playlist(playlist, recommended_tracks)
print(f"\nRecommended tracks successfully uploaded to playlist '{playlist.name}'.")
if __name__ == "__main__":
main()
| 3.75 | 4 |
tests/contrib/flask/test_request.py | thieman/dd-trace-py | 0 | 2011 | # -*- coding: utf-8 -*-
from ddtrace.compat import PY2
from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY
from ddtrace.contrib.flask.patch import flask_version
from ddtrace.ext import http
from ddtrace.propagation.http import HTTP_HEADER_TRACE_ID, HTTP_HEADER_PARENT_ID
from flask import abort
from . import BaseFlaskTestCase
from ...utils import assert_span_http_status_code
base_exception_name = 'builtins.Exception'
if PY2:
base_exception_name = 'exceptions.Exception'
class FlaskRequestTestCase(BaseFlaskTestCase):
def test_request(self):
"""
When making a request
We create the expected spans
"""
@self.app.route('/')
def index():
return 'Hello Flask', 200
res = self.client.get('/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data, b'Hello Flask')
spans = self.get_spans()
self.assertEqual(len(spans), 8)
# Assert the order of the spans created
self.assertListEqual(
[
'flask.request',
'flask.try_trigger_before_first_request_functions',
'flask.preprocess_request',
'flask.dispatch_request',
'tests.contrib.flask.test_request.index',
'flask.process_response',
'flask.do_teardown_request',
'flask.do_teardown_appcontext',
],
[s.name for s in spans],
)
# Assert span services
for span in spans:
self.assertEqual(span.service, 'flask')
# Root request span
req_span = spans[0]
self.assertEqual(req_span.service, 'flask')
self.assertEqual(req_span.name, 'flask.request')
self.assertEqual(req_span.resource, 'GET /')
self.assertEqual(req_span.span_type, 'web')
self.assertEqual(req_span.error, 0)
self.assertIsNone(req_span.parent_id)
# Request tags
self.assertEqual(req_span.get_tag('flask.endpoint'), 'index')
self.assertEqual(req_span.get_tag('flask.url_rule'), '/')
self.assertEqual(req_span.get_tag('http.method'), 'GET')
self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/')
assert_span_http_status_code(req_span, 200)
assert http.QUERY_STRING not in req_span.meta
# Handler span
handler_span = spans[4]
self.assertEqual(handler_span.service, 'flask')
self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.index')
self.assertEqual(handler_span.resource, '/')
self.assertEqual(req_span.error, 0)
def test_request_query_string_trace(self):
"""Make sure when making a request that we create the expected spans and capture the query string."""
@self.app.route('/')
def index():
return 'Hello Flask', 200
with self.override_http_config('flask', dict(trace_query_string=True)):
self.client.get('/?foo=bar&baz=biz')
spans = self.get_spans()
# Request tags
assert spans[0].get_tag(http.QUERY_STRING) == 'foo=bar&baz=biz'
def test_analytics_global_on_integration_default(self):
"""
When making a request
When the integration event sample rate is not set and trace search is enabled globally
We expect the root span to have the appropriate tag
"""
@self.app.route('/')
def index():
return 'Hello Flask', 200
with self.override_global_config(dict(analytics_enabled=True)):
res = self.client.get('/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data, b'Hello Flask')
root = self.get_root_span()
root.assert_matches(
name='flask.request',
metrics={
ANALYTICS_SAMPLE_RATE_KEY: 1.0,
},
)
for span in self.spans:
if span == root:
continue
self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY))
def test_analytics_global_on_integration_on(self):
"""
When making a request
When an integration trace search is enabled and sample rate is set and globally trace search is enabled
We expect the root span to have the appropriate tag
"""
@self.app.route('/')
def index():
return 'Hello Flask', 200
with self.override_global_config(dict(analytics_enabled=True)):
with self.override_config('flask', dict(analytics_enabled=True, analytics_sample_rate=0.5)):
res = self.client.get('/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data, b'Hello Flask')
root = self.get_root_span()
root.assert_matches(
name='flask.request',
metrics={
ANALYTICS_SAMPLE_RATE_KEY: 0.5,
},
)
for span in self.spans:
if span == root:
continue
self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY))
def test_analytics_global_off_integration_default(self):
"""
When making a request
When the integration event sample rate is not set and trace search is disabled globally
We expect the root span to not include tag
"""
@self.app.route('/')
def index():
return 'Hello Flask', 200
with self.override_global_config(dict(analytics_enabled=False)):
res = self.client.get('/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data, b'Hello Flask')
root = self.get_root_span()
self.assertIsNone(root.get_metric(ANALYTICS_SAMPLE_RATE_KEY))
for span in self.spans:
if span == root:
continue
self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY))
def test_analytics_global_off_integration_on(self):
"""
When making a request
When an integration trace search is enabled and sample rate is set and globally trace search is disabled
We expect the root span to have the appropriate tag
"""
@self.app.route('/')
def index():
return 'Hello Flask', 200
with self.override_global_config(dict(analytics_enabled=False)):
with self.override_config('flask', dict(analytics_enabled=True, analytics_sample_rate=0.5)):
res = self.client.get('/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data, b'Hello Flask')
root = self.get_root_span()
root.assert_matches(
name='flask.request',
metrics={
ANALYTICS_SAMPLE_RATE_KEY: 0.5,
},
)
for span in self.spans:
if span == root:
continue
self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY))
def test_distributed_tracing(self):
"""
When making a request
When distributed tracing headers are present
We create the expected spans
"""
@self.app.route('/')
def index():
return 'Hello Flask', 200
# Default: distributed tracing enabled
res = self.client.get('/', headers={
HTTP_HEADER_PARENT_ID: '12345',
HTTP_HEADER_TRACE_ID: '678910',
})
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data, b'Hello Flask')
# Assert parent and trace id are properly set on the root span
span = self.find_span_by_name(self.get_spans(), 'flask.request')
self.assertEqual(span.trace_id, 678910)
self.assertEqual(span.parent_id, 12345)
# Explicitly enable distributed tracing
with self.override_config('flask', dict(distributed_tracing_enabled=True)):
res = self.client.get('/', headers={
HTTP_HEADER_PARENT_ID: '12345',
HTTP_HEADER_TRACE_ID: '678910',
})
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data, b'Hello Flask')
# Assert parent and trace id are properly set on the root span
span = self.find_span_by_name(self.get_spans(), 'flask.request')
self.assertEqual(span.trace_id, 678910)
self.assertEqual(span.parent_id, 12345)
# With distributed tracing disabled
with self.override_config('flask', dict(distributed_tracing_enabled=False)):
res = self.client.get('/', headers={
HTTP_HEADER_PARENT_ID: '12345',
HTTP_HEADER_TRACE_ID: '678910',
})
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data, b'Hello Flask')
# Assert parent and trace id are properly set on the root span
span = self.find_span_by_name(self.get_spans(), 'flask.request')
self.assertNotEqual(span.trace_id, 678910)
self.assertIsNone(span.parent_id)
def test_request_query_string(self):
"""
When making a request
When the request contains a query string
We create the expected spans
"""
@self.app.route('/')
def index():
return 'Hello Flask', 200
res = self.client.get('/', query_string=dict(hello='flask'))
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data, b'Hello Flask')
spans = self.get_spans()
self.assertEqual(len(spans), 8)
# Assert the order of the spans created
self.assertListEqual(
[
'flask.request',
'flask.try_trigger_before_first_request_functions',
'flask.preprocess_request',
'flask.dispatch_request',
'tests.contrib.flask.test_request.index',
'flask.process_response',
'flask.do_teardown_request',
'flask.do_teardown_appcontext',
],
[s.name for s in spans],
)
# Assert span services
for span in spans:
self.assertEqual(span.service, 'flask')
# Root request span
req_span = spans[0]
self.assertEqual(req_span.service, 'flask')
self.assertEqual(req_span.name, 'flask.request')
# Note: contains no query string
self.assertEqual(req_span.resource, 'GET /')
self.assertEqual(req_span.span_type, 'web')
self.assertEqual(req_span.error, 0)
self.assertIsNone(req_span.parent_id)
# Request tags
self.assertEqual(req_span.get_tag('flask.endpoint'), 'index')
# Note: contains no query string
self.assertEqual(req_span.get_tag('flask.url_rule'), '/')
self.assertEqual(req_span.get_tag('http.method'), 'GET')
# Note: contains no query string
self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/')
assert_span_http_status_code(req_span, 200)
# Handler span
handler_span = spans[4]
self.assertEqual(handler_span.service, 'flask')
self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.index')
# Note: contains no query string
self.assertEqual(handler_span.resource, '/')
self.assertEqual(req_span.error, 0)
def test_request_unicode(self):
"""
When making a request
When the url contains unicode
We create the expected spans
"""
@self.app.route(u'/üŋïĉóđē')
def unicode():
return 'üŋïĉóđē', 200
res = self.client.get(u'/üŋïĉóđē')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data, b'\xc3\xbc\xc5\x8b\xc3\xaf\xc4\x89\xc3\xb3\xc4\x91\xc4\x93')
spans = self.get_spans()
self.assertEqual(len(spans), 8)
# Assert the order of the spans created
self.assertListEqual(
[
'flask.request',
'flask.try_trigger_before_first_request_functions',
'flask.preprocess_request',
'flask.dispatch_request',
'tests.contrib.flask.test_request.unicode',
'flask.process_response',
'flask.do_teardown_request',
'flask.do_teardown_appcontext',
],
[s.name for s in spans],
)
# Assert span services
for span in spans:
self.assertEqual(span.service, 'flask')
# Root request span
req_span = spans[0]
self.assertEqual(req_span.service, 'flask')
self.assertEqual(req_span.name, 'flask.request')
self.assertEqual(req_span.resource, u'GET /üŋïĉóđē')
self.assertEqual(req_span.span_type, 'web')
self.assertEqual(req_span.error, 0)
self.assertIsNone(req_span.parent_id)
# Request tags
self.assertEqual(req_span.get_tag('flask.endpoint'), 'unicode')
self.assertEqual(req_span.get_tag('flask.url_rule'), u'/üŋïĉóđē')
self.assertEqual(req_span.get_tag('http.method'), 'GET')
self.assertEqual(req_span.get_tag(http.URL), u'http://localhost/üŋïĉóđē')
assert_span_http_status_code(req_span, 200)
# Handler span
handler_span = spans[4]
self.assertEqual(handler_span.service, 'flask')
self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.unicode')
self.assertEqual(handler_span.resource, u'/üŋïĉóđē')
self.assertEqual(req_span.error, 0)
def test_request_404(self):
"""
When making a request
When the requested endpoint was not found
We create the expected spans
"""
res = self.client.get('/not-found')
self.assertEqual(res.status_code, 404)
spans = self.get_spans()
self.assertEqual(len(spans), 9)
# Assert the order of the spans created
self.assertListEqual(
[
'flask.request',
'flask.try_trigger_before_first_request_functions',
'flask.preprocess_request',
'flask.dispatch_request',
'flask.handle_user_exception',
'flask.handle_http_exception',
'flask.process_response',
'flask.do_teardown_request',
'flask.do_teardown_appcontext',
],
[s.name for s in spans],
)
# Assert span services
for span in spans:
self.assertEqual(span.service, 'flask')
# Root request span
req_span = spans[0]
self.assertEqual(req_span.service, 'flask')
self.assertEqual(req_span.name, 'flask.request')
self.assertEqual(req_span.resource, 'GET 404')
self.assertEqual(req_span.span_type, 'web')
self.assertEqual(req_span.error, 0)
self.assertIsNone(req_span.parent_id)
# Request tags
self.assertEqual(req_span.get_tag('http.method'), 'GET')
self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/not-found')
assert_span_http_status_code(req_span, 404)
# Dispatch span
dispatch_span = spans[3]
self.assertEqual(dispatch_span.service, 'flask')
self.assertEqual(dispatch_span.name, 'flask.dispatch_request')
self.assertEqual(dispatch_span.resource, 'flask.dispatch_request')
self.assertEqual(dispatch_span.error, 1)
self.assertTrue(dispatch_span.get_tag('error.msg').startswith('404 Not Found'))
self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback'))
self.assertEqual(dispatch_span.get_tag('error.type'), 'werkzeug.exceptions.NotFound')
def test_request_abort_404(self):
"""
When making a request
When the requested endpoint calls `abort(404)`
We create the expected spans
"""
@self.app.route('/not-found')
def not_found():
abort(404)
res = self.client.get('/not-found')
self.assertEqual(res.status_code, 404)
spans = self.get_spans()
self.assertEqual(len(spans), 10)
# Assert the order of the spans created
self.assertListEqual(
[
'flask.request',
'flask.try_trigger_before_first_request_functions',
'flask.preprocess_request',
'flask.dispatch_request',
'tests.contrib.flask.test_request.not_found',
'flask.handle_user_exception',
'flask.handle_http_exception',
'flask.process_response',
'flask.do_teardown_request',
'flask.do_teardown_appcontext',
],
[s.name for s in spans],
)
# Assert span services
for span in spans:
self.assertEqual(span.service, 'flask')
# Root request span
req_span = spans[0]
self.assertEqual(req_span.service, 'flask')
self.assertEqual(req_span.name, 'flask.request')
self.assertEqual(req_span.resource, 'GET /not-found')
self.assertEqual(req_span.span_type, 'web')
self.assertEqual(req_span.error, 0)
self.assertIsNone(req_span.parent_id)
# Request tags
self.assertEqual(req_span.get_tag('http.method'), 'GET')
self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/not-found')
assert_span_http_status_code(req_span, 404)
self.assertEqual(req_span.get_tag('flask.endpoint'), 'not_found')
self.assertEqual(req_span.get_tag('flask.url_rule'), '/not-found')
# Dispatch span
dispatch_span = spans[3]
self.assertEqual(dispatch_span.service, 'flask')
self.assertEqual(dispatch_span.name, 'flask.dispatch_request')
self.assertEqual(dispatch_span.resource, 'flask.dispatch_request')
self.assertEqual(dispatch_span.error, 1)
self.assertTrue(dispatch_span.get_tag('error.msg').startswith('404 Not Found'))
self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback'))
self.assertEqual(dispatch_span.get_tag('error.type'), 'werkzeug.exceptions.NotFound')
# Handler span
handler_span = spans[4]
self.assertEqual(handler_span.service, 'flask')
self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.not_found')
self.assertEqual(handler_span.resource, '/not-found')
self.assertEqual(handler_span.error, 1)
self.assertTrue(handler_span.get_tag('error.msg').startswith('404 Not Found'))
self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback'))
self.assertEqual(handler_span.get_tag('error.type'), 'werkzeug.exceptions.NotFound')
def test_request_500(self):
"""
When making a request
When the requested endpoint raises an exception
We create the expected spans
"""
@self.app.route('/500')
def fivehundred():
raise Exception('500 error')
res = self.client.get('/500')
self.assertEqual(res.status_code, 500)
spans = self.get_spans()
self.assertEqual(len(spans), 9)
# Assert the order of the spans created
self.assertListEqual(
[
'flask.request',
'flask.try_trigger_before_first_request_functions',
'flask.preprocess_request',
'flask.dispatch_request',
'tests.contrib.flask.test_request.fivehundred',
'flask.handle_user_exception',
'flask.handle_exception',
'flask.do_teardown_request',
'flask.do_teardown_appcontext',
],
[s.name for s in spans],
)
# Assert span services
for span in spans:
self.assertEqual(span.service, 'flask')
# Root request span
req_span = spans[0]
self.assertEqual(req_span.service, 'flask')
self.assertEqual(req_span.name, 'flask.request')
self.assertEqual(req_span.resource, 'GET /500')
self.assertEqual(req_span.span_type, 'web')
self.assertEqual(req_span.error, 1)
self.assertIsNone(req_span.parent_id)
# Request tags
self.assertEqual(req_span.get_tag('http.method'), 'GET')
self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/500')
assert_span_http_status_code(req_span, 500)
self.assertEqual(req_span.get_tag('flask.endpoint'), 'fivehundred')
self.assertEqual(req_span.get_tag('flask.url_rule'), '/500')
# Dispatch span
dispatch_span = spans[3]
self.assertEqual(dispatch_span.service, 'flask')
self.assertEqual(dispatch_span.name, 'flask.dispatch_request')
self.assertEqual(dispatch_span.resource, 'flask.dispatch_request')
self.assertEqual(dispatch_span.error, 1)
self.assertTrue(dispatch_span.get_tag('error.msg').startswith('500 error'))
self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback'))
self.assertEqual(dispatch_span.get_tag('error.type'), base_exception_name)
# Handler span
handler_span = spans[4]
self.assertEqual(handler_span.service, 'flask')
self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.fivehundred')
self.assertEqual(handler_span.resource, '/500')
self.assertEqual(handler_span.error, 1)
self.assertTrue(handler_span.get_tag('error.msg').startswith('500 error'))
self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback'))
self.assertEqual(handler_span.get_tag('error.type'), base_exception_name)
# User exception span
user_ex_span = spans[5]
self.assertEqual(user_ex_span.service, 'flask')
self.assertEqual(user_ex_span.name, 'flask.handle_user_exception')
self.assertEqual(user_ex_span.resource, 'flask.handle_user_exception')
self.assertEqual(user_ex_span.error, 1)
self.assertTrue(user_ex_span.get_tag('error.msg').startswith('500 error'))
self.assertTrue(user_ex_span.get_tag('error.stack').startswith('Traceback'))
self.assertEqual(user_ex_span.get_tag('error.type'), base_exception_name)
def test_request_501(self):
"""
When making a request
When the requested endpoint calls `abort(501)`
We create the expected spans
"""
@self.app.route('/501')
def fivehundredone():
abort(501)
res = self.client.get('/501')
self.assertEqual(res.status_code, 501)
spans = self.get_spans()
self.assertEqual(len(spans), 10)
# Assert the order of the spans created
self.assertListEqual(
[
'flask.request',
'flask.try_trigger_before_first_request_functions',
'flask.preprocess_request',
'flask.dispatch_request',
'tests.contrib.flask.test_request.fivehundredone',
'flask.handle_user_exception',
'flask.handle_http_exception',
'flask.process_response',
'flask.do_teardown_request',
'flask.do_teardown_appcontext',
],
[s.name for s in spans],
)
# Assert span services
for span in spans:
self.assertEqual(span.service, 'flask')
# Root request span
req_span = spans[0]
self.assertEqual(req_span.service, 'flask')
self.assertEqual(req_span.name, 'flask.request')
self.assertEqual(req_span.resource, 'GET /501')
self.assertEqual(req_span.span_type, 'web')
self.assertEqual(req_span.error, 1)
self.assertIsNone(req_span.parent_id)
# Request tags
self.assertEqual(req_span.get_tag('http.method'), 'GET')
self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/501')
assert_span_http_status_code(req_span, 501)
self.assertEqual(req_span.get_tag('flask.endpoint'), 'fivehundredone')
self.assertEqual(req_span.get_tag('flask.url_rule'), '/501')
# Dispatch span
dispatch_span = spans[3]
self.assertEqual(dispatch_span.service, 'flask')
self.assertEqual(dispatch_span.name, 'flask.dispatch_request')
self.assertEqual(dispatch_span.resource, 'flask.dispatch_request')
self.assertEqual(dispatch_span.error, 1)
self.assertTrue(dispatch_span.get_tag('error.msg').startswith('501 Not Implemented'))
self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback'))
self.assertEqual(dispatch_span.get_tag('error.type'), 'werkzeug.exceptions.NotImplemented')
# Handler span
handler_span = spans[4]
self.assertEqual(handler_span.service, 'flask')
self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.fivehundredone')
self.assertEqual(handler_span.resource, '/501')
self.assertEqual(handler_span.error, 1)
self.assertTrue(handler_span.get_tag('error.msg').startswith('501 Not Implemented'))
self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback'))
self.assertEqual(handler_span.get_tag('error.type'), 'werkzeug.exceptions.NotImplemented')
# User exception span
user_ex_span = spans[5]
self.assertEqual(user_ex_span.service, 'flask')
self.assertEqual(user_ex_span.name, 'flask.handle_user_exception')
self.assertEqual(user_ex_span.resource, 'flask.handle_user_exception')
self.assertEqual(user_ex_span.error, 0)
def test_request_error_handler(self):
"""
When making a request
When the requested endpoint raises an exception
We create the expected spans
"""
@self.app.errorhandler(500)
def error_handler(e):
return 'Whoops', 500
@self.app.route('/500')
def fivehundred():
raise Exception('500 error')
res = self.client.get('/500')
self.assertEqual(res.status_code, 500)
self.assertEqual(res.data, b'Whoops')
spans = self.get_spans()
if flask_version >= (0, 12, 0):
self.assertEqual(len(spans), 11)
# Assert the order of the spans created
self.assertListEqual(
[
'flask.request',
'flask.try_trigger_before_first_request_functions',
'flask.preprocess_request',
'flask.dispatch_request',
'tests.contrib.flask.test_request.fivehundred',
'flask.handle_user_exception',
'flask.handle_exception',
'tests.contrib.flask.test_request.error_handler',
'flask.process_response',
'flask.do_teardown_request',
'flask.do_teardown_appcontext',
],
[s.name for s in spans],
)
else:
self.assertEqual(len(spans), 10)
# Assert the order of the spans created
self.assertListEqual(
[
'flask.request',
'flask.try_trigger_before_first_request_functions',
'flask.preprocess_request',
'flask.dispatch_request',
'tests.contrib.flask.test_request.fivehundred',
'flask.handle_user_exception',
'flask.handle_exception',
'tests.contrib.flask.test_request.error_handler',
'flask.do_teardown_request',
'flask.do_teardown_appcontext',
],
[s.name for s in spans],
)
# Assert span services
for span in spans:
self.assertEqual(span.service, 'flask')
# Root request span
req_span = spans[0]
self.assertEqual(req_span.service, 'flask')
self.assertEqual(req_span.name, 'flask.request')
self.assertEqual(req_span.resource, 'GET /500')
self.assertEqual(req_span.span_type, 'web')
self.assertEqual(req_span.error, 1)
self.assertIsNone(req_span.parent_id)
# Request tags
self.assertEqual(req_span.get_tag('http.method'), 'GET')
self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/500')
assert_span_http_status_code(req_span, 500)
self.assertEqual(req_span.get_tag('flask.endpoint'), 'fivehundred')
self.assertEqual(req_span.get_tag('flask.url_rule'), '/500')
# Dispatch span
dispatch_span = spans[3]
self.assertEqual(dispatch_span.service, 'flask')
self.assertEqual(dispatch_span.name, 'flask.dispatch_request')
self.assertEqual(dispatch_span.resource, 'flask.dispatch_request')
self.assertEqual(dispatch_span.error, 1)
self.assertTrue(dispatch_span.get_tag('error.msg').startswith('500 error'))
self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback'))
self.assertEqual(dispatch_span.get_tag('error.type'), base_exception_name)
# Handler span
handler_span = spans[4]
self.assertEqual(handler_span.service, 'flask')
self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.fivehundred')
self.assertEqual(handler_span.resource, '/500')
self.assertEqual(handler_span.error, 1)
self.assertTrue(handler_span.get_tag('error.msg').startswith('500 error'))
self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback'))
self.assertEqual(handler_span.get_tag('error.type'), base_exception_name)
# User exception span
user_ex_span = spans[5]
self.assertEqual(user_ex_span.service, 'flask')
self.assertEqual(user_ex_span.name, 'flask.handle_user_exception')
self.assertEqual(user_ex_span.resource, 'flask.handle_user_exception')
self.assertEqual(user_ex_span.error, 1)
self.assertTrue(user_ex_span.get_tag('error.msg').startswith('500 error'))
self.assertTrue(user_ex_span.get_tag('error.stack').startswith('Traceback'))
self.assertEqual(user_ex_span.get_tag('error.type'), base_exception_name)
| 2.4375 | 2 |
ConvDR/data/preprocess_cast19.py | blazejdolicki/CHEDAR | 1 | 2012 | <filename>ConvDR/data/preprocess_cast19.py
import argparse
from trec_car import read_data
from tqdm import tqdm
import pickle
import os
import json
import copy
from utils.util import NUM_FOLD
def parse_sim_file(filename):
"""
Reads the deduplicated documents file and stores the
duplicate passage ids into a dictionary
"""
sim_dict = {}
lines = open(filename).readlines()
for line in lines:
data = line.strip().split(':')
if len(data[1]) > 0:
sim_docs = data[-1].split(',')
for docs in sim_docs:
sim_dict[docs] = 1
return sim_dict
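# Illustrative duplicate-file format (the ids below are made up): each line is
#   "<kept_passage_id>:<duplicate_id_1>,<duplicate_id_2>,..."
# and every id on the right-hand side becomes a key of the returned dict, e.g.
#   MARCO_100:MARCO_200,MARCO_300  ->  {'MARCO_200': 1, 'MARCO_300': 1}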
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--car_cbor", type=str)
parser.add_argument("--msmarco_collection", type=str)
parser.add_argument("--duplicate_file", type=str)
parser.add_argument("--cast_dir", type=str)
parser.add_argument("--out_data_dir", type=str)
parser.add_argument("--out_collection_dir", type=str)
args = parser.parse_args()
# INPUT
sim_file = args.duplicate_file
cast_topics_raw_file = os.path.join(args.cast_dir,
"evaluation_topics_v1.0.json")
cast_topics_manual_file = os.path.join(
args.cast_dir, "evaluation_topics_annotated_resolved_v1.0.tsv")
cast_qrels_file = os.path.join(args.cast_dir, "2019qrels.txt")
# OUTPUT
out_topics_file = os.path.join(args.out_data_dir, "eval_topics.jsonl")
out_raw_queries_file = os.path.join(args.out_data_dir, "queries.raw.tsv")
out_manual_queries_file = os.path.join(args.out_data_dir,
"queries.manual.tsv")
out_qrels_file = os.path.join(args.out_data_dir, "qrels.tsv")
car_id_to_idx_file = os.path.join(args.out_collection_dir,
"car_id_to_idx.pickle")
car_idx_to_id_file = os.path.join(args.out_collection_dir,
"car_idx_to_id.pickle")
out_collection_file = os.path.join(args.out_collection_dir,
"collection.tsv")
# 1. Combine TREC-CAR & MS MARCO, remove duplicate passages, assign new ids
car_id_to_idx = {}
car_idx_to_id = []
if os.path.exists(out_collection_file) and os.path.exists(
car_id_to_idx_file) and os.path.exists(car_idx_to_id_file):
print("Preprocessed collection found. Loading car_id_to_idx...")
with open(car_id_to_idx_file, "rb") as f:
car_id_to_idx = pickle.load(f)
else:
sim_dict = parse_sim_file(sim_file)
car_base_id = 10000000
i = 0
with open(out_collection_file, "w", encoding="utf-8") as f: #FIX change 'a' to 'w' in normal run
print("Processing TREC-CAR...")
for para in tqdm(
read_data.iter_paragraphs(open(args.car_cbor, 'rb'))):
car_id = "CAR_" + para.para_id
text = para.get_text()
text = text.replace("\t", " ").replace("\n",
" ").replace("\r", " ")
idx = car_base_id + i
car_id_to_idx[
car_id] = idx # e.g. CAR_76a4a716d4b1b01995c6663ee16e94b4ca35fdd3 -> 10000044
car_idx_to_id.append(car_id)
f.write("{}\t{}\n".format(idx, text))
i += 1
print("Processing MS MARCO...")
removed = 0
with open(args.msmarco_collection, "r") as m:
for line in tqdm(m):
marco_id, text = line.strip().split("\t")
if ("MARCO_" + marco_id) in sim_dict:
removed += 1
continue
f.write("{}\t{}\n".format(marco_id, text))
print("Removed " + str(removed) + " passages")
print("Dumping id mappings to {} and {}...".format(car_id_to_idx_file, car_idx_to_id_file))
with open(car_id_to_idx_file, "wb") as f:
pickle.dump(car_id_to_idx, f)
with open(car_idx_to_id_file, "wb") as f:
pickle.dump(car_idx_to_id, f)
# 2. Process queries
print("Processing CAsT utterances...")
with open(cast_topics_raw_file, "r") as fin:
raw_data = json.load(fin)
with open(cast_topics_manual_file, "r") as fin:
annonated_lines = fin.readlines()
out_raw_queries = open(out_raw_queries_file, "w")
out_manual_queries = open(out_manual_queries_file, "w")
all_annonated = {}
for line in annonated_lines:
splitted = line.split('\t')
out_manual_queries.write(line)
topic_query = splitted[0]
query = splitted[1].strip()
topic_id = topic_query.split('_')[0]
query_id = topic_query.split('_')[1]
if topic_id not in all_annonated:
all_annonated[topic_id] = {}
all_annonated[topic_id][query_id] = query
out_manual_queries.close()
topic_number_dict = {}
data = []
for group in raw_data:
topic_number = str(group['number'])
description = group.get('description', '')
turn = group['turn']
title = group.get('title', '')
queries = []
for query in turn:
query_number, raw_utterance = str(
query['number']), query['raw_utterance']
queries.append(raw_utterance)
record = {}
record['topic_number'] = topic_number
record['query_number'] = query_number
record['description'] = description
record['title'] = title
record['input'] = copy.deepcopy(queries)
record['target'] = all_annonated[topic_number][query_number]
out_raw_queries.write("{}_{}\t{}\n".format(topic_number,
query_number,
raw_utterance))
if not topic_number in topic_number_dict:
topic_number_dict[topic_number] = len(topic_number_dict)
data.append(record)
out_raw_queries.close()
with open(out_topics_file, 'w') as fout:
for item in data:
json_str = json.dumps(item)
fout.write(json_str + '\n')
# Split eval data into K-fold
topic_per_fold = len(topic_number_dict) // NUM_FOLD
for i in range(NUM_FOLD):
with open(out_topics_file + "." + str(i), 'w') as fout:
for item in data:
idx = topic_number_dict[item['topic_number']]
if idx // topic_per_fold == i:
json_str = json.dumps(item)
fout.write(json_str + '\n')
# 3. Process and convert qrels
print("Processing qrels...")
with open(cast_qrels_file, "r") as oq, open(out_qrels_file, "w") as nq:
for line in oq:
qid, _, pid, rel = line.strip().split()
if pid.startswith("CAR_"):
assert car_id_to_idx[pid] != -1
pid = car_id_to_idx[pid]
elif pid.startswith("MARCO_"):
pid = int(pid[6:])
else:
continue
nq.write(qid + "\t0\t" + str(pid) + "\t" + rel + "\n")
print("End")
| 2.578125 | 3 |
coord_convert/geojson_utils.py | brandonxiang/example-pyQGIS | 3 | 2013 | __doc__ = 'github: https://github.com/brandonxiang/geojson-python-utils'
import math
from coordTransform_utils import wgs84togcj02
from coordTransform_utils import gcj02tobd09
def linestrings_intersect(line1, line2):
"""
To validate whether two linestrings from geojson intersect each other.
reference: http://www.kevlindev.com/gui/math/intersection/Intersection.js
Keyword arguments:
line1 -- first line geojson object
line2 -- second line geojson object
if(line1 intersects line2) return array of intersection points else empty array
"""
intersects = []
for i in range(0, len(line1['coordinates']) - 1):
for j in range(0, len(line2['coordinates']) - 1):
a1_x = line1['coordinates'][i][1]
a1_y = line1['coordinates'][i][0]
a2_x = line1['coordinates'][i + 1][1]
a2_y = line1['coordinates'][i + 1][0]
b1_x = line2['coordinates'][j][1]
b1_y = line2['coordinates'][j][0]
b2_x = line2['coordinates'][j + 1][1]
b2_y = line2['coordinates'][j + 1][0]
ua_t = (b2_x - b1_x) * (a1_y - b1_y) - \
(b2_y - b1_y) * (a1_x - b1_x)
ub_t = (a2_x - a1_x) * (a1_y - b1_y) - \
(a2_y - a1_y) * (a1_x - b1_x)
u_b = (b2_y - b1_y) * (a2_x - a1_x) - (b2_x - b1_x) * (a2_y - a1_y)
if not u_b == 0:
u_a = ua_t / u_b
u_b = ub_t / u_b
if 0 <= u_a and u_a <= 1 and 0 <= u_b and u_b <= 1:
intersects.append({'type': 'Point', 'coordinates': [
a1_x + u_a * (a2_x - a1_x), a1_y + u_a * (a2_y - a1_y)]})
# if len(intersects) == 0:
# intersects = False
return intersects
def _bbox_around_polycoords(coords):
"""
bounding box
"""
x_all = []
y_all = []
for first in coords[0]:
x_all.append(first[1])
y_all.append(first[0])
return [min(x_all), min(y_all), max(x_all), max(y_all)]
def _point_in_bbox(point, bounds):
"""
check whether the point is inside the bounding box
"""
return not(point['coordinates'][1] < bounds[0] or point['coordinates'][1] > bounds[2]
or point['coordinates'][0] < bounds[1] or point['coordinates'][0] > bounds[3])
def _pnpoly(x, y, coords):
"""
the ray-casting (pnpoly) algorithm to judge whether the point is located in the polygon
reference: https://www.ecse.rpi.edu/~wrf/Research/Short_Notes/pnpoly.html#Explanation
"""
vert = [[0, 0]]
for coord in coords:
for node in coord:
vert.append(node)
vert.append(coord[0])
vert.append([0, 0])
inside = False
i = 0
j = len(vert) - 1
while i < len(vert):
if ((vert[i][0] > y) != (vert[j][0] > y)) and (x < (vert[j][1] - vert[i][1])
* (y - vert[i][0]) / (vert[j][0] - vert[i][0]) + vert[i][1]):
inside = not inside
j = i
i += 1
return inside
def _point_in_polygon(point, coords):
inside_box = False
for coord in coords:
if inside_box:
break
if _point_in_bbox(point, _bbox_around_polycoords(coord)):
inside_box = True
if not inside_box:
return False
inside_poly = False
for coord in coords:
if inside_poly:
break
if _pnpoly(point['coordinates'][1], point['coordinates'][0], coord):
inside_poly = True
return inside_poly
def point_in_polygon(point, poly):
"""
    check whether the point is located in a polygon
Keyword arguments:
point -- point geojson object
poly -- polygon geojson object
if(point inside poly) return true else false
"""
coords = [poly['coordinates']] if poly[
'type'] == 'Polygon' else poly['coordinates']
return _point_in_polygon(point, coords)
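def _example_point_in_polygon():
    # Usage sketch (not part of the original module; coordinates are made up):
    # a unit square should contain the point [0.5, 0.5].
    square = {'type': 'Polygon',
              'coordinates': [[[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]]]}
    point = {'type': 'Point', 'coordinates': [0.5, 0.5]}
    return point_in_polygon(point, square)  # expected: True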
def point_in_multipolygon(point, multipoly):
"""
    check whether the point is located in a multipolygon (donut polygon is not supported)
Keyword arguments:
point -- point geojson object
multipoly -- multipolygon geojson object
if(point inside multipoly) return true else false
"""
coords_array = [multipoly['coordinates']] if multipoly[
'type'] == "MultiPolygon" else multipoly['coordinates']
for coords in coords_array:
if _point_in_polygon(point, coords):
return True
return False
def number2radius(number):
"""
convert degree into radius
Keyword arguments:
number -- degree
return radius
"""
return number * math.pi / 180
def number2degree(number):
"""
convert radius into degree
Keyword arguments:
number -- radius
return degree
"""
return number * 180 / math.pi
def draw_circle(radius_in_meters, center_point, steps=15):
    """
    get a circle shape polygon based on center_point and radius
    Keyword arguments:
    radius_in_meters -- circle radius in meters
    center_point -- center point geojson object
    steps -- number of vertices of the output polygon (minimum 15)
    return a polygon geojson object approximating the circle
    """
steps = steps if steps > 15 else 15
center = [center_point['coordinates'][1], center_point['coordinates'][0]]
dist = (radius_in_meters / 1000) / 6371
    # convert meters to angular distance in radians
rad_center = [number2radius(center[0]), number2radius(center[1])]
# 15 sided circle
poly = []
for step in range(0, steps):
brng = 2 * math.pi * step / steps
lat = math.asin(math.sin(rad_center[0]) * math.cos(dist) +
math.cos(rad_center[0]) * math.sin(dist) * math.cos(brng))
lng = rad_center[1] + math.atan2(math.sin(brng) * math.sin(dist)
* math.cos(rad_center[0]), math.cos(dist) - math.sin(rad_center[0]) * math.sin(lat))
poly.append([number2degree(lng), number2degree(lat)])
return {"type": "Polygon", "coordinates": [poly]}
def rectangle_centroid(rectangle):
"""
get the centroid of the rectangle
Keyword arguments:
rectangle -- polygon geojson object
return centroid
"""
bbox = rectangle['coordinates'][0]
xmin = bbox[0][0]
ymin = bbox[0][1]
xmax = bbox[2][0]
ymax = bbox[2][1]
xwidth = xmax - xmin
ywidth = ymax - ymin
return {'type': 'Point', 'coordinates': [xmin + xwidth / 2, ymin + ywidth / 2]}
def point_distance(point1, point2):
    """
    calculate the haversine distance between two points on the sphere like google map
    reference http://www.movable-type.co.uk/scripts/latlong.html
    Keyword arguments:
    point1 -- point one geojson object
    point2 -- point two geojson object
    return the distance in meters between the two points
    """
lon1 = point1['coordinates'][0]
lat1 = point1['coordinates'][1]
lon2 = point2['coordinates'][0]
lat2 = point2['coordinates'][1]
deg_lat = number2radius(lat2 - lat1)
deg_lon = number2radius(lon2 - lon1)
a = math.pow(math.sin(deg_lat / 2), 2) + math.cos(number2radius(lat1)) * \
math.cos(number2radius(lat2)) * math.pow(math.sin(deg_lon / 2), 2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
return (6371 * c) * 1000
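def _example_point_distance():
    # Usage sketch (not part of the original module; coordinates are rough,
    # made-up values): haversine distance in metres between two GeoJSON
    # Points given as [lng, lat].
    p1 = {'type': 'Point', 'coordinates': [116.39, 39.91]}  # near Beijing
    p2 = {'type': 'Point', 'coordinates': [121.47, 31.23]}  # near Shanghai
    return point_distance(p1, p2)  # roughly 1.07e6 metres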
def geometry_within_radius(geometry, center, radius):
"""
    To check whether a point, linestring or polygon is inside a radius around a center
Keyword arguments:
geometry -- point/linstring/polygon geojson object
center -- point geojson object
radius -- radius
if(geometry inside radius) return true else false
"""
if geometry['type'] == 'Point':
return point_distance(geometry, center) <= radius
elif geometry['type'] == 'LineString' or geometry['type'] == 'Polygon':
point = {}
# it's enough to check the exterior ring of the Polygon
coordinates = geometry['coordinates'][0] if geometry['type'] == 'Polygon' else geometry['coordinates']
for coordinate in coordinates:
point['coordinates'] = coordinate
if point_distance(point, center) > radius:
return False
return True
def area(poly):
"""
calculate the area of polygon
Keyword arguments:
poly -- polygon geojson object
return polygon area
"""
poly_area = 0
# TODO: polygon holes at coordinates[1]
points = poly['coordinates'][0]
j = len(points) - 1
count = len(points)
for i in range(0, count):
p1_x = points[i][1]
p1_y = points[i][0]
p2_x = points[j][1]
p2_y = points[j][0]
poly_area += p1_x * p2_y
poly_area -= p1_y * p2_x
j = i
poly_area /= 2
return poly_area
def centroid(poly):
"""
get the centroid of polygon
adapted from http://paulbourke.net/geometry/polyarea/javascript.txt
Keyword arguments:
poly -- polygon geojson object
return polygon centroid
"""
f_total = 0
x_total = 0
y_total = 0
# TODO: polygon holes at coordinates[1]
points = poly['coordinates'][0]
j = len(points) - 1
count = len(points)
for i in range(0, count):
p1_x = points[i][1]
p1_y = points[i][0]
p2_x = points[j][1]
p2_y = points[j][0]
f_total = p1_x * p2_y - p2_x * p1_y
x_total += (p1_x + p2_x) * f_total
y_total += (p1_y + p2_y) * f_total
j = i
six_area = area(poly) * 6
return {'type': 'Point', 'coordinates': [y_total / six_area, x_total / six_area]}
def destination_point(point, brng, dist):
"""
Calculate a destination Point base on a base point and a distance
Keyword arguments:
    point -- point geojson object
brng -- an angle in degrees
dist -- distance in Kilometer between destination and base point
return destination point object
"""
dist = float(dist) / 6371 # convert dist to angular distance in radians
brng = number2radius(brng)
lon1 = number2radius(point['coordinates'][0])
lat1 = number2radius(point['coordinates'][1])
lat2 = math.asin(math.sin(lat1) * math.cos(dist) +
math.cos(lat1) * math.sin(dist) * math.cos(brng))
lon2 = lon1 + math.atan2(math.sin(brng) * math.sin(dist) *
math.cos(lat1), math.cos(dist) - math.sin(lat1) * math.sin(lat2))
lon2 = (lon2 + 3 * math.pi) % (2 * math.pi) - math.pi # normalise to -180 degree +180 degree
return {'type': 'Point', 'coordinates': [number2degree(lon2), number2degree(lat2)]}
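def _example_destination_point():
    # Usage sketch (not part of the original module; values are hypothetical):
    # move 100 km due east (bearing 90 degrees) from a start point given as a
    # GeoJSON Point with [lng, lat] coordinates.
    start = {'type': 'Point', 'coordinates': [0.0, 0.0]}
    return destination_point(start, 90, 100)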
def simplify(source, kink=20):
"""
source[] array of geojson points
kink in metres, kinks above this depth kept
kink depth is the height of the triangle abc where a-b and b-c are two consecutive line segments
"""
    source_coord = [{"lng": o["coordinates"][0], "lat": o["coordinates"][1]} for o in source]
# count, n_stack, n_dest, start, end, i, sig;
# dev_sqr, max_dev_sqr, band_sqr;
# x12, y12, d12, x13, y13, d13, x23, y23, d23;
F = (math.pi / 180.0) * 0.5
    # check for simple cases
    count = len(source_coord)
    if count < 3:
        return source_coord  # one or two points
    # preallocate work arrays so they can be assigned by index below
    index = [0] * count  # array of indexes of source points to include in the reduced line
    sig_start = [0] * count  # indices of start & end of working section
    sig_end = [0] * count
# more complex case. initialize stack
band_sqr = kink * 360.0 / (2.0 * math.pi * 6378137.0) # Now in degrees
band_sqr *= band_sqr
n_dest = 0
sig_start[0] = 0
sig_end[0] = count - 1
n_stack = 1
# while the stack is not empty
while n_stack > 0:
# ... pop the top-most entries off the stacks
start = sig_start[n_stack - 1]
end = sig_end[n_stack - 1]
n_stack -= 1
if (end - start) > 1: #any intermediate points ?
# ... yes, so find most deviant intermediate point to either side of line joining start & end points
            x12 = source_coord[end]["lng"] - source_coord[start]["lng"]
            y12 = source_coord[end]["lat"] - source_coord[start]["lat"]
            if math.fabs(x12) > 180.0:
                x12 = 360.0 - math.fabs(x12)
            x12 *= math.cos(F * (source_coord[end]["lat"] + source_coord[start]["lat"])) # use avg lat to reduce lng
d12 = (x12 * x12) + (y12 * y12)
i = start + 1
sig = start
max_dev_sqr = -1.0
while i < end:
                x13 = source_coord[i]["lng"] - source_coord[start]["lng"]
                y13 = source_coord[i]["lat"] - source_coord[start]["lat"]
                if math.fabs(x13) > 180.0:
                    x13 = 360.0 - math.fabs(x13)
                x13 *= math.cos(F * (source_coord[i]["lat"] + source_coord[start]["lat"]))
                d13 = (x13 * x13) + (y13 * y13)
                x23 = source_coord[i]["lng"] - source_coord[end]["lng"]
                y23 = source_coord[i]["lat"] - source_coord[end]["lat"]
                if math.fabs(x23) > 180.0:
                    x23 = 360.0 - math.fabs(x23)
                x23 *= math.cos(F * (source_coord[i]["lat"] + source_coord[end]["lat"]))
d23 = (x23 * x23) + (y23 * y23)
if d13 >= (d12 + d23):
dev_sqr = d23
elif d23 >= (d12 + d13):
dev_sqr = d13
else:
dev_sqr = (x13 * y12 - y13 * x12) * (x13 * y12 - y13 * x12) / d12 # solve triangle
if dev_sqr > max_dev_sqr:
sig = i
max_dev_sqr = dev_sqr
i += 1
if max_dev_sqr < band_sqr: # is there a sig. intermediate point ?
#... no, so transfer current start point
index[n_dest] = start
n_dest += 1
else: # ... yes, so push two sub-sections on stack for further processing
n_stack += 1
sig_start[n_stack - 1] = sig
sig_end[n_stack - 1] = end
n_stack += 1
sig_start[n_stack - 1] = start
sig_end[n_stack - 1] = sig
else: # ... no intermediate points, so transfer current start point
index[n_dest] = start
n_dest += 1
# transfer last point
index[n_dest] = count - 1
n_dest += 1
# make return array
r = []
for i in range(0, n_dest):
r.append(source_coord[index[i]])
    return [{"type": "Point", "coordinates": [o["lng"], o["lat"]]} for o in r]
def wgs2gcj(geometry):
"""
convert wgs84 to gcj
referencing by https://github.com/wandergis/coordTransform_py
"""
# TODO: point linestring point
if geometry['type'] == 'MultiLineString':
coordinates = geometry['coordinates']
for lines in coordinates:
for line in lines:
line[0], line[1] = wgs84togcj02(line[0], line[1])
return geometry
def gcj2bd(geometry):
"""
convert gcj to bd
referencing by https://github.com/wandergis/coordTransform_py
"""
# TODO: point linestring point
if geometry['type'] == 'MultiLineString':
coordinates = geometry['coordinates']
for lines in coordinates:
for line in lines:
line[0], line[1] = gcj02tobd09(line[0], line[1])
return geometry
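def _example_convert_multilinestring():
    # Usage sketch (not part of the original module; coordinates are made up):
    # chain the two converters to go from WGS-84 to GCJ-02 and then to BD-09
    # for a MultiLineString geometry.
    geometry = {'type': 'MultiLineString',
                'coordinates': [[[116.39, 39.91], [116.40, 39.92]]]}
    return gcj2bd(wgs2gcj(geometry))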
| 3.09375 | 3 |
config.py | Rinku92/Mini_Project3 | 0 | 2014 | import os
'''
user = os.environ['POSTGRES_USER']
password = os.environ['<PASSWORD>']
host = os.environ['POSTGRES_HOST']
database = os.environ['POSTGRES_DB']
port = os.environ['POSTGRES_PORT']
'''
user = 'test'
password = 'password'
host = 'localhost'
database = 'example'
port = '5432'
DATABASE_CONNECTION_URI = f'postgresql+psycopg2://{user}:{password}@{host}:{port}/{database}' | 2.25 | 2 |
10_days_of_statistics_8_1.py | sercangul/HackerRank | 0 | 2015 | <reponame>sercangul/HackerRank
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 3 19:26:47 2019
@author: sercangul
"""
n = 5
xy = [map(int, input().split()) for _ in range(n)]
sx, sy, sx2, sxy = map(sum, zip(*[(x, y, x**2, x * y) for x, y in xy]))
b = (n * sxy - sx * sy) / (n * sx2 - sx**2)
a = (sy / n) - b * (sx / n)
print('{:.3f}'.format(a + b * 80)) | 3.109375 | 3 |
rlutils/gym/envs/reset_obs/hopper.py | vermouth1992/rl-util | 0 | 2016 | import gym.envs.mujoco.hopper as hopper
import numpy as np
class HopperEnv(hopper.HopperEnv):
def _get_obs(self):
return np.concatenate([
self.sim.data.qpos.flat[1:],
self.sim.data.qvel.flat,
])
def reset_obs(self, obs):
state = np.insert(obs, 0, 0.)
qpos = state[:self.model.nq]
qvel = state[self.model.nq:]
self.set_state(qpos, qvel)
return self._get_obs()
| 2.359375 | 2 |
reco_utils/recommender/deeprec/io/iterator.py | yutian-zhao/recommenders | 0 | 2017 | <reponame>yutian-zhao/recommenders
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import numpy as np
# import tensorflow as tf
import abc
class BaseIterator(object):
@abc.abstractmethod
def parser_one_line(self, line):
pass
@abc.abstractmethod
def load_data_from_file(self, infile):
pass
@abc.abstractmethod
def _convert_data(self, labels, features):
pass
@abc.abstractmethod
def gen_feed_dict(self, data_dict):
pass
# class FFMTextIterator(BaseIterator):
# """Data loader for FFM format based models, such as xDeepFM.
# Iterator will not load the whole data into memory. Instead, it loads data into memory
# per mini-batch, so that large files can be used as input data.
# """
# def __init__(self, hparams, graph, col_spliter=" ", ID_spliter="%"):
# """Initialize an iterator. Create necessary placeholders for the model.
# Args:
# hparams (obj): Global hyper-parameters. Some key settings such as #_feature and #_field are there.
# graph (obj): the running graph. All created placeholder will be added to this graph.
# col_spliter (str): column splitter in one line.
# ID_spliter (str): ID splitter in one line.
# """
# self.feature_cnt = hparams.FEATURE_COUNT
# self.field_cnt = hparams.FIELD_COUNT
# self.col_spliter = col_spliter
# self.ID_spliter = ID_spliter
# self.batch_size = hparams.batch_size
# self.graph = graph
# with self.graph.as_default():
# self.labels = tf.placeholder(tf.float32, [None, 1], name="label")
# self.fm_feat_indices = tf.placeholder(
# tf.int64, [None, 2], name="fm_feat_indices"
# )
# self.fm_feat_values = tf.placeholder(
# tf.float32, [None], name="fm_feat_values"
# )
# self.fm_feat_shape = tf.placeholder(tf.int64, [None], name="fm_feat_shape")
# self.dnn_feat_indices = tf.placeholder(
# tf.int64, [None, 2], name="dnn_feat_indices"
# )
# self.dnn_feat_values = tf.placeholder(
# tf.int64, [None], name="dnn_feat_values"
# )
# self.dnn_feat_weights = tf.placeholder(
# tf.float32, [None], name="dnn_feat_weights"
# )
# self.dnn_feat_shape = tf.placeholder(
# tf.int64, [None], name="dnn_feat_shape"
# )
# def parser_one_line(self, line):
# """Parse one string line into feature values.
# Args:
# line (str): a string indicating one instance
# Returns:
# list: Parsed results,including label, features and impression_id
# """
# impression_id = 0
# words = line.strip().split(self.ID_spliter)
# if len(words) == 2:
# impression_id = words[1].strip()
# cols = words[0].strip().split(self.col_spliter)
# label = float(cols[0])
# features = []
# for word in cols[1:]:
# if not word.strip():
# continue
# tokens = word.split(":")
# features.append([int(tokens[0]) - 1, int(tokens[1]) - 1, float(tokens[2])])
# return label, features, impression_id
# def load_data_from_file(self, infile):
# """Read and parse data from a file.
# Args:
# infile (str): text input file. Each line in this file is an instance.
# Returns:
# obj: An iterator that will yields parsed results, in the format of graph feed_dict.
# """
# label_list = []
# features_list = []
# impression_id_list = []
# cnt = 0
# with tf.gfile.GFile(infile, "r") as rd:
# for line in rd:
# label, features, impression_id = self.parser_one_line(line)
# features_list.append(features)
# label_list.append(label)
# impression_id_list.append(impression_id)
# cnt += 1
# if cnt == self.batch_size:
# res = self._convert_data(label_list, features_list)
# yield self.gen_feed_dict(res), impression_id_list, self.batch_size
# label_list = []
# features_list = []
# impression_id_list = []
# cnt = 0
# if cnt > 0:
# res = self._convert_data(label_list, features_list)
# yield self.gen_feed_dict(res), impression_id_list, cnt
# def _convert_data(self, labels, features):
# """Convert data into numpy arrays that are good for further operation.
# Args:
# labels (list): a list of ground-truth labels.
# features (list): a 3-dimensional list, carrying a list (batch_size) of feature array,
# where each feature array is a list of [field_idx, feature_idx, feature_value] tuple.
# Returns:
# dict: A dictionary, contains multiple numpy arrays that are convenient for further operation.
# """
# dim = self.feature_cnt
# FIELD_COUNT = self.field_cnt
# instance_cnt = len(labels)
# fm_feat_indices = []
# fm_feat_values = []
# fm_feat_shape = [instance_cnt, dim]
# dnn_feat_indices = []
# dnn_feat_values = []
# dnn_feat_weights = []
# dnn_feat_shape = [instance_cnt * FIELD_COUNT, -1]
# for i in range(instance_cnt):
# m = len(features[i])
# dnn_feat_dic = {}
# for j in range(m):
# fm_feat_indices.append([i, features[i][j][1]])
# fm_feat_values.append(features[i][j][2])
# if features[i][j][0] not in dnn_feat_dic:
# dnn_feat_dic[features[i][j][0]] = 0
# else:
# dnn_feat_dic[features[i][j][0]] += 1
# dnn_feat_indices.append(
# [
# i * FIELD_COUNT + features[i][j][0],
# dnn_feat_dic[features[i][j][0]],
# ]
# )
# dnn_feat_values.append(features[i][j][1])
# dnn_feat_weights.append(features[i][j][2])
# if dnn_feat_shape[1] < dnn_feat_dic[features[i][j][0]]:
# dnn_feat_shape[1] = dnn_feat_dic[features[i][j][0]]
# dnn_feat_shape[1] += 1
# sorted_index = sorted(
# range(len(dnn_feat_indices)),
# key=lambda k: (dnn_feat_indices[k][0], dnn_feat_indices[k][1]),
# )
# res = {}
# res["fm_feat_indices"] = np.asarray(fm_feat_indices, dtype=np.int64)
# res["fm_feat_values"] = np.asarray(fm_feat_values, dtype=np.float32)
# res["fm_feat_shape"] = np.asarray(fm_feat_shape, dtype=np.int64)
# res["labels"] = np.asarray([[label] for label in labels], dtype=np.float32)
# res["dnn_feat_indices"] = np.asarray(dnn_feat_indices, dtype=np.int64)[
# sorted_index
# ]
# res["dnn_feat_values"] = np.asarray(dnn_feat_values, dtype=np.int64)[
# sorted_index
# ]
# res["dnn_feat_weights"] = np.asarray(dnn_feat_weights, dtype=np.float32)[
# sorted_index
# ]
# res["dnn_feat_shape"] = np.asarray(dnn_feat_shape, dtype=np.int64)
# return res
# def gen_feed_dict(self, data_dict):
# """Construct a dictionary that maps graph elements to values.
# Args:
# data_dict (dict): a dictionary that maps string name to numpy arrays.
# Returns:
# dict: a dictionary that maps graph elements to numpy arrays.
# """
# feed_dict = {
# self.labels: data_dict["labels"],
# self.fm_feat_indices: data_dict["fm_feat_indices"],
# self.fm_feat_values: data_dict["fm_feat_values"],
# self.fm_feat_shape: data_dict["fm_feat_shape"],
# self.dnn_feat_indices: data_dict["dnn_feat_indices"],
# self.dnn_feat_values: data_dict["dnn_feat_values"],
# self.dnn_feat_weights: data_dict["dnn_feat_weights"],
# self.dnn_feat_shape: data_dict["dnn_feat_shape"],
# }
# return feed_dict
| 2.375 | 2 |
HW6/YuliiaKutsyk/3_ unfinished_loop_bug_fixing.py | kolyasalubov/Lv-677.PythonCore | 0 | 2018 | def create_array(n):
res=[]
i=1
while i<=n:
res.append(i)
i += 1
return res
| 3.21875 | 3 |
ncm/api.py | SDhuangao/netease-cloud-music-dl | 0 | 2019 | <reponame>SDhuangao/netease-cloud-music-dl
# -*- coding: utf-8 -*-
import requests
from ncm.encrypt import encrypted_request
from ncm.constants import headers
from ncm.constants import song_download_url
from ncm.constants import get_song_url
from ncm.constants import get_album_url
from ncm.constants import get_artist_url
from ncm.constants import get_playlist_url
class CloudApi(object):
def __init__(self, timeout=30):
super().__init__()
self.session = requests.session()
self.session.headers.update(headers)
self.timeout = timeout
def get_request(self, url):
response = self.session.get(url, timeout=self.timeout)
result = response.json()
if result['code'] != 200:
print('Return {} when try to get {}'.format(result, url))
else:
return result
def post_request(self, url, params):
data = encrypted_request(params)
response = self.session.post(url, data=data, timeout=self.timeout)
result = response.json()
if result['code'] != 200:
print('Return {} when try to post {} => {}'.format(result, params, url))
else:
return result
def get_song(self, song_id):
"""
Get song info by song id
:param song_id:
:return:
"""
url = get_song_url(song_id)
result = self.get_request(url)
return result['songs'][0]
def get_album_songs(self, album_id):
"""
Get all album songs info by album id
:param album_id:
:return:
"""
url = get_album_url(album_id)
result = self.get_request(url)
return result['album']['songs']
def get_song_url(self, song_id, bit_rate=320000):
"""Get a song's download url.
:params song_id: song id<int>.
:params bit_rate: {'MD 128k': 128000, 'HD 320k': 320000}
:return:
"""
url = song_download_url
csrf = ''
params = {'ids': [song_id], 'br': bit_rate, 'csrf_token': csrf}
result = self.post_request(url, params)
song_url = result['data'][0]['url']
return song_url
def get_hot_songs(self, artist_id):
"""
Get a artist 50 hot songs
:param artist_id:
:return:
"""
url = get_artist_url(artist_id)
result = self.get_request(url)
return result['hotSongs']
def get_playlist_songs(self, playlist_id):
"""
Get a public playlist all songs
:param playlist_id:
:return:
"""
url = get_playlist_url(playlist_id)
result = self.get_request(url)
return result['playlist']['trackIds'], result['playlist']['name']
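def _example_cloud_api():
    # Usage sketch (not part of the original module): the song id below is a
    # placeholder, not a real NetEase id; calling this performs network I/O.
    api = CloudApi()
    song = api.get_song(123456789)
    url = api.get_song_url(123456789, bit_rate=320000)
    return song, url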
| 2.640625 | 3 |
book/trees/binary_search_tree.py | Web-Dev-Collaborative/algos | 153 | 2020 | # -*- coding: utf-8 -*-
"""
The `TreeNode` class provides many helper functions that make the work
done in the `BinarySearchTree` class methods much easier. The
constructor for a `TreeNode`, along with these helper functions, is
shown below. As you can see, many of these helper functions help to
classify a node according to its own position as a child, (left or
right) and the kind of children the node has. The `TreeNode` class will
also explicitly keep track of the parent as an attribute of each node.
You will see why this is important when we discuss the implementation
for the `del` operator.
One of the more interesting methods of `TreeNode` provides an interface
to simply iterate over all the keys in the tree in order. You already
know how to traverse a binary tree in order, using the `inorder`
traversal algorithm. However, because we want our iterator to operate
lazily, in this case we use the `yield` keyword to define our `__iter__`
method as a Python generator. Pay close attention to the `__iter__`
implementation as at first glance you might think that the code is
not recursive: in fact, because `__iter__` overrides the `for x
in` operation for iteration, it really is recursive!
Our full implementation of `TreeNode` is provided below. It includes
three further methods `find_successor`, `find_min` and `splice_out`
which you can ignore for now as we will return to them later when
discussing deletion.
"""
class TreeNode(object):
def __init__(self, key, val, left=None, right=None, parent=None):
self.key = key
self.val = val
self.left = left
self.right = right
self.parent = parent
def is_left_child(self):
return self.parent and self.parent.left == self
def is_right_child(self):
return self.parent and self.parent.right == self
def is_leaf(self):
return not (self.right or self.left)
def has_any_children(self):
return self.right or self.left
def has_both_children(self):
return self.right and self.left
def has_one_child(self):
return self.has_any_children() and not self.has_both_children()
def replace_node_data(self, key, val, left, right):
self.key = key
self.val = val
self.left = left
self.right = right
if self.left:
self.left.parent = self
if self.right:
self.right.parent = self
def __iter__(self):
if self is None:
return
if self.left:
# `in` calls `__iter__` so is recursive
for elem in self.left:
yield elem
yield self.key
if self.right:
# recurse again
for elem in self.right:
yield elem
def find_successor(self):
if self.right:
return self.right.find_min()
if self.parent is None:
return None
if self.is_left_child():
return self.parent
self.parent.right = None
successor = self.parent.find_successor()
self.parent.right = self
return successor
def find_min(self):
current = self
while current.left:
current = current.left
return current
def splice_out(self):
if self.is_leaf():
if self.is_left_child():
self.parent.left = None
else:
self.parent.right = None
else:
promoted_node = self.left or self.right
if self.is_left_child():
self.parent.left = promoted_node
else:
self.parent.right = promoted_node
promoted_node.parent = self.parent
"""
Now that we have our `TreeNode` class we can begin to write
`BinarySearchTree` itself. Recall that the core functionality of this
class will be to enable `put`ing to and `get`ing from the tree, so we
begin our implementation with the `put` functionality.
In order to enable the `tree[1] = 'foo'` style assignment interface for
our `BinarySearchTree` instances, we override the `__setitem__` magic
method. In this method we first check to see if the tree already has a
root. If there is not a root then we create a new `TreeNode` and set it
as the root of the tree. If a root node is already in place then `put`
calls the private, recursive, helper function `_put` to search the tree
according to the following algorithm:
- Starting at the root of the tree, search the binary tree comparing
the new key to the key in the current node. If the new key is less
than the current node, search the left subtree. If the new key is
greater than the current node, search the right subtree.
- When there is no left (or right) child to search, we have found the
position in the tree where the new node should be installed.
- To add a node to the tree, create a new `TreeNode` object and insert
the object at the point discovered in the previous step.
The code below shows the Python code for inserting a new
node in the tree. The `_put` function is written recursively following
the steps outlined above. Notice that when a new child is inserted into
the tree, the `node` is passed to the new tree as the parent.
One important problem with our implementation of insert is that
duplicate keys are not handled properly. As our tree is implemented a
duplicate key will create a new node with the same key value in the
right subtree of the node having the original key. The result of this is
that the node with the new key will never be found during a search. A
better way to handle the insertion of a duplicate key is for the value
associated with the new key to replace the old value. We leave fixing
this bug as an exercise for you.
"""
class BinarySearchTree(object):
TreeNodeClass = TreeNode
def __init__(self):
self.root = None
self.size = 0
def __len__(self):
return self.size
def __iter__(self):
return self.root.__iter__()
def __setitem__(self, key, val):
if self.root:
self._put(key, val, self.root)
else:
self.root = self.TreeNodeClass(key, val)
self.size = self.size + 1
def _put(self, key, val, node):
if key < node.key:
if node.left:
self._put(key, val, node.left)
else:
node.left = self.TreeNodeClass(key, val, parent=node)
else:
if node.right:
self._put(key, val, node.right)
else:
node.right = self.TreeNodeClass(key, val, parent=node)
"""
The diagram below illustrates the process for inserting a new
node into a binary search tree. The lightly shaded nodes indicate the
nodes that were visited during the insertion process.

Once the tree is constructed, the next task is to implement the
retrieval of a value for a given key. The `get` functionality is even easier
than the `put` functionality because we simply search the tree recursively
until we get to a non-matching leaf node or find a matching key. When
a matching key is found, the value stored in the val of the node is
returned.
Again, in order to enable a `tree[1]` retrieval interface, we overload
one of Python’s magic methods—in this case `__getitem__`. Just like with
`__setitem__`, the primary purpose of this method is to handle presence
and absence of a root node, and delegates the core `get` functionality
to `_get`.
The search code in the `_get` method uses the same logic
for choosing the left or right child as the `_put` method. Notice that
the `_get` method returns a `TreeNode` to `__getitem__`; this allows `_get` to
be used as a flexible helper method for other `BinarySearchTree` methods
that may need to make use of other data from the `TreeNode` besides the
val.
"""
def __getitem__(self, key):
if self.root:
result = self._get(key, self.root)
if result:
return result.val
raise KeyError
def _get(self, key, node):
if not node:
return None
if node.key == key:
return node
if key < node.key:
return self._get(key, node.left)
return self._get(key, node.right)
"""
Using `_get`, we can implement the `in` operation by writing a
`__contains__` method for the `BinarySearchTree`. The `__contains__`
method will simply call `_get` and return `True` if `_get` returns a
value, or `False` if it returns `None`. The code for `__contains__` is
shown below.
"""
def __contains__(self, key):
return bool(self._get(key, self.root))
"""
Finally, we turn our attention to the most challenging method in the
binary search tree: the deletion of a key. The first task is
to find the node to delete by searching the tree. If the tree has more
than one node we search using the `_get` method to find the `TreeNode`
that needs to be removed. If the tree only has a single node, that means
we are removing the root of the tree, but we still must check to make
sure the key of the root matches the key that is to be deleted. In
either case if the key is not found the `del` operator raises an error.
"""
def delete(self, key):
if self.size > 1:
node_to_remove = self._get(key, self.root)
if node_to_remove:
self.remove(node_to_remove)
self.size = self.size - 1
return
elif self.size == 1 and self.root.key == key:
self.root = None
self.size = self.size - 1
return
raise KeyError('Error, key not in tree')
def __delitem__(self, key):
self.delete(key)
"""
Once we’ve found the node containing the key we want to delete, there
are three cases that we must consider:
1. The node to be deleted has no children
2. The node to be deleted has only one child
3. The node to be deleted has two children
The first case is straightforward. If
the current node has no children all we need to do is delete the node
and remove the reference to this node in the parent. The code for this
case is shown below.
"""
def remove(self, node):
if node.is_leaf() and node.parent is not None:
if node == node.parent.left:
node.parent.left = None
else:
node.parent.right = None
"""

The second case is only slightly more complicated (see below). If a node
has only a single child, then we can simply promote the child to take
the place of its parent. The code for this case is shown in the next
code sample. As you look at this code you will see that there are six
cases to consider. Since the cases are symmetric with respect to either
having a left or right child we will just discuss the case where the
current node has a left child. The decision proceeds as follows:
1. If the current node is a left child then we only need to update the
parent reference of the left child to point to the parent of the
current node, and then update the left child reference of the parent
to point to the current node’s left child.
2. If the current node is a right child then we only need to update the
parent reference of the right child to point to the parent of the
current node, and then update the right child reference of the
parent to point to the current node’s right child.
3. If the current node has no parent, it must be the root. In this case
we will just replace the `key`, `val`, `left`, and
`right` data by calling the `replace_node_data` method on
the root.
Code for this decision process may look like:
"""
elif node.has_one_child():
promoted_node = node.left or node.right
if node.is_left_child():
promoted_node.parent = node.parent
node.parent.left = promoted_node
elif node.is_right_child():
promoted_node.parent = node.parent
node.parent.right = promoted_node
else:
node.replace_node_data(
promoted_node.key,
promoted_node.val,
promoted_node.left,
promoted_node.right
)
"""

The third case is the most difficult case to handle (see below). If a
node has two children, then it is unlikely that we can simply promote
one of them to take the node’s place. We can, however, search the tree
for a node that can be used to replace the one scheduled for deletion.
What we need is a node that will preserve the binary search tree
relationships for both of the existing left and right subtrees. The node
that will do this is the node that has the next-largest key in the tree.
We call this node the **successor**, and we will look at a way to find
the successor shortly. The successor is guaranteed to have no more than
one child, so we know how to remove it using the two cases for deletion
that we have already implemented. Once the successor has been removed,
we simply put it in the tree in place of the node to be deleted.

The code to handle the third case is shown below. Notice
that we make use of the helper methods `find_successor` and `find_min` to
find the successor. To remove the successor, we make use of the method
`splice_out`. The reason we use `splice_out` is that it goes directly to
the node we want to splice out and makes the right changes. We could
call `delete` recursively, but then we would waste time re-searching for
the key node.
"""
else: # has both children
successor = node.find_successor()
if successor:
successor.splice_out()
node.key = successor.key
node.val = successor.val
"""
The code to find the successor is shown above and as you can see is a
method of the `TreeNode` class. This code makes use of the same
properties of binary search trees that cause an inorder traversal to
print out the nodes in the tree from smallest to largest. There are
three cases to consider when looking for the successor:
1. If the node has a right child, then the successor is the smallest
key in the right subtree.
2. If the node has no right child and is the left child of its parent,
then the parent is the successor.
3. If the node is the right child of its parent, and itself has no
right child, then the successor to this node is the successor of its
parent, excluding this node.
The first condition is the only one that matters for us when deleting a
node from a binary search tree.
The `find_min` method is called to find the minimum key in a subtree. You
should convince yourself that the minimum valued key in any binary
search tree is the leftmost child of the tree. Therefore the `find_min`
method simply follows the `left` references in each node of the
subtree until it reaches a node that does not have a left child.
"""
| 4.125 | 4 |
fire/core.py | adamruth/python-fire | 1 | 2021 | <filename>fire/core.py
# Copyright (C) 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python Fire is a library for creating CLIs from absolutely any Python object.
You can call Fire on any Python object:
functions, classes, modules, objects, dictionaries, lists, tuples, etc.
They all work!
Python Fire turns any Python object into a command line interface.
Simply call the Fire function as your main method to create a CLI.
When using Fire to build a CLI, your main method includes a call to Fire. Eg:
def main(argv):
fire.Fire(Component)
A Fire CLI command is run by consuming the arguments in the command in order to
access a member of current component, call the current component (if it's a
function), or instantiate the current component (if it's a class). The target
component begins as Component, and at each operation the component becomes the
result of the preceding operation.
For example "command fn arg1 arg2" might access the "fn" property of the initial
target component, and then call that function with arguments 'arg1' and 'arg2'.
Additional examples are available in the examples directory.
Fire Flags, common to all Fire CLIs, must go after a separating "--". For
example, to get help for a command you might run: `command -- --help`.
The available flags for all Fire CLIs are:
-v --verbose: Include private members in help and usage information.
-h --help: Provide help and usage information for the command.
-i --interactive: Drop into a Python REPL after running the command.
--completion: Write the Bash completion script for the tool to stdout.
--separator SEPARATOR: Use SEPARATOR in place of the default separator, '-'.
--trace: Get the Fire Trace for the command.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import json
import os
import pipes
import shlex
import sys
import types
from fire import completion
from fire import decorators
from fire import helputils
from fire import inspectutils
from fire import interact
from fire import parser
from fire import trace
import six
def Fire(component=None, command=None, name=None):
"""This function, Fire, is the main entrypoint for Python Fire.
Executes a command either from the `command` argument or from sys.argv by
recursively traversing the target object `component`'s members consuming
arguments, evaluating functions, and instantiating classes as it goes.
When building a CLI with Fire, your main method should call this function.
Args:
component: The initial target component.
command: Optional. If supplied, this is the command executed. If not
supplied, then the command is taken from sys.argv instead. This can be
a string or a list of strings; a list of strings is preferred.
name: Optional. The name of the command as entered at the command line.
Used in interactive mode and for generating the completion script.
Returns:
The result of executing the Fire command. Execution begins with the initial
target component. The component is updated by using the command arguments
to either access a member of the current component, call the current
component (if it's a function), or instantiate the current component (if
it's a class). When all arguments are consumed and there's no function left
to call or class left to instantiate, the resulting current component is
the final result.
Raises:
ValueError: If the command argument is supplied, but not a string or a
sequence of arguments.
FireExit: When Fire encounters a FireError, Fire will raise a FireExit with
code 2. When used with the help or trace flags, Fire will raise a
FireExit with code 0 if successful.
"""
name = name or os.path.basename(sys.argv[0])
# Get args as a list.
if isinstance(command, six.string_types):
args = shlex.split(command)
elif isinstance(command, (list, tuple)):
args = command
elif command is None:
# Use the command line args by default if no command is specified.
args = sys.argv[1:]
else:
raise ValueError('The command argument must be a string or a sequence of '
'arguments.')
# Determine the calling context.
caller = inspect.stack()[1]
caller_frame = caller[0]
caller_globals = caller_frame.f_globals
caller_locals = caller_frame.f_locals
context = {}
context.update(caller_globals)
context.update(caller_locals)
component_trace = _Fire(component, args, context, name)
if component_trace.HasError():
for help_flag in ['-h', '--help']:
if help_flag in component_trace.elements[-1].args:
command = '{cmd} -- --help'.format(cmd=component_trace.GetCommand())
print(('WARNING: The proper way to show help is {cmd}.\n'
'Showing help anyway.\n').format(cmd=pipes.quote(command)),
file=sys.stderr)
print('Fire trace:\n{trace}\n'.format(trace=component_trace),
file=sys.stderr)
result = component_trace.GetResult()
print(
helputils.HelpString(result, component_trace, component_trace.verbose),
file=sys.stderr)
raise FireExit(2, component_trace)
elif component_trace.show_trace and component_trace.show_help:
print('Fire trace:\n{trace}\n'.format(trace=component_trace),
file=sys.stderr)
result = component_trace.GetResult()
print(
helputils.HelpString(result, component_trace, component_trace.verbose),
file=sys.stderr)
raise FireExit(0, component_trace)
elif component_trace.show_trace:
print('Fire trace:\n{trace}'.format(trace=component_trace),
file=sys.stderr)
raise FireExit(0, component_trace)
elif component_trace.show_help:
result = component_trace.GetResult()
print(
helputils.HelpString(result, component_trace, component_trace.verbose),
file=sys.stderr)
raise FireExit(0, component_trace)
else:
_PrintResult(component_trace, verbose=component_trace.verbose)
result = component_trace.GetResult()
return result
def CompletionScript(name, component):
"""Returns the text of the Bash completion script for a Fire CLI."""
return completion.Script(name, component)
class FireError(Exception):
"""Exception used by Fire when a Fire command cannot be executed.
These exceptions are not raised by the Fire function, but rather are caught
and added to the FireTrace.
"""
class FireExit(SystemExit):
"""An exception raised by Fire to the client in the case of a FireError.
The trace of the Fire program is available on the `trace` property.
This exception inherits from SystemExit, so clients may explicitly catch it
with `except SystemExit` or `except FireExit`. If not caught, this exception
will cause the client program to exit without a stacktrace.
"""
def __init__(self, code, component_trace):
"""Constructs a FireExit exception.
Args:
code: (int) Exit code for the Fire CLI.
component_trace: (FireTrace) The trace for the Fire command.
"""
super(FireExit, self).__init__(code)
self.trace = component_trace
def _PrintResult(component_trace, verbose=False):
"""Prints the result of the Fire call to stdout in a human readable way."""
# TODO: Design human readable deserializable serialization method
  # and move serialization to its own module.
result = component_trace.GetResult()
if isinstance(result, (list, set, types.GeneratorType)):
for i in result:
print(_OneLineResult(i))
elif inspect.isgeneratorfunction(result):
raise NotImplementedError
elif isinstance(result, dict):
print(_DictAsString(result, verbose))
elif isinstance(result, tuple):
print(_OneLineResult(result))
elif isinstance(result,
(bool, six.string_types, six.integer_types, float, complex)):
print(result)
elif result is not None:
print(helputils.HelpString(result, component_trace, verbose))
def _DictAsString(result, verbose=False):
"""Returns a dict as a string.
Args:
result: The dict to convert to a string
verbose: Whether to include 'hidden' members, those keys starting with _.
Returns:
A string representing the dict
"""
result = {key: value for key, value in result.items()
if _ComponentVisible(key, verbose)}
if not result:
return '{}'
longest_key = max(len(str(key)) for key in result.keys())
format_string = '{{key:{padding}s}} {{value}}'.format(padding=longest_key + 1)
lines = []
for key, value in result.items():
line = format_string.format(key=str(key) + ':',
value=_OneLineResult(value))
lines.append(line)
return '\n'.join(lines)
def _ComponentVisible(component, verbose=False):
"""Returns whether a component should be visible in the output."""
return (
verbose
or not isinstance(component, six.string_types)
or not component.startswith('_'))
def _OneLineResult(result):
"""Returns result serialized to a single line string."""
# TODO: Ensure line is fewer than eg 120 characters.
if isinstance(result, six.string_types):
return str(result).replace('\n', ' ')
try:
# Don't force conversion to ascii.
return json.dumps(result, ensure_ascii=False)
except (TypeError, ValueError):
return str(result).replace('\n', ' ')
def _Fire(component, args, context, name=None):
"""Execute a Fire command on a target component using the args supplied.
Arguments that come after a final isolated '--' are treated as Flags, eg for
interactive mode or completion script generation.
Other arguments are consumed by the execution of the Fire command, eg in the
traversal of the members of the component, or in calling a function or
instantiating a class found during the traversal.
The steps performed by this method are:
1. Parse any Flag args (the args after the final --)
2. Start with component as the current component.
2a. If the current component is a class, instantiate it using args from args.
2b. If the current component is a routine, call it using args from args.
2c. Otherwise access a member from component using an arg from args.
2d. Repeat 2a-2c until no args remain.
3a. Embed into ipython REPL if interactive mode is selected.
3b. Generate a completion script if that flag is provided.
In step 2, arguments will only ever be consumed up to a separator; a single
step will never consume arguments from both sides of a separator.
The separator defaults to a hyphen (-), and can be overwritten with the
--separator Fire argument.
Args:
component: The target component for Fire.
args: A list of args to consume in Firing on the component, usually from
the command line.
context: A dict with the local and global variables available at the call
to Fire.
name: Optional. The name of the command. Used in interactive mode and in
the tab completion script.
Returns:
FireTrace of components starting with component, tracing Fire's execution
path as it consumes args.
Raises:
ValueError: If there are arguments that cannot be consumed.
ValueError: If --completion is specified but no name available.
"""
args, flag_args = parser.SeparateFlagArgs(args)
argparser = parser.CreateParser()
parsed_flag_args, unused_args = argparser.parse_known_args(flag_args)
verbose = parsed_flag_args.verbose
interactive = parsed_flag_args.interactive
separator = parsed_flag_args.separator
show_completion = parsed_flag_args.completion
show_help = parsed_flag_args.help
show_trace = parsed_flag_args.trace
# component can be a module, class, routine, object, etc.
if component is None:
component = context
initial_component = component
component_trace = trace.FireTrace(
initial_component=initial_component, name=name, separator=separator,
verbose=verbose, show_help=show_help, show_trace=show_trace)
instance = None
remaining_args = args
while True:
last_component = component
initial_args = remaining_args
if not remaining_args and (show_help or interactive or show_trace
or show_completion):
# Don't initialize the final class or call the final function unless
# there's a separator after it, and instead process the current component.
break
saved_args = []
used_separator = False
if separator in remaining_args:
# For the current component, only use arguments up to the separator.
separator_index = remaining_args.index(separator)
saved_args = remaining_args[separator_index + 1:]
remaining_args = remaining_args[:separator_index]
used_separator = True
assert separator not in remaining_args
if inspect.isclass(component) or inspect.isroutine(component):
# The component is a class or a routine; we'll try to initialize it or
# call it.
isclass = inspect.isclass(component)
try:
target = component.__name__
filename, lineno = inspectutils.GetFileAndLine(component)
component, consumed_args, remaining_args, capacity = _CallCallable(
component, remaining_args)
# Update the trace.
if isclass:
component_trace.AddInstantiatedClass(
component, target, consumed_args, filename, lineno, capacity)
else:
component_trace.AddCalledRoutine(
component, target, consumed_args, filename, lineno, capacity)
except FireError as error:
component_trace.AddError(error, initial_args)
return component_trace
if last_component is initial_component:
# If the initial component is a class, keep an instance for use with -i.
instance = component
elif isinstance(component, (list, tuple)) and remaining_args:
# The component is a tuple or list; we'll try to access a member.
arg = remaining_args[0]
try:
index = int(arg)
component = component[index]
except (ValueError, IndexError):
error = FireError(
'Unable to index into component with argument:', arg)
component_trace.AddError(error, initial_args)
return component_trace
remaining_args = remaining_args[1:]
filename = None
lineno = None
component_trace.AddAccessedProperty(
component, index, [arg], filename, lineno)
elif isinstance(component, dict) and remaining_args:
# The component is a dict; we'll try to access a member.
target = remaining_args[0]
if target in component:
component = component[target]
elif target.replace('-', '_') in component:
component = component[target.replace('-', '_')]
else:
# The target isn't present in the dict as a string, but maybe it is as
# another type.
# TODO: Consider alternatives for accessing non-string keys.
found_target = False
for key, value in component.items():
if target == str(key):
component = value
found_target = True
break
if not found_target:
error = FireError(
'Cannot find target in dict:', target, component)
component_trace.AddError(error, initial_args)
return component_trace
remaining_args = remaining_args[1:]
filename = None
lineno = None
component_trace.AddAccessedProperty(
component, target, [target], filename, lineno)
elif remaining_args:
# We'll try to access a member of the component.
try:
target = remaining_args[0]
component, consumed_args, remaining_args = _GetMember(
component, remaining_args)
filename, lineno = inspectutils.GetFileAndLine(component)
component_trace.AddAccessedProperty(
component, target, consumed_args, filename, lineno)
except FireError as error:
component_trace.AddError(error, initial_args)
return component_trace
if used_separator:
# Add back in the arguments from after the separator.
if remaining_args:
remaining_args = remaining_args + [separator] + saved_args
elif (inspect.isclass(last_component)
or inspect.isroutine(last_component)):
remaining_args = saved_args
component_trace.AddSeparator()
elif component is not last_component:
remaining_args = [separator] + saved_args
else:
# It was an unnecessary separator.
remaining_args = saved_args
if component is last_component and remaining_args == initial_args:
# We're making no progress.
break
if remaining_args:
component_trace.AddError(
FireError('Could not consume arguments:', remaining_args),
initial_args)
return component_trace
if show_completion:
if name is None:
raise ValueError('Cannot make completion script without command name')
script = CompletionScript(name, initial_component)
component_trace.AddCompletionScript(script)
if interactive:
variables = context.copy()
if name is not None:
variables[name] = initial_component
variables['component'] = initial_component
variables['result'] = component
variables['trace'] = component_trace
if instance is not None:
variables['self'] = instance
interact.Embed(variables, verbose)
component_trace.AddInteractiveMode()
return component_trace
def _GetMember(component, args):
"""Returns a subcomponent of component by consuming an arg from args.
Given a starting component and args, this function gets a member from that
component, consuming one arg in the process.
Args:
component: The component from which to get a member.
args: Args from which to consume in the search for the next component.
Returns:
component: The component that was found by consuming an arg.
consumed_args: The args that were consumed by getting this member.
remaining_args: The remaining args that haven't been consumed yet.
Raises:
FireError: If we cannot consume an argument to get a member.
"""
members = dict(inspect.getmembers(component))
arg = args[0]
arg_names = [
arg,
arg.replace('-', '_'), # treat '-' as '_'.
]
for arg_name in arg_names:
if arg_name in members:
return members[arg_name], [arg], args[1:]
raise FireError('Could not consume arg:', arg)
def _CallCallable(fn, args):
"""Calls the function fn by consuming args from args.
Args:
fn: The function to call or class to instantiate.
args: Args from which to consume for calling the function.
Returns:
component: The object that is the result of the function call.
consumed_args: The args that were consumed for the function call.
remaining_args: The remaining args that haven't been consumed yet.
capacity: Whether the call could have taken additional args.
"""
parse = _MakeParseFn(fn)
(varargs, kwargs), consumed_args, remaining_args, capacity = parse(args)
result = fn(*varargs, **kwargs)
return result, consumed_args, remaining_args, capacity
def _MakeParseFn(fn):
"""Creates a parse function for fn.
Args:
fn: The function or class to create the parse function for.
Returns:
A parse function for fn. The parse function accepts a list of arguments
and returns (varargs, kwargs), remaining_args. The original function fn
can then be called with fn(*varargs, **kwargs). The remaining_args are
the leftover args from the arguments to the parse function.
"""
fn_spec = inspectutils.GetFullArgSpec(fn)
all_args = fn_spec.args + fn_spec.kwonlyargs
metadata = decorators.GetMetadata(fn)
# Note: num_required_args is the number of positional arguments without
# default values. All of these arguments are required.
num_required_args = len(fn_spec.args) - len(fn_spec.defaults)
required_kwonly = set(fn_spec.kwonlyargs) - set(fn_spec.kwonlydefaults)
def _ParseFn(args):
"""Parses the list of `args` into (varargs, kwargs), remaining_args."""
kwargs, remaining_kwargs, remaining_args = _ParseKeywordArgs(
args, all_args, fn_spec.varkw)
# Note: _ParseArgs modifies kwargs.
parsed_args, kwargs, remaining_args, capacity = _ParseArgs(
fn_spec.args, fn_spec.defaults, num_required_args, kwargs,
remaining_args, metadata)
if fn_spec.varargs or fn_spec.varkw:
# If we're allowed *varargs or **kwargs, there's always capacity.
capacity = True
extra_kw = set(kwargs) - set(fn_spec.kwonlyargs)
if fn_spec.varkw is None and extra_kw:
raise FireError('Unexpected kwargs present:', extra_kw)
missing_kwonly = set(required_kwonly) - set(kwargs)
if missing_kwonly:
raise FireError('Missing required flags:', missing_kwonly)
# If we accept *varargs, then use all remaining arguments for *varargs.
if fn_spec.varargs is not None:
varargs, remaining_args = remaining_args, []
else:
varargs = []
for index, value in enumerate(varargs):
varargs[index] = _ParseValue(value, None, None, metadata)
varargs = parsed_args + varargs
remaining_args += remaining_kwargs
consumed_args = args[:len(args) - len(remaining_args)]
return (varargs, kwargs), consumed_args, remaining_args, capacity
return _ParseFn
def _ParseArgs(fn_args, fn_defaults, num_required_args, kwargs,
remaining_args, metadata):
"""Parses the positional and named arguments from the available supplied args.
Modifies kwargs, removing args as they are used.
Args:
fn_args: A list of argument names that the target function accepts,
including positional and named arguments, but not the varargs or kwargs
names.
fn_defaults: A list of the default values in the function argspec.
num_required_args: The number of required arguments from the function's
argspec. This is the number of arguments without a default value.
kwargs: Dict with named command line arguments and their values.
remaining_args: The remaining command line arguments, which may still be
used as positional arguments.
metadata: Metadata about the function, typically from Fire decorators.
Returns:
parsed_args: A list of values to be used as positional arguments for calling
the target function.
kwargs: The input dict kwargs modified with the used kwargs removed.
remaining_args: A list of the supplied args that have not been used yet.
capacity: Whether the call could have taken args in place of defaults.
Raises:
FireError: if additional positional arguments are expected, but none are
available.
"""
accepts_positional_args = metadata.get(decorators.ACCEPTS_POSITIONAL_ARGS)
capacity = False # If we see a default get used, we'll set capacity to True
# Select unnamed args.
parsed_args = []
for index, arg in enumerate(fn_args):
value = kwargs.pop(arg, None)
if value is not None: # A value is specified at the command line.
value = _ParseValue(value, index, arg, metadata)
parsed_args.append(value)
else: # No value has been explicitly specified.
if remaining_args and accepts_positional_args:
# Use a positional arg.
value = remaining_args.pop(0)
value = _ParseValue(value, index, arg, metadata)
parsed_args.append(value)
elif index < num_required_args:
raise FireError(
'The function received no value for the required argument:', arg)
else:
# We're past the args for which there's no default value.
# There's a default value for this arg.
capacity = True
default_index = index - num_required_args # index into the defaults.
parsed_args.append(fn_defaults[default_index])
for key, value in kwargs.items():
kwargs[key] = _ParseValue(value, None, key, metadata)
return parsed_args, kwargs, remaining_args, capacity
def _ParseKeywordArgs(args, fn_args, fn_keywords):
"""Parses the supplied arguments for keyword arguments.
  Given a list of arguments, finds occurrences of --name value, and uses 'name'
as the keyword and 'value' as the value. Constructs and returns a dictionary
of these keyword arguments, and returns a list of the remaining arguments.
Only if fn_keywords is None, this only finds argument names used by the
function, specified through fn_args.
This returns the values of the args as strings. They are later processed by
_ParseArgs, which converts them to the appropriate type.
Args:
args: A list of arguments
fn_args: A list of argument names that the target function accepts,
including positional and named arguments, but not the varargs or kwargs
names.
fn_keywords: The argument name for **kwargs, or None if **kwargs not used
Returns:
kwargs: A dictionary mapping keywords to values.
remaining_kwargs: A list of the unused kwargs from the original args.
remaining_args: A list of the unused arguments from the original args.
"""
kwargs = {}
remaining_kwargs = []
remaining_args = []
if not args:
return kwargs, remaining_kwargs, remaining_args
skip_argument = False
for index, argument in enumerate(args):
if skip_argument:
skip_argument = False
continue
arg_consumed = False
if argument.startswith('--'):
# This is a named argument; get its value from this arg or the next.
got_argument = False
keyword = argument[2:]
contains_equals = '=' in keyword
is_bool_syntax = (
not contains_equals and
(index + 1 == len(args) or args[index + 1].startswith('--')))
if contains_equals:
keyword, value = keyword.split('=', 1)
got_argument = True
elif is_bool_syntax:
# Since there's no next arg or the next arg is a Flag, we consider
# this flag to be a boolean.
got_argument = True
if keyword in fn_args:
value = 'True'
elif keyword.startswith('no'):
keyword = keyword[2:]
value = 'False'
else:
value = 'True'
else:
if index + 1 < len(args):
value = args[index + 1]
got_argument = True
keyword = keyword.replace('-', '_')
# In order for us to consume the argument as a keyword arg, we either:
# Need to be explicitly expecting the keyword, or we need to be
# accepting **kwargs.
if got_argument:
skip_argument = not contains_equals and not is_bool_syntax
arg_consumed = True
if keyword in fn_args or fn_keywords:
kwargs[keyword] = value
else:
remaining_kwargs.append(argument)
if skip_argument:
remaining_kwargs.append(args[index + 1])
if not arg_consumed:
# The argument was not consumed, so it is still a remaining argument.
remaining_args.append(argument)
return kwargs, remaining_kwargs, remaining_args
def _ParseValue(value, index, arg, metadata):
"""Parses value, a string, into the appropriate type.
The function used to parse value is determined by the remaining arguments.
Args:
value: The string value to be parsed, typically a command line argument.
index: The index of the value in the function's argspec.
arg: The name of the argument the value is being parsed for.
metadata: Metadata about the function, typically from Fire decorators.
Returns:
value, parsed into the appropriate type for calling a function.
"""
parse_fn = parser.DefaultParseValue
# We check to see if any parse function from the fn metadata applies here.
parse_fns = metadata.get(decorators.FIRE_PARSE_FNS)
if parse_fns:
default = parse_fns['default']
positional = parse_fns['positional']
named = parse_fns['named']
if index is not None and 0 <= index < len(positional):
parse_fn = positional[index]
elif arg in named:
parse_fn = named[arg]
elif default is not None:
parse_fn = default
return parse_fn(value)
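def _ExampleParseKeywordArgs():
  # A minimal usage sketch (illustrative only, not part of the parser): shows
  # how the private helper above splits '--name value' pairs and a trailing
  # boolean flag when the target function accepts 'name' and 'verbose' and has
  # no **kwargs parameter (fn_keywords=None).
  kwargs, remaining_kwargs, remaining_args = _ParseKeywordArgs(
      ['--name', 'David', '--verbose'], ['name', 'verbose'], None)
  # kwargs == {'name': 'David', 'verbose': 'True'}; both remaining lists are empty.
  return kwargs, remaining_kwargs, remaining_args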
| 2.921875 | 3 |
app.py | AmirValeev/auto-ml-classifier | 0 | 2022 | <reponame>AmirValeev/auto-ml-classifier<gh_stars>0
import os, ast
import pandas as pd
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import make_column_transformer
from sklearn.pipeline import make_pipeline
import pickle
def main():
# Get the dataset from the users GitHub repository
dataset_path = "https://raw.githubusercontent.com/" + os.environ["GITHUB_REPOSITORY"] + "/master/dataset.csv"
data = pd.read_csv(dataset_path)
print()
print(data.describe())
x=data.iloc[:,:-1]
y=data.iloc[:,-1]
    column_trans = make_column_transformer((OneHotEncoder(), [-1]), remainder='passthrough')  # one-hot encode the last (categorical) feature column
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2, random_state=0)
#define a pipeline
pipe = make_pipeline(column_trans,SVC())
pipe.fit(x_train,y_train) #training the model
print("\nModel Training Finished")
accuracy = pipe.score(x_test,y_test)
print("\nAccuracy of the Model: "+str(accuracy*100))
if pipe:
pickle.dump(pipe,open('model.pkl','wb')) # store the artifact in docker container
if not os.environ["INPUT_MYINPUT"] == 'zeroinputs':
inputs = ast.literal_eval(os.environ["INPUT_MYINPUT"])
print("\nThe Predicted Ouput is :")
output = pipe.predict([inputs])
print(output)
else:
output = ["None"]
print("\nUser didn't provided inputs to predict")
print("\n=======================Action Completed========================")
print(f"::set-output name=myOutput::{output[0]}")
if __name__ == "__main__":
main()
| 3.078125 | 3 |
util/headers.py | giuseppe/quay | 2,027 | 2023 | <gh_stars>1000+
import base64
def parse_basic_auth(header_value):
"""
Attempts to parse the given header value as a Base64-encoded Basic auth header.
"""
if not header_value:
return None
parts = header_value.split(" ")
if len(parts) != 2 or parts[0].lower() != "basic":
return None
try:
        decoded = base64.b64decode(parts[1]).decode("utf-8")
        basic_parts = decoded.split(":", 1)
if len(basic_parts) != 2:
return None
return basic_parts
except ValueError:
return None
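def _example_round_trip():
    # A minimal sketch (illustrative only): build a Basic auth header for the
    # made-up credentials "user:pass" and parse it back into its two parts.
    header = "Basic " + base64.b64encode(b"user:pass").decode("ascii")
    return parse_basic_auth(header)  # expected: ["user", "pass"]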
| 2.96875 | 3 |
indico/core/signals/event/core.py | tobiashuste/indico | 0 | 2024 | <gh_stars>0
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from indico.core.signals.event import _signals
sidemenu = _signals.signal('sidemenu', """
Expected to return ``MenuEntryData`` objects to be added to the event side menu.
A single entry can be returned directly, multiple entries must be yielded.
""")
deleted = _signals.signal('deleted', """
Called when an event is deleted. The *sender* is the event object.
The `user` kwarg contains the user performing the deletion if available.
""")
updated = _signals.signal('updated', """
Called when basic data of an event is updated. The *sender* is the event.
A dict of changes is passed in the `changes` kwarg, with ``(old, new)``
tuples for each change. Note that the `person_links` change may happen
with `old` and `new` being the same lists for technical reasons. If the
key is present, it should be assumed that something changed (usually
the order or some data on the person link).
""")
cloned = _signals.signal('cloned', """
Called when an event is cloned. The *sender* is the `Event` object of
the old event, the new event is passed in the `new_event` kwarg.
""")
type_changed = _signals.signal('type-changed', """
Called when the type of an event is changed. The `sender` is the event,
the old type is passed in the `old_type` kwarg.
""")
moved = _signals.signal('moved', """
Called when an event is moved to a different category. The `sender` is the event,
the old category is in the `old_parent` kwarg.
""")
created = _signals.signal('created', """
Called when a new event is created. The `sender` is the new Event.
""")
session_updated = _signals.signal('session-updated', """
Called when a session is updated. The *sender* is the session.
""")
session_deleted = _signals.signal('session-deleted', """
Called when a session is deleted. The *sender* is the session.
""")
session_block_deleted = _signals.signal('session-block-deleted', """
Called when a session block is deleted. The *sender* is the session block.
This signal is called before the ``db.session.delete()`` on the block is
executed.
""")
timetable_buttons = _signals.signal('timetable-buttons', """
Expected to return a list of tuples ('button_name', 'js-call-class').
Called when building the timetable view.
""")
get_log_renderers = _signals.signal('get-log-renderers', """
Expected to return `EventLogRenderer` classes.
""")
get_feature_definitions = _signals.signal('get-feature-definitions', """
Expected to return `EventFeature` subclasses.
""")
metadata_postprocess = _signals.signal('metadata-postprocess', """
Called right after a dict-like representation of an event is created,
so that plugins can add their own fields.
The *sender* is a string parameter specifying the source of the metadata.
The *event* kwarg contains the event object. The metadata is passed in
the `data` kwarg.
The signal should return a dict that will be used to update the
original representation (fields to add or override).
""")
| 2.125 | 2 |
cinder/tests/unit/volume/drivers/emc/scaleio/test_delete_volume.py | aarunsai81/netapp | 11 | 2025 | # Copyright (c) 2013 - 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six.moves import urllib
from cinder import context
from cinder import exception
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_volume
from cinder.tests.unit.volume.drivers.emc import scaleio
from cinder.tests.unit.volume.drivers.emc.scaleio import mocks
class TestDeleteVolume(scaleio.TestScaleIODriver):
"""Test cases for ``ScaleIODriver.delete_volume()``"""
def setUp(self):
"""Setup a test case environment.
Creates a fake volume object and sets up the required API responses.
"""
super(TestDeleteVolume, self).setUp()
ctx = context.RequestContext('fake', 'fake', auth_token=True)
self.volume = fake_volume.fake_volume_obj(
ctx, **{'provider_id': fake.PROVIDER_ID})
self.volume_name_2x_enc = urllib.parse.quote(
urllib.parse.quote(self.driver._id_to_base64(self.volume.id))
)
self.HTTPS_MOCK_RESPONSES = {
self.RESPONSE_MODE.Valid: {
'types/Volume/instances/getByName::' +
self.volume_name_2x_enc: self.volume.id,
'instances/Volume::{}/action/removeMappedSdc'.format(
self.volume.provider_id): self.volume.provider_id,
'instances/Volume::{}/action/removeVolume'.format(
self.volume.provider_id
): self.volume.provider_id,
},
self.RESPONSE_MODE.BadStatus: {
'types/Volume/instances/getByName::' +
self.volume_name_2x_enc: mocks.MockHTTPSResponse(
{
'errorCode': 401,
'message': 'BadStatus Volume Test',
}, 401
),
'instances/Volume::{}/action/removeVolume'.format(
self.volume.provider_id
): mocks.MockHTTPSResponse(
{
'errorCode': 401,
'message': 'BadStatus Volume Test',
}, 401
),
},
}
def test_bad_login_and_volume(self):
self.set_https_response_mode(self.RESPONSE_MODE.BadStatus)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.delete_volume,
self.volume)
def test_delete_volume(self):
"""Setting the unmap volume before delete flag for tests """
self.driver.configuration.set_override(
'sio_unmap_volume_before_deletion',
override=True)
self.driver.delete_volume(self.volume)
| 1.914063 | 2 |
example-package/transportation_tutorials/__init__.py | chrisc20042001/python-for-transportation-modeling | 0 | 2026 | # -*- coding: utf-8 -*-
__version__ = '1.0.2'
import os
import appdirs
import osmnx as ox
import joblib
import requests
from .files import load_vars, save_vars, cached, inflate_tar, download_zipfile
from .data import data, list_data, problematic
from .tools.view_code import show_file
from . import mapping
cache_dir = None
memory = None
def set_cache_dir(location=None, compress=True, verbose=0, **kwargs):
"""
Set up a cache directory for use with the tutorials.
    Parameters
    ----------
    location : Path-like or False, optional
        A path for the cache files. Set to False to disable caching.
"""
global memory, cache_dir
if location is None:
location = appdirs.user_cache_dir('transportation_tutorials')
if location is False:
location = None
memory = joblib.Memory(location, compress=compress, verbose=verbose, **kwargs)
make_cache = (
(ox, 'gdf_from_place'),
(ox, 'graph_from_bbox'),
(requests, 'get'),
(requests, 'post'),
)
for module, func_name in make_cache:
try:
func = getattr(module, f"_{func_name}_orig")
except AttributeError:
func = getattr(module, func_name)
setattr(module, f"_{func_name}_orig", func)
setattr(module, func_name, memory.cache(func))
set_cache_dir()
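def _example_cache_to_custom_dir(cache_location):
    """
    A minimal sketch (illustrative only): route the cached OSM/requests calls
    configured above to a caller-supplied directory. ``cache_location`` is an
    assumption here; any writable path works, and False disables caching.
    """
    set_cache_dir(location=cache_location, compress=True, verbose=1)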
| 2.359375 | 2 |
common/common.py | czajowaty/curry-bot | 3 | 2027 | <reponame>czajowaty/curry-bot<filename>common/common.py
from requests.models import PreparedRequest
def is_valid_url(url):
prepared_request = PreparedRequest()
try:
prepared_request.prepare_url(url, None)
return True
except Exception as e:
return False
class Timestamp: # a speedrun.com style timestamp e.g. "3h 53m 233s 380ms"
def __init__(self, s):
self.hours, self.minutes, self.seconds, self.milliseconds = 0, 0, 0, 0
for arg in s.split():
if arg.endswith("ms"):
self.milliseconds += int(arg[:-2])
elif arg.endswith("s"):
self.seconds += int(arg[:-1])
elif arg.endswith("m"):
self.minutes += int(arg[:-1])
elif arg.endswith("h"):
self.hours += int(arg[:-1])
@staticmethod
def from_milliseconds(ms):
t = Timestamp("0ms")
temp = ms
t.hours = temp // 3600000
temp %= 3600000
t.minutes = temp // 60000
temp %= 60000
t.seconds = temp // 1000
t.milliseconds = temp % 1000
return t
def __str__(self):
result = []
if self.hours != 0:
result.append("{}h".format(self.hours))
if not (self.hours == 0 and self.minutes == 0):
result.append("{}m".format(self.minutes))
result.append("{}s".format(self.seconds))
if self.milliseconds > 0:
result.append("{}ms".format(self.milliseconds))
return ' '.join(result)
def __eq__(self, other):
return self.hours == other.hours and self.minutes == other.minutes and self.seconds == other.seconds and self.milliseconds == other.milliseconds
def __lt__(self, other):
if self.hours < other.hours:
return True
elif self.hours > other.hours:
return False
if self.minutes < other.minutes:
return True
elif self.minutes > other.minutes:
return False
if self.seconds < other.seconds:
return True
elif self.seconds > other.seconds:
return False
return self.milliseconds < other.milliseconds
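def _example_round_trip():
    # A minimal sketch (illustrative only): parse a speedrun.com style
    # timestamp, rebuild it from its millisecond total, and render it back.
    t = Timestamp("1h 2m 3s 45ms")
    rebuilt = Timestamp.from_milliseconds(3723045)
    return t == rebuilt, str(rebuilt)  # (True, '1h 2m 3s 45ms')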
| 2.8125 | 3 |
hendrix/test/test_ux.py | anthonyalmarza/hendrix | 0 | 2028 | import os
import sys
from . import HendrixTestCase, TEST_SETTINGS
from hendrix.contrib import SettingsError
from hendrix.options import options as hx_options
from hendrix import ux
from mock import patch
class TestMain(HendrixTestCase):
def setUp(self):
super(TestMain, self).setUp()
self.DEFAULTS = hx_options()
os.environ['DJANGO_SETTINGS_MODULE'] = ''
self.devnull = open(os.devnull, 'w')
self.args_list = ['hx', 'start']
self.patcher = patch('hendrix.ux.findSettingsModule')
self.patcher.start()
def tearDown(self):
super(TestMain, self).tearDown()
self.devnull.close()
self.patcher.stop()
def test_settings_from_system_variable(self):
django_settings = 'django.inanity'
with patch('hendrix.ux.findSettingsModule') as findSettingsMod:
findSettingsMod.return_value = django_settings
options = self.DEFAULTS
self.assertEqual(options['settings'], '')
options = ux.djangoVsWsgi(options)
self.assertEqual(options['settings'], django_settings)
def test_settings_wsgi_absense(self):
with patch('hendrix.ux.findSettingsModule') as findSettingsMod:
findSettingsMod.return_value = ""
self.assertRaises(SettingsError, ux.djangoVsWsgi, self.DEFAULTS)
def test_user_settings_overrides_system_variable(self):
django_settings = 'django.inanity'
with patch('hendrix.ux.findSettingsModule') as findSettingsMod:
findSettingsMod.return_value = django_settings
options = self.DEFAULTS
user_settings = 'myproject.settings'
options['settings'] = user_settings
self.assertEqual(options['settings'], user_settings)
options = ux.djangoVsWsgi(options)
self.assertEqual(options['settings'], user_settings)
def test_wsgi_correct_wsgi_path_works(self):
wsgi_dot_path = 'hendrix.test.wsgi'
options = self.DEFAULTS
options.update({'wsgi': wsgi_dot_path})
options = ux.djangoVsWsgi(options)
self.assertEqual(options['wsgi'], wsgi_dot_path)
def test_wsgi_wrong_path_raises(self):
wsgi_dot_path = '_this.leads.nowhere.man'
options = self.DEFAULTS
options.update({'wsgi': wsgi_dot_path})
self.assertRaises(ImportError, ux.djangoVsWsgi, options)
def test_cwd_exposure(self):
cwd = os.getcwd()
_path = sys.path
sys.path = [p for p in _path if p != cwd]
self.assertTrue(cwd not in sys.path)
ux.exposeProject(self.DEFAULTS)
self.assertTrue(cwd in sys.path)
def test_pythonpath(self):
options = self.DEFAULTS
test_path = os.path.join(
os.path.dirname(os.getcwd()),
'hendrix/test/testproject'
)
options['pythonpath'] = test_path
ux.exposeProject(options)
self.assertTrue(test_path in sys.path)
sys.path = [p for p in sys.path if p != test_path]
def test_shitty_pythonpath(self):
options = self.DEFAULTS
test_path = '/if/u/have/this/path/you/suck'
options['pythonpath'] = test_path
self.assertRaises(IOError, ux.exposeProject, options)
def test_dev_friendly_options(self):
options = self.DEFAULTS
options['dev'] = True
self.assertFalse(options['reload'])
self.assertFalse(options['loud'])
options = ux.devFriendly(options)
self.assertTrue(options['reload'])
self.assertTrue(options['loud'])
def test_noise_control_daemonize(self):
options = self.DEFAULTS
options['quiet'] = True
options['daemonize'] = True
stdout = sys.stdout
stderr = sys.stderr
redirect = ux.noiseControl(options)
self.assertEqual(sys.stdout.name, stdout.name)
self.assertEqual(sys.stderr.name, stderr.name)
self.assertEqual(redirect, None)
def test_noise_control_traceback(self):
options = self.DEFAULTS
options['quiet'] = True
options['daemonize'] = True
options['traceback'] = True
stdout = sys.stdout
stderr = sys.stderr
redirect = ux.noiseControl(options)
self.assertEqual(sys.stdout.name, stdout.name)
self.assertEqual(sys.stderr.name, stderr.name)
self.assertEqual(redirect, None)
def test_main_with_daemonize(self):
sys.argv = self.args_list + ['-d', '--settings', TEST_SETTINGS]
class Process(object):
def poll(self):
return 0
with patch('time.sleep'):
with patch('subprocess.Popen') as popen:
popen.return_value = Process()
ux.main()
self.assertTrue(popen.called)
self.assertTrue('--settings' in popen.call_args[0][0])
sys.argv = []
def test_options_structure(self):
"""
A test to ensure that HendrixDeploy.options also has the complete set
of options available
"""
deploy = self.wsgiDeploy()
expected_keys = self.DEFAULTS.keys()
actual_keys = deploy.options.keys()
self.assertListEqual(expected_keys, actual_keys)
| 2.25 | 2 |
discord/types/interactions.py | Voxel-Fox-Ltd/Novus | 61 | 2029 | """
The MIT License (MIT)
Copyright (c) 2015-2021 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import Optional, TYPE_CHECKING, Dict, TypedDict, Union, List, Literal
from .snowflake import Snowflake
from .components import Component, SelectOption
from .embed import Embed
from .channel import ChannelType, Channel
from .member import Member
from .role import Role
from .user import User
if TYPE_CHECKING:
from .message import AllowedMentions, Message
ApplicationCommandType = Literal[1, 2, 3]
class ApplicationCommand(TypedDict):
id: Snowflake
application_id: Snowflake
name: str
description: str
options: Optional[List[ApplicationCommandOption]]
type: Optional[ApplicationCommandType]
ApplicationCommandOptionType = Literal[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
class ApplicationCommandOption(TypedDict):
type: ApplicationCommandOptionType
name: str
description: str
required: bool
choices: Optional[List[ApplicationCommandOptionChoice]]
options: Optional[List[ApplicationCommandOption]]
class ApplicationCommandOptionChoice(TypedDict):
name: str
value: Union[str, int]
ApplicationCommandPermissionType = Literal[1, 2]
class ApplicationCommandPermissions(TypedDict):
id: Snowflake
type: ApplicationCommandPermissionType
permission: bool
class BaseGuildApplicationCommandPermissions(TypedDict):
permissions: List[ApplicationCommandPermissions]
class PartialGuildApplicationCommandPermissions(BaseGuildApplicationCommandPermissions):
id: Snowflake
class GuildApplicationCommandPermissions(PartialGuildApplicationCommandPermissions):
application_id: Snowflake
guild_id: Snowflake
InteractionType = Literal[1, 2, 3]
class _ApplicationCommandInteractionDataOption(TypedDict):
name: str
class _ApplicationCommandInteractionDataOptionSubcommand(_ApplicationCommandInteractionDataOption):
type: Literal[1, 2]
options: List[ApplicationCommandInteractionDataOption]
class _ApplicationCommandInteractionDataOptionString(_ApplicationCommandInteractionDataOption):
type: Literal[3]
value: str
class _ApplicationCommandInteractionDataOptionInteger(_ApplicationCommandInteractionDataOption):
type: Literal[4]
value: int
class _ApplicationCommandInteractionDataOptionBoolean(_ApplicationCommandInteractionDataOption):
type: Literal[5]
value: bool
class _ApplicationCommandInteractionDataOptionSnowflake(_ApplicationCommandInteractionDataOption):
type: Literal[6, 7, 8, 9]
value: Snowflake
class _ApplicationCommandInteractionDataOptionNumber(_ApplicationCommandInteractionDataOption):
type: Literal[10]
value: float
ApplicationCommandInteractionDataOption = Union[
_ApplicationCommandInteractionDataOptionString,
_ApplicationCommandInteractionDataOptionInteger,
_ApplicationCommandInteractionDataOptionSubcommand,
_ApplicationCommandInteractionDataOptionBoolean,
_ApplicationCommandInteractionDataOptionSnowflake,
_ApplicationCommandInteractionDataOptionNumber,
]
class ApplicationCommandResolvedPartialChannel(TypedDict):
id: Snowflake
type: ChannelType
permissions: str
name: str
class ApplicationCommandInteractionDataResolved(TypedDict, total=False):
users: Dict[Snowflake, User]
members: Dict[Snowflake, Member]
roles: Dict[Snowflake, Role]
channels: Dict[Snowflake, ApplicationCommandResolvedPartialChannel]
class ApplicationCommandInteractionDataOption(TypedDict):
name: str
type: int
value: Optional[str] # Optional[ApplicationCommandOptionType]
options: Optional[ApplicationCommandInteractionDataOption]
focused: Optional[bool]
components: Optional[List[ApplicationCommandInteractionDataOption]]
class _InteractionDataOptional(TypedDict, total=False):
resolved: Dict[str, dict]
options: List[ApplicationCommandInteractionDataOption]
custom_id: str
component_type: int
values: List[str]
target_id: Snowflake
components: List[ApplicationCommandInteractionDataOption]
class InteractionData(_InteractionDataOptional):
id: Snowflake
name: str
type: ApplicationCommandType
class InteractionResolved(TypedDict):
users: List[Union[User, Member]]
members: List[Member]
roles: List[Role]
channels: List[Channel]
messages: List[Message]
class _InteractionOptional(TypedDict, total=False):
data: InteractionData
guild_id: Snowflake
channel_id: Snowflake
member: Member
user: User
message: Message
guild_locale: str
class Interaction(_InteractionOptional):
id: Snowflake
application_id: Snowflake
type: InteractionType
token: str
version: int
resolved: InteractionResolved
locale: str
class InteractionApplicationCommandCallbackData(TypedDict, total=False):
tts: bool
content: str
embeds: List[Embed]
allowed_mentions: AllowedMentions
flags: int
components: List[Component]
InteractionResponseType = Literal[1, 4, 5, 6, 7]
class _InteractionResponseOptional(TypedDict, total=False):
data: InteractionApplicationCommandCallbackData
class InteractionResponse(_InteractionResponseOptional):
type: InteractionResponseType
class MessageInteraction(TypedDict):
id: Snowflake
type: InteractionType
name: str
user: User
class _EditApplicationCommandOptional(TypedDict, total=False):
description: str
options: Optional[List[ApplicationCommandOption]]
type: ApplicationCommandType
class EditApplicationCommand(_EditApplicationCommandOptional):
name: str
default_permission: bool
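def _example_option_choice() -> ApplicationCommandOptionChoice:
    # A minimal sketch (illustrative only): one choice entry for a string
    # option; the name/value pair is an assumption, not real Discord data.
    return {'name': 'Red', 'value': 'red'}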
| 1.5 | 2 |
local/local_sign.py | EVAyo/chaoxing_auto_sign | 0 | 2030 | # -*- coding: utf8 -*-
import os
import re
import time
import json
import random
import asyncio
from typing import Optional, List, Dict
from aiohttp import ClientSession
from aiohttp.cookiejar import SimpleCookie
from lxml import etree
from bs4 import BeautifulSoup
from config import *
from message import server_chan_send
class AutoSign(object):
def __init__(self, username, password, schoolid=None, enc=None):
"""初始化就进行登录"""
self.headers = {
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.100 Safari/537.36',
}
self.session = ClientSession(headers=self.headers)
self.username = username
self.password = password
self.schoolid = '' if schoolid is None else schoolid
self.enc = '' if enc is None else enc
async def check_login_status(self, status, text):
if status == 403:
return 1002
data = json.loads(text)
if data['result']:
            return 1000  # login succeeded
else:
            return 1001  # login information is incorrect
async def set_cookies(self):
"""设置cookies"""
cookie = await self.check_cookies()
if not cookie:
            # Cookies invalid: log in again and save the new cookies
status, text, cookie = await self.login()
login_status = await self.check_login_status(status, text)
if login_status == 1000:
cookies = self.dict_from_simple_cookie(cookie)
self.save_cookies(cookies)
else:
return 1001
else:
self.session.cookie_jar.update_cookies(cookie)
return 1000
def dict_from_simple_cookie(self, cookies) -> dict:
"""
        Extract cookies from the response object.
"""
result = {}
for key, value in cookies.items():
result[key] = value.value
return result
def save_cookies(self, cookies: dict):
"""保存cookies"""
with open(COOKIES_FILE_PATH, "r") as f:
data = json.load(f)
data[self.username] = cookies
with open(COOKIES_FILE_PATH, 'w') as f2:
json.dump(data, f2)
async def check_cookies(self) -> Optional[SimpleCookie]:
"""检测json文件内是否存有cookies,有则检测,无则登录"""
if "cookies.json" not in os.listdir(COOKIES_PATH):
with open(COOKIES_FILE_PATH, 'w+') as f:
f.write("{}")
with open(COOKIES_FILE_PATH, 'r') as f:
            # If the json file holds no cookies for this account, return False right away
try:
data = json.load(f)
cookies = data[self.username]
except Exception:
return False
        # Check whether the cookies are still valid
async with self.session.request(method='GET',
url='http://mooc1-1.chaoxing.com/api/workTestPendingNew',
allow_redirects=False,
cookies=cookies) as resp:
if resp.status != 200:
print("cookie失效")
return None
else:
print("cookie有效!")
return cookies
async def login(self):
"""
        Log in and return the response.
"""
params = {
'name': self.username,
'pwd': <PASSWORD>,
'schoolid': self.schoolid,
'verify': 0
}
async with self.session.request(method='GET',
url='https://passport2.chaoxing.com/api/login',
params=params) as resp:
status = resp.status
text = await resp.text()
cookies = resp.cookies
return status, text, cookies
def check_activeid(self, activeid):
"""检测activeid是否存在,不存在则添加"""
activeid += self.username
if "activeid.json" not in os.listdir(ACTIVEID_PATH):
with open(ACTIVEID_FILE_PATH, 'w+') as f:
f.write("{}")
with open(ACTIVEID_FILE_PATH, 'r') as f:
try:
                # Read the file
data = json.load(f)
if data[activeid]:
return True
except BaseException:
                # An error here means this activeid has not been recorded
return False
def save_activeid(self, activeid):
"""保存已成功签到的activeid"""
activeid += self.username
if "activeid.json" not in os.listdir(ACTIVEID_PATH):
with open(ACTIVEID_FILE_PATH, 'w+') as f:
f.write("{}")
with open(ACTIVEID_FILE_PATH, 'r') as f:
data = json.load(f)
with open(ACTIVEID_FILE_PATH, 'w') as f2:
data[activeid] = True
json.dump(data, f2)
async def get_all_classid(self) -> list:
"""获取课程主页中所有课程的classid和courseid"""
res = []
async with self.session.request(method='GET',
url='http://mooc1-2.chaoxing.com/visit/interaction') as resp:
text = await resp.text()
soup = BeautifulSoup(text, "lxml")
course_list = soup.find_all(
'li', class_="course")
for course in course_list:
res.append((course.attrs['courseid'], course.attrs['clazzid'],
course.find_next('span', class_="course-name").text))
        print('Course list: ', res)
return res
async def get_sign_type(self, classid, courseid, activeid):
"""获取签到类型"""
params = {
'activeId': activeid,
'classId': classid,
'courseId': courseid
}
async with self.session.request(method='GET',
url='https://mobilelearn.chaoxing.com/widget/sign/pcStuSignController/preSign',
params=params) as resp:
text = await resp.text()
h = etree.HTML(text)
sign_type = h.xpath('//div[@class="location"]/span/text()')
return sign_type
async def get_activeid(self, classid, courseid, classname):
"""访问任务面板获取课程的活动id"""
res = []
re_rule = r'([\d]+),2'
params = {
'courseId': courseid,
'jclassId': classid
}
async with self.session.request(method='GET',
url="https://mobilelearn.chaoxing.com/widget/pcpick/stu/index",
verify_ssl=False,
params=params) as resp:
text = await resp.text()
h = etree.HTML(text)
activeid_list = h.xpath('//*[@id="startList"]/div/div/@onclick')
for activeid in activeid_list:
activeid = re.findall(re_rule, activeid)
if not activeid:
continue
sign_type = await self.get_sign_type(classid, courseid, activeid[0])
res.append((activeid[0], sign_type[0]))
n = len(res)
if n:
d = {'num': n, 'class': {}}
for i in range(n):
if not self.check_activeid(res[i][0]):
d['class'][i] = {
'classid': classid,
'courseid': courseid,
'activeid': res[i][0],
'classname': classname,
'sign_type': res[i][1]
}
return d
async def general_sign(self, classid, courseid, activeid):
"""普通签到"""
params = {
'activeId': activeid,
'classId': classid,
'fid': '39037',
'courseId': courseid
}
async with self.session.request(
method='GET',
url="https://mobilelearn.chaoxing.com/widget/sign/pcStuSignController/preSign",
params=params,
verify_ssl=False
) as resp:
text = await resp.text()
title = re.findall('<title>(.*)</title>', text)[0]
if "签到成功" not in title:
                # The page title does not contain "签到成功" (sign-in succeeded), so treat it as a photo sign-in
                return await self.tphoto_sign(activeid)
else:
s = {
'date': time.strftime("%m-%d %H:%M", time.localtime()),
'status': title
}
return s
async def hand_sign(self, classid, courseid, activeid):
"""手势签到"""
params = {
'courseId': courseid,
'classId': classid,
'activeId': activeid
}
async with self.session.request(
method='GET',
url="https://mobilelearn.chaoxing.com/widget/sign/pcStuSignController/signIn",
params=params,
verify_ssl=False
) as resp:
text = await resp.text()
title = re.findall('<title>(.*)</title>', text)
s = {
'date': time.strftime("%m-%d %H:%M", time.localtime()),
'status': title
}
return s
async def qcode_sign(self, activeid):
"""二维码签到"""
params = {
'enc': self.enc,
'name': '',
'activeId': activeid,
'uid': '',
'clientip': '',
'useragent': '',
'latitude': '-1',
'longitude': '-1',
'fid': '',
'appType': '15'
}
async with self.session.request('GET', 'https://mobilelearn.chaoxing.com/pptSign/stuSignajax',
params=params,
allow_redirects=False) as resp:
text = await resp.text()
return {
'date': time.strftime("%m-%d %H:%M", time.localtime()),
'status': text
}
async def addr_sign(self, activeid):
"""位置签到"""
params = {
'name': '',
'activeId': activeid,
'address': '中国',
'uid': '',
'clientip': clientip,
'latitude': latitude,
'longitude': longitude,
'fid': '',
'appType': '15',
'ifTiJiao': '1'
}
async with self.session.request(
method="GET",
url="https://mobilelearn.chaoxing.com/pptSign/stuSignajax",
params=params
) as resp:
text = await resp.text()
return {
'date': time.strftime("%m-%d %H:%M", time.localtime()),
'status': text
}
    async def tphoto_sign(self, activeid, uid=None):
        """Photo sign-in."""
objectId = await self.upload_img(uid)
params = {
'name': '',
'activeId': activeid,
'address': '中国',
'uid': '',
'clientip': clientip,
'latitude': latitude,
'longitude': longitude,
'fid': '',
'appType': '15',
'ifTiJiao': '1',
'objectId': objectId
}
async with self.session.request(
method="GET",
url="https://mobilelearn.chaoxing.com/pptSign/stuSignajax",
params=params
) as resp:
text = await resp.text()
return {
'date': time.strftime("%m-%d %H:%M", time.localtime()),
'status': text
}
async def get_token(self):
"""获取上传文件所需参数token"""
url = 'https://pan-yz.chaoxing.com/api/token/uservalid'
async with self.session.request(
method='GET',
url=url
) as resp:
text = await resp.text()
token_dict = json.loads(text)
return token_dict['_token']
async def upload_img(self, uid):
"""上传图片"""
        # Randomly choose an image from the image folder
try:
all_img = os.listdir(IMAGE_PATH)
except Exception as e:
os.mkdir(IMAGE_PATH)
            all_img = []
if len(all_img) == 0:
return "a5d588f7bce1994323c348982332e470"
else:
img = IMAGE_PATH + random.choice(all_img)
# uid = self.session.cookies.get_dict()['UID']
url = 'https://pan-yz.chaoxing.com/upload'
files = {'file': open(img, 'rb')}
uid = self.session.cookie_jar.filter_cookies('').get('UID').value
token = await self.get_token()
param = {
'puid': uid,
'_token': token
}
async with self.session.request(
method='POST',
url=url,
params=param,
data=files
) as resp:
text = await resp.text()
res_dict = json.loads(text)
return res_dict['objectId']
async def send_sign_request(self, classid, courseid, activeid, sign_type):
"""发送签到请求"""
if "手势" in sign_type:
return await self.hand_sign(classid, courseid, activeid)
elif "二维码" in sign_type:
return await self.qcode_sign(activeid)
elif "位置" in sign_type:
return await self.addr_sign(activeid)
elif "拍照" in sign_type:
return await self.tphoto_sign(activeid)
else:
return await self.general_sign(classid, courseid, activeid)
async def send_sign_result(self, results: List[Dict]):
"""
        Send the sign-in results.
"""
await server_chan_send(results, self.session)
async def start_sign_task(self):
"""开始所有签到任务"""
tasks = []
res = []
await self.set_cookies()
        # Get the classid and courseid of every course
classid_courseId = await self.get_all_classid()
        # Get the activeid and sign-in type of every course
for i in classid_courseId:
coroutine = self.get_activeid(i[1], i[0], i[2])
tasks.append(coroutine)
results: List[Dict] = await asyncio.gather(*tasks)
for r in results:
if r is None:
continue
for d in r['class'].values():
resp = await self.send_sign_request(
d['classid'],
d['courseid'],
d['activeid'],
d['sign_type']
)
if resp:
                    # Signed-in course, sign-in time, sign-in status
sign_msg = {
'name': d['classname'],
'date': resp['date'],
'status': resp['status']
}
res.append(sign_msg)
if '失败' in resp['status']:
continue
                    # After a successful sign-in, record the activeid
self.save_activeid(d['activeid'])
return res
async def close_session(self):
await self.session.close() | 2.515625 | 3 |
build/scripts-3.6/fit_background_model.py | stahlberggroup/umierrorcorrect | 0 | 2031 | <filename>build/scripts-3.6/fit_background_model.py<gh_stars>0
#!python
import numpy as np
from numpy import inf
from numpy import nan
from scipy.optimize import fmin
from scipy.stats import beta
from scipy.special import beta as B
from scipy.special import comb
import argparse
import sys
import glob
def parseArgs():
'''Function for parsing arguments'''
parser = argparse.ArgumentParser(description="Pipeline for analyzing barcoded amplicon \
sequencing data with Unique molecular \
identifiers (UMI)")
parser.add_argument('-cons', '--cons_file', dest='cons_file', help='Path to cons file, for fitting parameters of the bgmodel')
parser.add_argument('-nonbgposfile', '--non-background-positions', dest='nonbgposfile',
help='Path to file with non-background positions')
parser.add_argument('-out', '--out_file',dest='out_file',help="name of output file, default = %(default)s]",default="bgmodel.params")
parser.add_argument('-f','--fsize',dest='fsize', help='Family size cutoff (consensus cutoff) for variant calling. [default = %(default)s]', default=3)
args = parser.parse_args(sys.argv[1:])
return(args)
def parse_cons_file(filename,fsize=3):
n1=[]
f1=[]
c1=[]
posx=[]
data=[]
with open(filename) as f:
for line in f:
if not line.startswith('Sample Name'):
line=line.rstrip('\n')
parts=line.split('\t')
pos=parts[1]+':'+parts[2]
name=parts[3]
#print(name)
if name not in "":
famsize=parts[-4]
if int(famsize)==fsize:
frac=float(parts[-2])
alt=parts[-1]
count=parts[-3]
if frac > 0 and alt not in 'N':
cov=int(parts[-5])
f1.append(float(frac))
n1.append(int(cov))
c1.append(int(count))
posx.append(pos)
data.append(line)
#print(name)
#print(famsize)
return(f1,n1,c1,posx,data)
def betaNLL(params,*args):
a,b = params
data = np.array(args[0])
pdf=beta.pdf(data,a,b,loc=0,scale=1)
lg=np.log(pdf)
#lg=np.where(lg==-np.inf,0,lg)
    mask = np.isfinite(lg)
    nll = -lg[mask].sum()
return(nll)
def get_beta_parameters(data):
m=np.mean(data)
v=np.var(data)
a0=m*(m * (1-m) / v-1 )
b0=(1-m)*(m * (1-m) / v-1 )
result=fmin(betaNLL,[a0,b0],args=(data,))
return(result)
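def _example_fit_synthetic():
    '''A minimal sketch (illustrative only): the fit should roughly recover
    the parameters of synthetic Beta(2, 5) samples; the sample size and seed
    used here are assumptions made for the demonstration.'''
    data = beta.rvs(2, 5, size=2000, random_state=0)
    a_hat, b_hat = get_beta_parameters(data)
    return (a_hat, b_hat)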
def run_fit_bgmodel(args):
spikepositions=[178952085,55599321,7577558,7577547,7577538,7577120]
if args.nonbgposfile:
nonbgpos=[]
with open(args.nonbgposfile) as f:
for line in f:
line=line.rstrip()
nonbgpos.append(line)
else:
nonbgpos=spikepositions
if not args.cons_file:
args.cons_file=glob.glob(args.output_path+'/*cons.tsv')[0]
args.fsize=int(args.fsize)
f1,n1,a1,pos,data=parse_cons_file(args.cons_file,args.fsize)
f1 = np.array(f1)
n1 = np.array(n1)
a1 = np.array(a1)
pos = np.array(pos)
data = np.array(data)
result=get_beta_parameters(f1[np.isin(pos,nonbgpos)!=True])
#a=prob_bb(n1,a1,result[0],result[1])
print(pos,nonbgpos,np.isin(pos,nonbgpos))
with open(args.out_file,'w') as g:
g.write('{}\n'.format(result[0]))
g.write('{}\n'.format(result[1]))
#a[a==inf]=1e-10
#a[np.isnan(a)]=1e-10
#Q = -10*np.log10(a)
#data=np.array(data)
#plot_histogram(Q,args.output_path+'/'+args.sample_name+'.histogram.png')
#if args.vc_method.lower()=='bbmodel':
# rout=data[Q >= float(args.qvalue_threshold)]
# Qsig=Q[Q >= float(args.qvalue_threshold)]
#else:
# rout=data[a1 >= float(args.count_cutoff)]
# Qsig=Q[a1 >= float(args.count_cutoff)]
#outfilename=args.output_path+'/'+args.sample_name+'2.vcf'
#write_vcf(outfilename,rout,Qsig,args.reference_file)
if __name__=='__main__':
args=parseArgs()
run_fit_bgmodel(args)
| 2.234375 | 2 |
caffe2/python/operator_test/partition_ops_test.py | KevinKecc/caffe2 | 585 | 2032 | # Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from caffe2.python import core, workspace
from caffe2.python.test_util import TestCase, rand_array
class TestPartitionOps(TestCase):
def test_configs(self):
# (main dims, partitions, main type, [list of (extra dims, type)])
configs = [
((10, ), 3),
((4, ), 10),
((10, 10), 4),
((100, ), 2),
((5, ), 1),
((1, ), 1),
((2, 10), 2),
]
suffixes = [
[],
[((2, 2), np.float32)],
[((3, ), np.int64), ((2, ), np.float32)],
]
return [
(main_dims, parts, main_type, extra, pack)
for main_dims, parts in configs
for main_type in [np.int32, np.int64] for extra in suffixes
for pack in [False, True]
]
def testPartition(self):
for main_dims, parts, main_type, extra_ins, pack in self.test_configs():
ins = ['in' + str(i) for i in range(1 + len(extra_ins))]
outs = [
'in{}_p{}'.format(j, i)
for i in range(parts) for j in range(1 + len(extra_ins))
]
op = core.CreateOperator(
'Partition', ins, outs, pack_first_input=(1 if pack else 0))
x = []
for i, (dims, t) in enumerate([((), main_type)] + extra_ins):
if t in [np.float32, np.float64]:
d = rand_array(*(main_dims + dims))
else:
d = np.random.randint(-100, 100, (main_dims + dims))
d = d.astype(t)
workspace.FeedBlob(ins[i], d)
x.append(d)
def sharding(x):
# numpy has proper modulo op that yields non-negative results
shards = (x[0] % parts).reshape([-1])
out = []
for i in range(parts):
for ind, v in enumerate(x):
suffix_shape = v.shape[len(x[0].shape):]
accum = []
data = v.reshape((-1, ) + suffix_shape)
if pack and ind == 0:
data = data // parts
for j, s in enumerate(shards):
if s == i:
accum.append(data[j])
def join(a):
if not a:
return np.empty(shape=(0, ) + suffix_shape)
return np.stack(a)
out.append(join(accum))
return out
workspace.RunOperatorOnce(op)
ref = sharding(x)
print(x)
print(ref)
for name, expected in zip(outs, ref):
np.testing.assert_array_equal(
expected, workspace.FetchBlob(name)
)
# test inverse operation (GatherByKey)
if len(main_dims) == 1:
# currently only 1D key tensor supported
for i in range(len(extra_ins)):
expected_out = ins[i + 1]
gather_ins = [ins[0]] + [
outs[len(ins) * p + i + 1] for p in range(parts)]
actual_out = expected_out + '_actual'
op = core.CreateOperator(
'GatherByKey', gather_ins, actual_out)
workspace.RunOperatorOnce(op)
expected = workspace.FetchBlob(expected_out)
actual = workspace.FetchBlob(actual_out)
np.testing.assert_array_equal(expected, actual)
def testLengthsPartition(self):
for main_dims, parts, main_type, extra_ins, pack in self.test_configs():
# For LengthsSharding only 1-D tensors supported as a first input
if len(main_dims) > 1:
continue
ins = ['in' + str(i) for i in range(2 + len(extra_ins))]
outs = [
'in{}_p{}'.format(j, i)
for i in range(parts) for j in range(2 + len(extra_ins))
]
op = core.CreateOperator(
'LengthsPartition', ins, outs,
pack_first_input=(1 if pack else 0)
)
x = []
for i, (dims, t) in enumerate([((), main_type)] + extra_ins):
if t in [np.float32, np.float64]:
d = rand_array(*(main_dims + dims))
else:
d = np.random.randint(-100, 100, (main_dims + dims))
d = d.astype(t)
workspace.FeedBlob(ins[i + 1], d)
x.append(d)
# Randomly generate length tensor as well
elements = np.random.randint(2, 10)
lengths = []
total_length = 0
for _ in range(elements - 1):
lengths.append(np.random.randint(main_dims[0] - total_length))
total_length += lengths[-1]
lengths.append(main_dims[0] - total_length)
workspace.FeedBlob(ins[0], np.array(lengths, dtype=np.int32))
def sharding(x):
# numpy has proper modulo op that yields non-negative results
shards = (x[0] % parts).reshape([-1])
out = []
for i in range(parts):
idx = 0
sharded_lengths = np.zeros(elements)
for ind, length in enumerate(lengths):
for _ in range(length):
if shards[idx] == i:
sharded_lengths[ind] += 1
idx += 1
out.append(sharded_lengths)
for ind, v in enumerate(x):
suffix_shape = v.shape[len(x[0].shape):]
accum = []
data = v.reshape((-1, ) + suffix_shape)
if pack and ind == 0:
data = data // parts
for j, s in enumerate(shards):
if s == i:
accum.append(data[j])
def join(a):
if not a:
return np.empty(shape=(0, ) + suffix_shape)
return np.stack(a)
out.append(join(accum))
return out
workspace.RunOperatorOnce(op)
ref = sharding(x)
for name, expected in zip(outs, ref):
np.testing.assert_array_equal(
expected, workspace.FetchBlob(name)
)
if __name__ == "__main__":
import unittest
unittest.main()
| 1.710938 | 2 |
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_fib_common_cfg.py | Maikor/ydk-py | 0 | 2033 | """ Cisco_IOS_XR_fib_common_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR fib\-common package configuration.
This module contains definitions
for the following management objects\:
fib\: CEF configuration
Copyright (c) 2013\-2018 by Cisco Systems, Inc.
All rights reserved.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class FibPbtsFallback(Enum):
"""
FibPbtsFallback (Enum Class)
Fib pbts fallback
.. data:: list = 1
Fallback to class number list
.. data:: any = 2
Fallback to any class
.. data:: drop = 3
Fallback to drop
"""
list = Enum.YLeaf(1, "list")
any = Enum.YLeaf(2, "any")
drop = Enum.YLeaf(3, "drop")
class FibPbtsForwardClass(Enum):
"""
FibPbtsForwardClass (Enum Class)
Fib pbts forward class
.. data:: any = 8
Any class
"""
any = Enum.YLeaf(8, "any")
class Fib(Entity):
"""
CEF configuration
.. attribute:: pbts_forward_class_fallbacks
PBTS class configuration
**type**\: :py:class:`PbtsForwardClassFallbacks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.PbtsForwardClassFallbacks>`
.. attribute:: platform
FIB platform parameters
**type**\: :py:class:`Platform <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.Platform>`
.. attribute:: auto_hash_recover
Set option for automatcially recovering consistent\-hashing state on interface up
**type**\: bool
.. attribute:: prefer_aib_routes
Set options for adjacency routes overriding RIB routes
**type**\: bool
.. attribute:: encap_sharing_disable
Set true to disable encapsulation sharing
**type**\: bool
.. attribute:: frr_follow_bgp_pic
Set option for fast\-reroute to follow BGP PIC update, not to wait for timeout
**type**\: bool
"""
_prefix = 'fib-common-cfg'
_revision = '2017-05-01'
def __init__(self):
super(Fib, self).__init__()
self._top_entity = None
self.yang_name = "fib"
self.yang_parent_name = "Cisco-IOS-XR-fib-common-cfg"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("pbts-forward-class-fallbacks", ("pbts_forward_class_fallbacks", Fib.PbtsForwardClassFallbacks)), ("platform", ("platform", Fib.Platform))])
self._leafs = OrderedDict([
('auto_hash_recover', (YLeaf(YType.boolean, 'auto-hash-recover'), ['bool'])),
('prefer_aib_routes', (YLeaf(YType.boolean, 'prefer-aib-routes'), ['bool'])),
('encap_sharing_disable', (YLeaf(YType.boolean, 'encap-sharing-disable'), ['bool'])),
('frr_follow_bgp_pic', (YLeaf(YType.boolean, 'frr-follow-bgp-pic'), ['bool'])),
])
self.auto_hash_recover = None
self.prefer_aib_routes = None
self.encap_sharing_disable = None
self.frr_follow_bgp_pic = None
self.pbts_forward_class_fallbacks = Fib.PbtsForwardClassFallbacks()
self.pbts_forward_class_fallbacks.parent = self
self._children_name_map["pbts_forward_class_fallbacks"] = "pbts-forward-class-fallbacks"
self.platform = Fib.Platform()
self.platform.parent = self
self._children_name_map["platform"] = "platform"
self._segment_path = lambda: "Cisco-IOS-XR-fib-common-cfg:fib"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Fib, ['auto_hash_recover', 'prefer_aib_routes', 'encap_sharing_disable', 'frr_follow_bgp_pic'], name, value)
class PbtsForwardClassFallbacks(Entity):
"""
PBTS class configuration
.. attribute:: pbts_forward_class_fallback
Set PBTS class for fallback
**type**\: list of :py:class:`PbtsForwardClassFallback <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback>`
"""
_prefix = 'fib-common-cfg'
_revision = '2017-05-01'
def __init__(self):
super(Fib.PbtsForwardClassFallbacks, self).__init__()
self.yang_name = "pbts-forward-class-fallbacks"
self.yang_parent_name = "fib"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("pbts-forward-class-fallback", ("pbts_forward_class_fallback", Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback))])
self._leafs = OrderedDict()
self.pbts_forward_class_fallback = YList(self)
self._segment_path = lambda: "pbts-forward-class-fallbacks"
self._absolute_path = lambda: "Cisco-IOS-XR-fib-common-cfg:fib/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Fib.PbtsForwardClassFallbacks, [], name, value)
class PbtsForwardClassFallback(Entity):
"""
Set PBTS class for fallback
.. attribute:: forward_class_number (key)
PBTS forward class number
**type**\: union of the below types:
**type**\: :py:class:`FibPbtsForwardClass <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.FibPbtsForwardClass>`
**type**\: int
**range:** 0..8
.. attribute:: fallback_type
Set PBTS fallback type
**type**\: :py:class:`FibPbtsFallback <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.FibPbtsFallback>`
**mandatory**\: True
.. attribute:: fallback_class_number_array
Set PBTS fallback class number array
**type**\: list of int
**range:** 0..7
"""
_prefix = 'fib-common-cfg'
_revision = '2017-05-01'
def __init__(self):
super(Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback, self).__init__()
self.yang_name = "pbts-forward-class-fallback"
self.yang_parent_name = "pbts-forward-class-fallbacks"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['forward_class_number']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('forward_class_number', (YLeaf(YType.str, 'forward-class-number'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg', 'FibPbtsForwardClass', ''),'int'])),
('fallback_type', (YLeaf(YType.enumeration, 'fallback-type'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg', 'FibPbtsFallback', '')])),
('fallback_class_number_array', (YLeafList(YType.uint32, 'fallback-class-number-array'), ['int'])),
])
self.forward_class_number = None
self.fallback_type = None
self.fallback_class_number_array = []
self._segment_path = lambda: "pbts-forward-class-fallback" + "[forward-class-number='" + str(self.forward_class_number) + "']"
self._absolute_path = lambda: "Cisco-IOS-XR-fib-common-cfg:fib/pbts-forward-class-fallbacks/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback, ['forward_class_number', 'fallback_type', 'fallback_class_number_array'], name, value)
class Platform(Entity):
"""
FIB platform parameters
.. attribute:: label_switched_multicast
Options for label\-switched\-multicast parameters
**type**\: :py:class:`LabelSwitchedMulticast <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.Platform.LabelSwitchedMulticast>`
"""
_prefix = 'fib-common-cfg'
_revision = '2017-05-01'
def __init__(self):
super(Fib.Platform, self).__init__()
self.yang_name = "platform"
self.yang_parent_name = "fib"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("label-switched-multicast", ("label_switched_multicast", Fib.Platform.LabelSwitchedMulticast))])
self._leafs = OrderedDict()
self.label_switched_multicast = Fib.Platform.LabelSwitchedMulticast()
self.label_switched_multicast.parent = self
self._children_name_map["label_switched_multicast"] = "label-switched-multicast"
self._segment_path = lambda: "platform"
self._absolute_path = lambda: "Cisco-IOS-XR-fib-common-cfg:fib/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Fib.Platform, [], name, value)
class LabelSwitchedMulticast(Entity):
"""
Options for label\-switched\-multicast parameters
.. attribute:: frr_holdtime
Set time to keep FRR slots programmed post FRR
**type**\: int
**range:** 3..180
**units**\: second
"""
_prefix = 'fib-common-cfg'
_revision = '2017-05-01'
def __init__(self):
super(Fib.Platform.LabelSwitchedMulticast, self).__init__()
self.yang_name = "label-switched-multicast"
self.yang_parent_name = "platform"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('frr_holdtime', (YLeaf(YType.uint32, 'frr-holdtime'), ['int'])),
])
self.frr_holdtime = None
self._segment_path = lambda: "label-switched-multicast"
self._absolute_path = lambda: "Cisco-IOS-XR-fib-common-cfg:fib/platform/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Fib.Platform.LabelSwitchedMulticast, ['frr_holdtime'], name, value)
def clone_ptr(self):
self._top_entity = Fib()
return self._top_entity
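def _example_fib_config():
    """
    A minimal sketch (illustrative only) of building this configuration model
    in Python; the leaf values chosen here are assumptions, not recommendations.
    """
    fib = Fib()
    fib.prefer_aib_routes = True
    fallback = Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback()
    fallback.forward_class_number = 1
    fallback.fallback_type = FibPbtsFallback.any
    fib.pbts_forward_class_fallbacks.pbts_forward_class_fallback.append(fallback)
    return fib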
| 1.789063 | 2 |
action/combo.py | dl-stuff/dl9 | 0 | 2034 | """Series of actions that form a combo chain"""
from __future__ import annotations
from typing import Optional, Sequence, TYPE_CHECKING
from action import Action
from core.utility import Array
from core.constants import PlayerForm, SimActKind, MomentType
from core.database import FromDB
if TYPE_CHECKING:
from entity.player import Player
class Combos:
def __init__(self, player: Player, form: PlayerForm, act_ids: Sequence[int], ex_act_ids: Optional[Sequence[int]] = None) -> None:
self.player = player
self.actions: Array[Action] = Array()
for idx, act_id in enumerate(act_ids):
self.actions.append(Action(act_id, player, kind=SimActKind.COMBO, form=form, index=idx + 1))
self.ex_actions = None
if ex_act_ids:
self.ex_actions: Array[Action] = Array()
for idx, act_id in enumerate(ex_act_ids):
if not act_id:
self.ex_actions.append(None)
continue
self.ex_actions.append(Action(act_id, player, kind=SimActKind.COMBO, form=form, index=idx + 1))
def next(self):
if self.player.current in self.actions:
try:
return self.actions[self.player.current.index + 1]
except IndexError:
pass
return self.actions[1]
def __repr__(self) -> str:
if self.ex_actions:
return "->".join(map(repr, self.actions)) + "\tEX[" + "->".join(map(repr, self.ex_actions)) + "]"
return "->".join(map(repr, self.actions))
class UniqueCombos(Combos, FromDB, table="CharaUniqueCombo"):
def __init__(self, id: int, player: Player) -> None:
FromDB.__init__(self, id)
act_ids = (self._data["_ActionId"] + i for i in range(self._data["_MaxComboNum"]))
ex_act_ids = None if not self._data["_ExActionId"] else (self._data["_ExActionId"] + i for i in range(self._data["_MaxComboNum"]))
Combos.__init__(self, player, PlayerForm.ADV, act_ids, ex_act_ids=ex_act_ids)
if self._data["_ShiftConditionType"] == 1:
self.player.events.listen(MomentType.HIT, self.enable)
def enable(self, *args, **kwargs):
pass
class DefaultCombos(Combos, FromDB, table="WeaponType"):
def __init__(self, id: int, player: Player) -> None:
FromDB.__init__(self, id)
act_ids = (self._data[f"_DefaultSkill{i+1:02}"] for i in range(5) if self._data[f"_DefaultSkill{i+1:02}"])
ex_act_ids = None if not self._data["_DefaultSkill05Ex"] else (0, 0, 0, 0, self._data["_DefaultSkill05Ex"])
Combos.__init__(self, player, PlayerForm.ADV, act_ids, ex_act_ids=ex_act_ids)
class DragonCombos(Combos):
def __init__(self, id: int, combo_max: int, player: Player) -> None:
act_ids = (id + i for i in range(combo_max))
Combos.__init__(self, player, PlayerForm.DRG, act_ids)
| 2.484375 | 2 |
flask_unchained/bundles/session/config.py | achiang/flask-unchained | 0 | 2035 | <filename>flask_unchained/bundles/session/config.py
import os
from datetime import timedelta
from flask_unchained import BundleConfig
try:
from flask_unchained.bundles.sqlalchemy import db
except ImportError:
db = None
class _DefaultFlaskConfigForSessions(BundleConfig):
SESSION_COOKIE_NAME = 'session'
"""
The name of the session cookie.
Defaults to ``'session'``.
"""
SESSION_COOKIE_DOMAIN = None
"""
The domain for the session cookie. If this is not set, the cookie will be
valid for all subdomains of ``SERVER_NAME``.
Defaults to ``None``.
"""
SESSION_COOKIE_PATH = None
"""
The path for the session cookie. If this is not set the cookie will be valid
for all of ``APPLICATION_ROOT`` or if that is not set for '/'.
Defaults to ``None``.
"""
SESSION_COOKIE_HTTPONLY = True
"""
Controls if the cookie should be set with the ``httponly`` flag. Browsers will
not allow JavaScript access to cookies marked as ``httponly`` for security.
Defaults to ``True``.
"""
SESSION_COOKIE_SECURE = False
"""
Controls if the cookie should be set with the ``secure`` flag. Browsers will
only send cookies with requests over HTTPS if the cookie is marked ``secure``.
The application must be served over HTTPS for this to make sense.
Defaults to ``False``.
"""
PERMANENT_SESSION_LIFETIME = timedelta(days=31)
"""
The lifetime of a permanent session as ``datetime.timedelta`` object or an
integer representing seconds.
Defaults to 31 days.
"""
SESSION_COOKIE_SAMESITE = None
"""
Restrict how cookies are sent with requests from external sites. Limits the
scope of the cookie such that it will only be attached to requests if those
requests are "same-site". Can be set to ``'Lax'`` (recommended) or ``'Strict'``.
Defaults to ``None``.
"""
SESSION_REFRESH_EACH_REQUEST = True
"""
Controls the set-cookie behavior. If set to ``True`` a permanent session will
be refreshed each request and get their lifetime extended, if set to ``False``
it will only be modified if the session actually modifies. Non permanent sessions
are not affected by this and will always expire if the browser window closes.
Defaults to ``True``.
"""
class Config(_DefaultFlaskConfigForSessions):
"""
Default configuration options for the Session Bundle.
"""
SESSION_TYPE = 'null'
"""
Specifies which type of session interface to use. Built-in session types:
- ``'null'``: :class:`~flask_unchained.bundles.session.session_interfaces.NullSessionInterface` (default)
- ``'redis'``: :class:`~flask_unchained.bundles.session.session_interfaces.RedisSessionInterface`
- ``'memcached'``: :class:`~flask_unchained.bundles.session.session_interfaces.MemcachedSessionInterface`
- ``'filesystem'``: :class:`~flask_unchained.bundles.session.session_interfaces.FileSystemSessionInterface`
- ``'mongodb'``: :class:`~flask_unchained.bundles.session.session_interfaces.MongoDBSessionInterface`
- ``'sqlalchemy'``: :class:`~flask_unchained.bundles.session.session_interfaces.SqlAlchemySessionInterface`
Defaults to ``'null'``.
"""
SESSION_PERMANENT = True
"""
Whether to use a permanent session or not.
Defaults to ``True``.
"""
SESSION_USE_SIGNER = False
"""
Whether to sign the session cookie sid or not. If set to ``True``, you have to
set ``SECRET_KEY``.
Defaults to ``False``.
"""
SESSION_KEY_PREFIX = 'session:'
"""
A prefix that is added before all session keys. This makes it possible to use
the same backend storage server for different apps.
Defaults to ``'session:'``.
"""
SESSION_REDIS = None
"""
A :class:`redis.Redis` instance.
By default, connect to ``127.0.0.1:6379``.
"""
SESSION_MEMCACHED = None
"""
A :class:`memcached.Client` instance.
By default, connect to ``127.0.0.1:11211``.
"""
SESSION_FILE_DIR = os.path.join(os.getcwd(), 'flask_sessions')
"""
The folder where session files are stored.
Defaults to using a folder named ``flask_sessions`` in your current working
directory.
"""
SESSION_FILE_THRESHOLD = 500
"""
The maximum number of items the session stores before it starts deleting some.
Defaults to 500.
"""
SESSION_FILE_MODE = 0o600
"""
The file mode wanted for the session files. Should be specified as an octal,
eg ``0o600``.
Defaults to ``0o600``.
"""
SESSION_MONGODB = None
"""
A :class:`pymongo.MongoClient` instance.
By default, connect to ``127.0.0.1:27017``.
"""
SESSION_MONGODB_DB = 'flask_session'
"""
The MongoDB database you want to use.
Defaults to ``'flask_session'``.
"""
SESSION_MONGODB_COLLECT = 'sessions'
"""
The MongoDB collection you want to use.
Defaults to ``'sessions'``.
"""
SESSION_SQLALCHEMY = db
"""
A :class:`~flask_unchained.bundles.sqlalchemy.SQLAlchemy` extension instance.
"""
SESSION_SQLALCHEMY_TABLE = 'flask_sessions'
"""
The name of the SQL table you want to use.
Defaults to ``flask_sessions``.
"""
SESSION_SQLALCHEMY_MODEL = None
"""
Set this if you need to customize the
:class:`~flask_unchained.bundles.sqlalchemy.BaseModel` subclass used for
storing sessions in the database.
"""
| 2.21875 | 2 |
sktime/forecasting/base/adapters/_statsmodels.py | tombh/sktime | 1 | 2036 | #!/usr/bin/env python3 -u
# -*- coding: utf-8 -*-
__author__ = ["<NAME>"]
__all__ = ["_StatsModelsAdapter"]
import numpy as np
import pandas as pd
from sktime.forecasting.base._base import DEFAULT_ALPHA
from sktime.forecasting.base._sktime import _OptionalForecastingHorizonMixin
from sktime.forecasting.base._sktime import _SktimeForecaster
class _StatsModelsAdapter(_OptionalForecastingHorizonMixin, _SktimeForecaster):
"""Base class for interfacing statsmodels forecasting algorithms"""
_fitted_param_names = ()
def __init__(self):
self._forecaster = None
self._fitted_forecaster = None
super(_StatsModelsAdapter, self).__init__()
def fit(self, y, X=None, fh=None):
"""Fit to training data.
Parameters
----------
y : pd.Series
Target time series to which to fit the forecaster.
fh : int, list or np.array, optional (default=None)
The forecaster's horizon with the steps ahead to predict.
X : pd.DataFrame, optional (default=None)
Exogenous variables are ignored
Returns
-------
self : returns an instance of self.
"""
# statsmodels does not support the pd.Int64Index as required,
# so we coerce them here to pd.RangeIndex
if isinstance(y, pd.Series) and type(y.index) == pd.Int64Index:
y, X = _coerce_int_to_range_index(y, X)
self._set_y_X(y, X)
self._set_fh(fh)
self._fit_forecaster(y, X)
self._is_fitted = True
return self
def _fit_forecaster(self, y_train, X_train=None):
"""Internal fit"""
raise NotImplementedError("abstract method")
def _predict(self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA):
"""
Make forecasts.
Parameters
----------
fh : ForecastingHorizon
The forecaster's horizon with the steps ahead to predict.
Default is one-step ahead forecast,
i.e. np.array([1])
X : pd.DataFrame, optional (default=None)
Exogenous variables are ignored.
return_pred_int : bool, optional (default=False)
alpha : int or list, optional (default=0.95)
Returns
-------
y_pred : pd.Series
Returns series of predicted values.
"""
if return_pred_int:
raise NotImplementedError()
# statsmodels requires zero-based indexing starting at the
# beginning of the training series when passing integers
start, end = fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]]
y_pred = self._fitted_forecaster.predict(start, end)
# statsmodels forecasts all periods from start to end of forecasting
# horizon, but only return given time points in forecasting horizon
return y_pred.loc[fh.to_absolute(self.cutoff).to_pandas()]
def get_fitted_params(self):
"""Get fitted parameters
Returns
-------
fitted_params : dict
"""
self.check_is_fitted()
return {
name: self._fitted_forecaster.params.get(name)
for name in self._get_fitted_param_names()
}
def _get_fitted_param_names(self):
"""Get names of fitted parameters"""
return self._fitted_param_names
def _coerce_int_to_range_index(y, X=None):
new_index = pd.RangeIndex(y.index[0], y.index[-1] + 1)
try:
np.testing.assert_array_equal(y.index, new_index)
except AssertionError:
raise ValueError(
"Coercion of pd.Int64Index to pd.RangeIndex "
"failed. Please provide `y_train` with a "
"pd.RangeIndex."
)
y.index = new_index
if X is not None:
X.index = new_index
return y, X
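# --- Illustrative subclass (not part of sktime) ------------------------------
# A minimal sketch of how a concrete forecaster could plug a statsmodels
# estimator into this adapter by implementing `_fit_forecaster`. The choice of
# SimpleExpSmoothing and the class name are assumptions made for illustration;
# real sktime adapters wire up their own models and parameters.
class _ExampleSESForecaster(_StatsModelsAdapter):
    _fitted_param_names = ("smoothing_level",)

    def _fit_forecaster(self, y_train, X_train=None):
        from statsmodels.tsa.holtwinters import SimpleExpSmoothing

        self._forecaster = SimpleExpSmoothing(y_train)
        self._fitted_forecaster = self._forecaster.fit()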
| 2.640625 | 3 |
melodic/lib/python2.7/dist-packages/gazebo_msgs/srv/_GetLinkProperties.py | Dieptranivsr/Ros_Diep | 0 | 2037 | <gh_stars>0
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from gazebo_msgs/GetLinkPropertiesRequest.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class GetLinkPropertiesRequest(genpy.Message):
_md5sum = "7d82d60381f1b66a30f2157f60884345"
_type = "gazebo_msgs/GetLinkPropertiesRequest"
_has_header = False # flag to mark the presence of a Header object
_full_text = """string link_name # name of link
# link names are prefixed by model name, e.g. pr2::base_link
"""
__slots__ = ['link_name']
_slot_types = ['string']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommended
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
link_name
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(GetLinkPropertiesRequest, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.link_name is None:
self.link_name = ''
else:
self.link_name = ''
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self.link_name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.link_name = str[start:end].decode('utf-8', 'rosmsg')
else:
self.link_name = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self.link_name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.link_name = str[start:end].decode('utf-8', 'rosmsg')
else:
self.link_name = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from gazebo_msgs/GetLinkPropertiesResponse.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import geometry_msgs.msg
class GetLinkPropertiesResponse(genpy.Message):
_md5sum = "a8619f92d17cfcc3958c0fd13299443d"
_type = "gazebo_msgs/GetLinkPropertiesResponse"
_has_header = False # flag to mark the presence of a Header object
_full_text = """geometry_msgs/Pose com # center of mass location in link frame
# and orientation of the moment of inertias
# relative to the link frame
bool gravity_mode # set gravity mode on/off
float64 mass # linear mass of link
float64 ixx # moment of inertia
float64 ixy # moment of inertia
float64 ixz # moment of inertia
float64 iyy # moment of inertia
float64 iyz # moment of inertia
float64 izz # moment of inertia
bool success # return true if get info is successful
string status_message # comments if available
================================================================================
MSG: geometry_msgs/Pose
# A representation of pose in free space, composed of position and orientation.
Point position
Quaternion orientation
================================================================================
MSG: geometry_msgs/Point
# This contains the position of a point in free space
float64 x
float64 y
float64 z
================================================================================
MSG: geometry_msgs/Quaternion
# This represents an orientation in free space in quaternion form.
float64 x
float64 y
float64 z
float64 w
"""
__slots__ = ['com','gravity_mode','mass','ixx','ixy','ixz','iyy','iyz','izz','success','status_message']
_slot_types = ['geometry_msgs/Pose','bool','float64','float64','float64','float64','float64','float64','float64','bool','string']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommended
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
com,gravity_mode,mass,ixx,ixy,ixz,iyy,iyz,izz,success,status_message
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(GetLinkPropertiesResponse, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.com is None:
self.com = geometry_msgs.msg.Pose()
if self.gravity_mode is None:
self.gravity_mode = False
if self.mass is None:
self.mass = 0.
if self.ixx is None:
self.ixx = 0.
if self.ixy is None:
self.ixy = 0.
if self.ixz is None:
self.ixz = 0.
if self.iyy is None:
self.iyy = 0.
if self.iyz is None:
self.iyz = 0.
if self.izz is None:
self.izz = 0.
if self.success is None:
self.success = False
if self.status_message is None:
self.status_message = ''
else:
self.com = geometry_msgs.msg.Pose()
self.gravity_mode = False
self.mass = 0.
self.ixx = 0.
self.ixy = 0.
self.ixz = 0.
self.iyy = 0.
self.iyz = 0.
self.izz = 0.
self.success = False
self.status_message = ''
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_7dB7dB().pack(_x.com.position.x, _x.com.position.y, _x.com.position.z, _x.com.orientation.x, _x.com.orientation.y, _x.com.orientation.z, _x.com.orientation.w, _x.gravity_mode, _x.mass, _x.ixx, _x.ixy, _x.ixz, _x.iyy, _x.iyz, _x.izz, _x.success))
_x = self.status_message
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.com is None:
self.com = geometry_msgs.msg.Pose()
end = 0
_x = self
start = end
end += 114
(_x.com.position.x, _x.com.position.y, _x.com.position.z, _x.com.orientation.x, _x.com.orientation.y, _x.com.orientation.z, _x.com.orientation.w, _x.gravity_mode, _x.mass, _x.ixx, _x.ixy, _x.ixz, _x.iyy, _x.iyz, _x.izz, _x.success,) = _get_struct_7dB7dB().unpack(str[start:end])
self.gravity_mode = bool(self.gravity_mode)
self.success = bool(self.success)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status_message = str[start:end].decode('utf-8', 'rosmsg')
else:
self.status_message = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_7dB7dB().pack(_x.com.position.x, _x.com.position.y, _x.com.position.z, _x.com.orientation.x, _x.com.orientation.y, _x.com.orientation.z, _x.com.orientation.w, _x.gravity_mode, _x.mass, _x.ixx, _x.ixy, _x.ixz, _x.iyy, _x.iyz, _x.izz, _x.success))
_x = self.status_message
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.com is None:
self.com = geometry_msgs.msg.Pose()
end = 0
_x = self
start = end
end += 114
(_x.com.position.x, _x.com.position.y, _x.com.position.z, _x.com.orientation.x, _x.com.orientation.y, _x.com.orientation.z, _x.com.orientation.w, _x.gravity_mode, _x.mass, _x.ixx, _x.ixy, _x.ixz, _x.iyy, _x.iyz, _x.izz, _x.success,) = _get_struct_7dB7dB().unpack(str[start:end])
self.gravity_mode = bool(self.gravity_mode)
self.success = bool(self.success)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status_message = str[start:end].decode('utf-8', 'rosmsg')
else:
self.status_message = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_7dB7dB = None
def _get_struct_7dB7dB():
global _struct_7dB7dB
if _struct_7dB7dB is None:
_struct_7dB7dB = struct.Struct("<7dB7dB")
return _struct_7dB7dB
class GetLinkProperties(object):
_type = 'gazebo_msgs/GetLinkProperties'
_md5sum = '0e06a70386d0ee3fb880c02f23fcd821'
_request_class = GetLinkPropertiesRequest
_response_class = GetLinkPropertiesResponse
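# --- Usage sketch (illustrative only, not generated code) --------------------
# One way a client node might call this service via rospy, assuming a running
# Gazebo instance advertising the default /gazebo/get_link_properties service.
# The link name is purely illustrative.
def _example_call_get_link_properties(link_name='my_robot::base_link'):
    import rospy  # imported lazily so the generated module stays import-safe
    rospy.wait_for_service('/gazebo/get_link_properties')
    proxy = rospy.ServiceProxy('/gazebo/get_link_properties', GetLinkProperties)
    return proxy(link_name=link_name)  # returns a GetLinkPropertiesResponse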
| 2.25 | 2 |
jupytext/kernels.py | st--/jupytext | 5,378 | 2038 | """Find kernel specifications for a given language"""
import os
import sys
from .languages import same_language
from .reraise import reraise
try:
# I prefer not to take a dependency on jupyter_client
from jupyter_client.kernelspec import find_kernel_specs, get_kernel_spec
except ImportError as err:
find_kernel_specs = reraise(err)
get_kernel_spec = reraise(err)
def set_kernelspec_from_language(notebook):
"""Set the kernel specification based on the 'main_language' metadata"""
language = notebook.metadata.get("jupytext", {}).get("main_language")
if "kernelspec" not in notebook.metadata and language:
try:
kernelspec = kernelspec_from_language(language)
except ValueError:
return
notebook.metadata["kernelspec"] = kernelspec
notebook.metadata.get("jupytext", {}).pop("main_language")
def kernelspec_from_language(language):
"""Return the python kernel that matches the current env, or the first kernel that matches the given language"""
if language == "python":
# Return the kernel that matches the current Python executable
for name in find_kernel_specs():
kernel_specs = get_kernel_spec(name)
cmd = kernel_specs.argv[0]
if (
kernel_specs.language == "python"
and os.path.isfile(cmd)
and os.path.samefile(cmd, sys.executable)
):
return {
"name": name,
"language": language,
"display_name": kernel_specs.display_name,
}
raise ValueError(
"No kernel found that matches the current python executable {}\n".format(
sys.executable
)
+ "Install one with 'python -m ipykernel install --name kernel_name [--user]'"
)
for name in find_kernel_specs():
kernel_specs = get_kernel_spec(name)
if same_language(kernel_specs.language, language):
return {
"name": name,
"language": language,
"display_name": kernel_specs.display_name,
}
raise ValueError("No kernel found for the language {}".format(language))
| 2.8125 | 3 |
scipy/sparse/_matrix_io.py | dhruv9vats/scipy | 1 | 2039 | import numpy as np
import scipy.sparse
__all__ = ['save_npz', 'load_npz']
# Make loading safe vs. malicious input
PICKLE_KWARGS = dict(allow_pickle=False)
def save_npz(file, matrix, compressed=True):
""" Save a sparse matrix to a file using ``.npz`` format.
Parameters
----------
file : str or file-like object
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string, the ``.npz``
extension will be appended to the file name if it is not already
there.
matrix : spmatrix (format: ``csc``, ``csr``, ``bsr``, ``dia`` or ``coo``)
The sparse matrix to save.
compressed : bool, optional
Allow compressing the file. Default: True
See Also
--------
scipy.sparse.load_npz: Load a sparse matrix from a file using ``.npz`` format.
numpy.savez: Save several arrays into a ``.npz`` archive.
numpy.savez_compressed : Save several arrays into a compressed ``.npz`` archive.
Examples
--------
Store sparse matrix to disk, and load it again:
>>> import scipy.sparse
>>> sparse_matrix = scipy.sparse.csc_matrix(np.array([[0, 0, 3], [4, 0, 0]]))
>>> sparse_matrix
<2x3 sparse matrix of type '<class 'numpy.int64'>'
with 2 stored elements in Compressed Sparse Column format>
>>> sparse_matrix.todense()
matrix([[0, 0, 3],
[4, 0, 0]], dtype=int64)
>>> scipy.sparse.save_npz('/tmp/sparse_matrix.npz', sparse_matrix)
>>> sparse_matrix = scipy.sparse.load_npz('/tmp/sparse_matrix.npz')
>>> sparse_matrix
<2x3 sparse matrix of type '<class 'numpy.int64'>'
with 2 stored elements in Compressed Sparse Column format>
>>> sparse_matrix.todense()
matrix([[0, 0, 3],
[4, 0, 0]], dtype=int64)
"""
arrays_dict = {}
if matrix.format in ('csc', 'csr', 'bsr'):
arrays_dict.update(indices=matrix.indices, indptr=matrix.indptr)
elif matrix.format == 'dia':
arrays_dict.update(offsets=matrix.offsets)
elif matrix.format == 'coo':
arrays_dict.update(row=matrix.row, col=matrix.col)
else:
raise NotImplementedError('Save is not implemented for sparse matrix of format {}.'.format(matrix.format))
arrays_dict.update(
format=matrix.format.encode('ascii'),
shape=matrix.shape,
data=matrix.data
)
if compressed:
np.savez_compressed(file, **arrays_dict)
else:
np.savez(file, **arrays_dict)
def load_npz(file):
""" Load a sparse matrix from a file using ``.npz`` format.
Parameters
----------
file : str or file-like object
Either the file name (string) or an open file (file-like object)
where the data will be loaded.
Returns
-------
result : csc_matrix, csr_matrix, bsr_matrix, dia_matrix or coo_matrix
A sparse matrix containing the loaded data.
Raises
------
OSError
If the input file does not exist or cannot be read.
See Also
--------
scipy.sparse.save_npz: Save a sparse matrix to a file using ``.npz`` format.
numpy.load: Load several arrays from a ``.npz`` archive.
Examples
--------
Store sparse matrix to disk, and load it again:
>>> import scipy.sparse
>>> sparse_matrix = scipy.sparse.csc_matrix(np.array([[0, 0, 3], [4, 0, 0]]))
>>> sparse_matrix
<2x3 sparse matrix of type '<class 'numpy.int64'>'
with 2 stored elements in Compressed Sparse Column format>
>>> sparse_matrix.todense()
matrix([[0, 0, 3],
[4, 0, 0]], dtype=int64)
>>> scipy.sparse.save_npz('/tmp/sparse_matrix.npz', sparse_matrix)
>>> sparse_matrix = scipy.sparse.load_npz('/tmp/sparse_matrix.npz')
>>> sparse_matrix
<2x3 sparse matrix of type '<class 'numpy.int64'>'
with 2 stored elements in Compressed Sparse Column format>
>>> sparse_matrix.todense()
matrix([[0, 0, 3],
[4, 0, 0]], dtype=int64)
"""
with np.load(file, **PICKLE_KWARGS) as loaded:
try:
matrix_format = loaded['format']
except KeyError as e:
raise ValueError('The file {} does not contain a sparse matrix.'.format(file)) from e
matrix_format = matrix_format.item()
if not isinstance(matrix_format, str):
# Play safe with Python 2 vs 3 backward compatibility;
# files saved with SciPy < 1.0.0 may contain unicode or bytes.
matrix_format = matrix_format.decode('ascii')
try:
cls = getattr(scipy.sparse, '{}_matrix'.format(matrix_format))
except AttributeError as e:
raise ValueError('Unknown matrix format "{}"'.format(matrix_format)) from e
if matrix_format in ('csc', 'csr', 'bsr'):
return cls((loaded['data'], loaded['indices'], loaded['indptr']), shape=loaded['shape'])
elif matrix_format == 'dia':
return cls((loaded['data'], loaded['offsets']), shape=loaded['shape'])
elif matrix_format == 'coo':
return cls((loaded['data'], (loaded['row'], loaded['col'])), shape=loaded['shape'])
else:
raise NotImplementedError('Load is not implemented for '
'sparse matrix of format {}.'.format(matrix_format))
| 3.578125 | 4 |
src/simulator/services/resources/atlas.py | ed741/PathBench | 46 | 2040 | from typing import Dict, List
from simulator.services.resources.directory import Directory
from simulator.services.services import Services
class Atlas(Directory):
def __init__(self, services: Services, name: str, parent: str, create: bool = False) -> None:
super().__init__(services, name, parent, create)
if create:
metadata: Dict[str, any] = {
"next_index": 0,
}
self._save_metadata(metadata)
def append(self, obj: any) -> None:
self.save(str(self._get_next_index()), obj)
self._increment_index()
def load_all(self, max_els: int = float("inf")) -> List[any]:
ret: List[any] = []
idx: int = 0
while idx < max_els:
obj: any = self.load(str(idx))
if obj:
ret.append(obj)
idx += 1
else:
break
return ret
def _get_next_index(self) -> int:
metadata: Dict[str, any] = self._get_metadata()
return metadata["next_index"]
def _increment_index(self) -> None:
metadata: Dict[str, any] = self._get_metadata()
metadata["next_index"] += 1
self._save_metadata(metadata)
def _save_metadata(self, metadata: Dict[str, any]) -> None:
super().save("metadata", metadata)
def _get_metadata(self) -> Dict[str, any]:
return super().load("metadata")
| 2.671875 | 3 |
ingestion/src/metadata/great_expectations/builders/table/row_count_to_equal.py | ulixius9/OpenMetadata | 0 | 2041 | # Copyright 2022 Collate
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
TestCase builder
"""
from metadata.generated.schema.api.tests.createTableTest import CreateTableTestRequest
from metadata.generated.schema.tests.table import tableRowCountToEqual
from metadata.generated.schema.tests.tableTest import TableTestType
from metadata.great_expectations.builders.table.base_table_test_builders import (
BaseTableTestBuilder,
)
class TableRowCountToEqualBuilder(BaseTableTestBuilder):
"""Builder for `expect_table_row_count_to_equal` GE expectation"""
def _build_test(self) -> CreateTableTestRequest:
"""Specific test builder for the test"""
return self.build_test_request(
config=tableRowCountToEqual.TableRowCountToEqual(
value=self.result["expectation_config"]["kwargs"]["value"],
),
test_type=TableTestType.tableRowCountToEqual,
)
| 1.929688 | 2 |
tensorflow/bbox/jrieke-tf-parse-v2/jrieke_tf_dataset.py | gustavovaliati/obj-det-experiments | 0 | 2042 | '''
This code is based on https://github.com/jrieke/shape-detection/
'''
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import tensorflow as tf
import datetime
class JriekeBboxDataset:
def generate(self):
print('Generating...')
self.WIDTH = 8
self.HEIGHT = 8
num_imgs = 50000
min_object_size = 1
max_object_size = 4
num_objects = 1
self.bboxes = np.zeros((num_imgs, num_objects, 4))
self.imgs = np.zeros((num_imgs, self.WIDTH, self.HEIGHT)) # set background to 0
for i_img in range(num_imgs):
for i_object in range(num_objects):
w, h = np.random.randint(min_object_size, max_object_size, size=2)
x = np.random.randint(0, self.WIDTH - w)
y = np.random.randint(0, self.HEIGHT - h)
self.imgs[i_img, y:y+h, x:x+w] = 1. # set rectangle to 1
self.bboxes[i_img, i_object] = [x, y, w, h]
print("Shapes: imgs ", self.imgs.shape, " bboxes ", self.bboxes.shape)
#why this?
# X = (self.imgs.reshape(num_imgs, -1) - np.mean(self.imgs)) / np.std(self.imgs)
X = self.imgs
y = self.bboxes.reshape(num_imgs, -1) / self.WIDTH
# Split training and test.
i = int(0.8 * num_imgs)
train_X = X[:i] #80% for training
test_X = X[i:]
train_y = y[:i]
test_y = y[i:]
self.test_imgs = self.imgs[i:]
self.test_bboxes = self.bboxes[i:]
return train_X, train_y, test_X, test_y
# NOTE: this method is redefined later in the class; the second definition is the one that takes effect.
def check_dataset_image_compability(self, test_X_sample, test_imgs_sample):
fig = plt.figure(figsize=(12, 3))
fig.suptitle('check if the generated imgs match to the test_X slice image')
fig.subplots_adjust(top=0.85)
plt.subplot(1, 2, 1)
plt.gca().set_title('Returned by the dataset class: used for training')
plt.imshow(test_X_sample, cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT])
plt.subplot(1, 2, 2)
plt.gca().set_title('Global image holder: used for plotting.')
plt.imshow(test_imgs_sample, cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT])
plt.show()
print('compare:', test_X_sample, test_imgs_sample)
def IOU(self,bbox1, bbox2):
'''Calculate overlap between two bounding boxes [x, y, w, h] as the area of intersection over the area of union'''
x1, y1, w1, h1 = bbox1[0], bbox1[1], bbox1[2], bbox1[3]
x2, y2, w2, h2 = bbox2[0], bbox2[1], bbox2[2], bbox2[3]
w_I = min(x1 + w1, x2 + w2) - max(x1, x2)
h_I = min(y1 + h1, y2 + h2) - max(y1, y2)
if w_I <= 0 or h_I <= 0: # no overlap
return 0.
I = w_I * h_I
U = w1 * h1 + w2 * h2 - I
return I / U
def convertDefaultAnnotToCoord(self, annot):
'''
annot -> [x, y, w, h]
'''
w = annot[2] * self.WIDTH
h = annot[3] * self.HEIGHT
x = annot[0] * self.WIDTH
y = annot[1] * self.HEIGHT
return [x,y,w,h]
def convertYoloAnnotToCoord(self, yolo_annot):
'''
yolo_annot -> [x, y, w, h]
'''
w = yolo_annot[2] * self.WIDTH
h = yolo_annot[3] * self.HEIGHT
x = (yolo_annot[0] * self.WIDTH) - (w/2)
y = (yolo_annot[1] * self.HEIGHT) - (h/2)
return [x,y,w,h]
def show_generated(self, i=0):
fig = plt.figure()
fig.subplots_adjust(top=0.85)
fig.suptitle('Generated image sample + GT')
plt.imshow(self.imgs[i], cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT])
for bbox in self.bboxes[i]:
plt.gca().add_patch(matplotlib.patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3], ec='r', fc='none'))
plt.gca().legend(['GT'])
plt.show()
def plot_rectangle(self, img, bbox):
fig = plt.figure()
fig.suptitle('Plotting rectangle.')
fig.subplots_adjust(top=0.85)
plt.subplot(1, 1, 1)
plt.imshow(img, cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT])
plt.gca().add_patch(matplotlib.patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3], ec='r', fc='none'))
plt.show()
def check_dataset_image_compability(self, test_X_sample, test_imgs_sample):
fig = plt.figure(figsize=(12, 3))
fig.suptitle('check if the generated imgs match to the test_X slice image')
fig.subplots_adjust(top=0.85)
plt.subplot(1, 2, 1)
plt.gca().set_title('Returned by the dataset class: used for training')
plt.imshow(test_X_sample, cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT])
plt.subplot(1, 2, 2)
plt.gca().set_title('Global image holder: used for plotting.')
plt.imshow(test_imgs_sample, cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT])
plt.show()
print('compare:',test_X_sample,test_imgs_sample)
def show_predicted(self, pred_bboxes):
# Show a few images and predicted bounding boxes from the test dataset.
fig = plt.figure(figsize=(12, 3))
fig.subplots_adjust(top=0.85)
fig.suptitle('Prediction demonstration. Random samples.')
legend_plotted = False
for i_subplot in range(1, 11):
plt.subplot(1, 10, i_subplot)
i = np.random.randint(len(pred_bboxes))
plt.imshow(self.test_imgs[i], cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT])
for pred_bbox, exp_bbox in zip(pred_bboxes[i], self.test_bboxes[i]):
# print('before convertion: pred',pred_bbox, 'gt',exp_bbox)
pred_bbox = self.convertDefaultAnnotToCoord(pred_bbox)
# exp_bbox = self.convertDefaultAnnotToCoord(exp_bbox)
print('after convertion: pred',pred_bbox, 'gt',exp_bbox)
plt.gca().add_patch(matplotlib.patches.Rectangle((pred_bbox[0], pred_bbox[1]), pred_bbox[2], pred_bbox[3], ec='r', fc='none'))
#gt
plt.gca().add_patch(matplotlib.patches.Rectangle((exp_bbox[0], exp_bbox[1]), exp_bbox[2], exp_bbox[3], ec='b', fc='none'))
plt.annotate('IOU: {:.2f}'.format(self.IOU(pred_bbox, exp_bbox)), (pred_bbox[0], pred_bbox[1]+pred_bbox[3]+0.2), color='r')
if not legend_plotted:
legend_plotted = True
plt.gca().legend(['Pred','GT'],loc='upper center', bbox_to_anchor=(0.5, -0.5), fancybox=True)
plt.show()
# plt.savefig('plots/bw-single-rectangle_prediction_{0:%Y-%m-%d%H:%M:%S}.png'.format(datetime.datetime.now()), dpi=300)
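# --- Usage sketch (illustrative only) ------------------------------------------
# Typical end-to-end flow with this dataset class. The model step is elided, so
# random boxes stand in for real network predictions here; everything else uses
# the methods defined above.
if __name__ == '__main__':
    dataset = JriekeBboxDataset()
    train_X, train_y, test_X, test_y = dataset.generate()
    dataset.show_generated()
    fake_pred = np.random.rand(len(test_X), 1, 4)  # placeholder for model output
    dataset.show_predicted(fake_pred)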
| 3.0625 | 3 |
src/knownnodes.py | skeevey/PyBitmessage | 1 | 2043 | import pickle
import threading
from bmconfigparser import BMConfigParser
import state
knownNodesLock = threading.Lock()
knownNodes = {}
knownNodesTrimAmount = 2000
def saveKnownNodes(dirName = None):
if dirName is None:
dirName = state.appdata
with knownNodesLock:
with open(dirName + 'knownnodes.dat', 'wb') as output:
pickle.dump(knownNodes, output)
def increaseRating(peer):
increaseAmount = 0.1
maxRating = 1
with knownNodesLock:
for stream in knownNodes.keys():
try:
knownNodes[stream][peer]["rating"] = min(knownNodes[stream][peer]["rating"] + increaseAmount, maxRating)
except KeyError:
pass
def decreaseRating(peer):
decreaseAmount = 0.1
minRating = -1
with knownNodesLock:
for stream in knownNodes.keys():
try:
knownNodes[stream][peer]["rating"] = max(knownNodes[stream][peer]["rating"] - decreaseAmount, minRating)
except KeyError:
pass
def trimKnownNodes(recAddrStream = 1):
if len(knownNodes[recAddrStream]) < BMConfigParser().get("knownnodes", "maxnodes"):
return
with knownNodesLock:
oldestList = sorted(knownNodes[recAddrStream], key=lambda x: x['lastseen'])[:knownNodesTrimAmount]
for oldest in oldestList:
del knownNodes[recAddrStream][oldest]
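# --- Usage sketch (illustrative only, not part of PyBitmessage) -----------------
# How the helpers above are typically driven by networking code: reward a peer
# after a successful connection, penalise it after a failure, then persist the
# table. `some_peer` stands for whatever peer object keys the knownNodes dicts.
#
# increaseRating(some_peer)   # successful connection
# decreaseRating(some_peer)   # failed connection
# saveKnownNodes()            # writes state.appdata + 'knownnodes.dat'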
| 2.671875 | 3 |
chroma_agent/action_plugins/manage_node.py | whamcloud/iml-agent | 1 | 2044 | # Copyright (c) 2018 DDN. All rights reserved.
# Use of this source code is governed by a MIT-style
# license that can be found in the LICENSE file.
import os
from chroma_agent.lib.shell import AgentShell
from chroma_agent.log import console_log
from chroma_agent.device_plugins.action_runner import CallbackAfterResponse
from chroma_agent.lib.pacemaker import PacemakerConfig
def ssi(runlevel):
# force a manual failover by failing a node
AgentShell.try_run(["sync"])
AgentShell.try_run(["sync"])
AgentShell.try_run(["init", runlevel])
def fail_node():
ssi("0")
def stonith(node):
p_cfg = PacemakerConfig()
# TODO: signal that manager that a STONITH has been done so that it
# doesn't treat it as an AWOL
console_log.info("Rebooting %s per a STONITH request" % node)
p_cfg.get_node(node).fence_reboot()
def shutdown_server(halt=True, at_time="now"):
def _shutdown():
console_log.info("Initiating server shutdown per manager request")
# This will initiate a "nice" shutdown with a wall from root, etc.
AgentShell.try_run(["shutdown", "-H" if halt else "-h", at_time])
console_log.info("Terminating")
os._exit(0)
raise CallbackAfterResponse(None, _shutdown)
def reboot_server(at_time="now"):
def _reboot():
console_log.info("Initiating server reboot per manager request")
# reboot(8) just calls shutdown anyhow.
AgentShell.try_run(["shutdown", "-r", at_time])
console_log.info("Terminating")
os._exit(0)
raise CallbackAfterResponse(None, _reboot)
ACTIONS = [reboot_server, shutdown_server, fail_node, stonith]
| 2.03125 | 2 |
census_data_downloader/core/tables.py | ian-r-rose/census-data-downloader | 0 | 2045 | #! /usr/bin/env python
# -*- coding: utf-8 -*
"""
A base class that governs how to download and process tables from a Census API table.
"""
import os
import logging
import pathlib
from . import geotypes
from . import decorators
logger = logging.getLogger(__name__)
class BaseTableConfig(object):
"""
Configures how to download and process tables from the Census API.
"""
THIS_DIR = pathlib.Path(__file__).parent
PARENT_DIR = THIS_DIR.parent
# All available years
YEAR_LIST = [
2017,
2016,
2015,
2014,
2013,
2012,
2011,
2010,
2009
]
# All available geographies
GEOTYPE_LIST = (
"nationwide",
"regions",
"divisions",
"states",
"congressional_districts",
"state_legislative_upper_districts",
"state_legislative_lower_districts",
"counties",
"places",
"urban_areas",
"msas",
"csas",
"pumas",
"nectas",
"cnectas",
"aiannh_homelands",
"tracts",
"zctas",
"unified_school_districts",
"elementary_school_districts",
"secondary_school_districts"
)
def __init__(
self,
api_key=None,
source="acs5",
years=None,
data_dir=None,
force=False
):
"""
Configuration.
"""
# Set the inputs
self.CENSUS_API_KEY = os.getenv("CENSUS_API_KEY", api_key)
if not self.CENSUS_API_KEY:
raise NotImplementedError("Census API key required. Pass it as the first argument.")
self.source = source
self.force = force
#
# Allow custom years for data download, defaulting to most recent year
#
# If they want all the years, give it to them.
if years == "all":
self.years_to_download = self.YEAR_LIST
# If the user provides a year give them that.
elif isinstance(years, int):
self.years_to_download = [years]
# Or if they provide years as a list, give those then.
elif isinstance(years, list):
self.years_to_download = list(map(int, years))
# If they provided nothing, default to the latest year of data
elif years is None:
self.years_to_download = [max(self.YEAR_LIST), ]
# Validate the years
for year in self.years_to_download:
if year not in self.YEAR_LIST:
error_msg = ("Data only available for the years"
f"{self.YEAR_LIST[-1]}-{self.YEAR_LIST[0]}.")
raise NotImplementedError(error_msg)
# Set the data directories
if data_dir:
self.data_dir = pathlib.Path(str(data_dir))
else:
self.data_dir = self.PARENT_DIR.joinpath("data")
self.raw_data_dir = self.data_dir.joinpath("raw")
self.processed_data_dir = self.data_dir.joinpath("processed")
# Make sure they exist
if not self.data_dir.exists():
self.data_dir.mkdir()
if not self.raw_data_dir.exists():
self.raw_data_dir.mkdir()
if not self.processed_data_dir.exists():
self.processed_data_dir.mkdir()
@property
def censusreporter_url(self):
"""
Returns the URL of the Census Reporter page explaining the ACS table.
"""
return f"https://censusreporter.org/tables/{self.RAW_TABLE_NAME}/"
#
# Geotype downloaders
#
@decorators.downloader
def download_nationwide(self):
"""
Download nationwide data.
"""
return geotypes.NationwideDownloader
@decorators.downloader
def download_regions(self):
"""
Download data for all regions.
"""
return geotypes.RegionsDownloader
@decorators.downloader
def download_divisions(self):
"""
Download data for all divisions.
"""
return geotypes.DivisionsDownloader
@decorators.downloader
def download_states(self):
"""
Download data for all states.
"""
return geotypes.StatesDownloader
@decorators.downloader
def download_congressional_districts(self):
"""
Download data for all Congressional districts.
"""
return geotypes.CongressionalDistrictsDownloader
@decorators.downloader
def download_state_legislative_upper_districts(self):
"""
Download data for all Census upper legislative districts in the provided state.
"""
return geotypes.StateLegislativeUpperDistrictsDownloader
@decorators.downloader
def download_state_legislative_lower_districts(self):
"""
Download data for all Census lower legislative districts in the provided state.
"""
return geotypes.StateLegislativeLowerDistrictsDownloader
@decorators.downloader
def download_counties(self):
"""
Download data for all counties.
"""
return geotypes.CountiesDownloader
@decorators.downloader
def download_places(self):
"""
Download data for all Census designated places.
"""
return geotypes.PlacesDownloader
@decorators.downloader
def download_urban_areas(self):
"""
Download data for all urban areas
"""
return geotypes.UrbanAreasDownloader
@decorators.downloader
def download_msas(self):
"""
Download data for Metropolitan Statistical Areas.
"""
return geotypes.MsasDownloader
@decorators.downloader
def download_csas(self):
"""
Download data for Combined Statistical Areas.
"""
return geotypes.CsasDownloader
@decorators.downloader
def download_pumas(self):
"""
Download data for Public Use Microdata Areas.
"""
return geotypes.PumasDownloader
@decorators.downloader
def download_nectas(self):
"""
Download data for New England cities and towns.
"""
return geotypes.NectasDownloader
@decorators.downloader
def download_cnectas(self):
"""
Download data for combined New England cities and towns.
"""
return geotypes.CnectasDownloader
@decorators.downloader
def download_aiannh_homelands(self):
"""
Download data for American Indian home lands.
"""
return geotypes.AiannhHomelandsDownloader
@decorators.downloader
def download_tracts(self):
"""
Download data for all Census tracts in the provided state.
"""
return geotypes.TractsDownloader
@decorators.downloader
def download_zctas(self):
"""
Download data for Zip Code Tabulation Areas
"""
return geotypes.ZctasDownloader
@decorators.downloader
def download_unified_school_districts(self):
"""
Download data for unified school districts.
"""
return geotypes.UnifiedSchoolDistrictsDownloader
@decorators.downloader
def download_elementary_school_districts(self):
"""
Download data for elementary school districts.
"""
return geotypes.ElementarySchoolDistrictsDownloader
@decorators.downloader
def download_secondary_school_districts(self):
"""
Download data for secondary school districts.
"""
return geotypes.SecondarySchoolDistrictsDownloader
def download_everything(self):
"""
Download 'em all.
"""
for geo in self.GEOTYPE_LIST:
print(geo)
# Get the downloader function
dl = getattr(self, f"download_{geo}", None)
# Validate it
if not dl or not callable(dl):
raise NotImplementedError(f"Invalid geography type: {geo}")
# Run it
try:
dl()
except NotImplementedError:
pass
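# --- Usage sketch (illustrative only) --------------------------------------------
# Concrete table configurations subclass BaseTableConfig and add table-specific
# attributes that the geotype downloaders consume. Every name and value below
# (class name, table code, crosswalk keys) is hypothetical and only meant to show
# the shape of such a subclass.
class _ExampleMedianAgeConfig(BaseTableConfig):
    PROCESSED_TABLE_NAME = "medianage"
    UNIVERSE = "total population"
    RAW_TABLE_NAME = "B01002"
    RAW_FIELD_CROSSWALK = {
        "001": "universe",
        "002": "male",
        "003": "female",
    }

# downloader = _ExampleMedianAgeConfig(api_key="YOUR_CENSUS_API_KEY", years=2017)
# downloader.download_states()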
| 2.859375 | 3 |
sgf2ebook.py | loujine/sgf2ebook | 0 | 2046 | <gh_stars>0
#!/usr/bin/env python3
import argparse
import os
from pathlib import Path
import shutil
import subprocess
import sys
from tempfile import TemporaryDirectory
from uuid import uuid4
from zipfile import ZipFile
import jinja2
import sente # type: ignore
__version__ = (1, 0, 0)
SGF_RENDER_EXECUTABLE = './sgf-render'
TEMPLATEDIR = Path(__file__, '..', 'epub_template').resolve()
def load_sgf(sgfpath: Path):
game = sente.sgf.load(str(sgfpath))
comments = {}
seq = game.get_default_sequence()
for idx, move in enumerate(seq, 1):
game.play(move)
if game.comment:
comments[idx] = game.comment
return {
# read only main sequence, not variations
'nb_moves': len(seq),
'metadata': game.get_properties(),
'comments': comments,
}
def main(sgfpath: Path, output_path: Path) -> None:
print()
print(f'Load content of {sgfpath}')
try:
sgf_content = load_sgf(sgfpath)
except (sente.exceptions.InvalidSGFException,
sente.exceptions.IllegalMoveException):
print(f'Could not read {sgfpath}, skipping')
return
nb_moves = sgf_content['nb_moves']
metadata = sgf_content['metadata']
comments = sgf_content['comments']
uuid = uuid4()
with TemporaryDirectory() as tmpdir:
print('Prepare structure of the ebook')
shutil.copytree(TEMPLATEDIR, tmpdir, dirs_exist_ok=True)
template = jinja2.Template(
TEMPLATEDIR.joinpath('EPUB', 'Text', 'page_001.html').open().read())
print('Prepare SVG diagrams')
svgdirpath = Path(tmpdir, 'EPUB', 'Images')
for move in range(1, nb_moves + 1):
svgpath = f'diagram_{move:03}.svg'
# generate SVG files with sgf-render
try:
subprocess.check_call([
SGF_RENDER_EXECUTABLE,
str(sgfpath),
'--move-numbers',
'--first-move-number', str(move),
'-n', str(move),
'--style', 'minimalist',
'-o', svgdirpath.joinpath(svgpath),
])
except subprocess.CalledProcessError:
print(f'Move {move} could not be converted to SVG')
continue
# replace move number in SVG
# not possible directly in sgf-render invocation at the moment
svg_content = svgdirpath.joinpath(svgpath).open().read()
svgdirpath.joinpath(svgpath).open('w').write(
svg_content.replace('>1<', f'>{move}<', 1))
# create HTML page with SVG element
html_content = template.render(
title=sgfpath.stem,
svgpath=svgpath,
info=metadata,
first_flag=(move == 1),
last_flag=(move == nb_moves),
comment=comments.get(move, ''),
)
with Path(tmpdir, 'EPUB', 'Text', f'page_{move:03}.html').open('w') as fd:
fd.write(html_content)
# Declare all HTML/SVG files in master file
print('Prepare content.opf file')
template = jinja2.Template(
TEMPLATEDIR.joinpath('EPUB', 'content.opf').open().read())
opf_content = template.render(
title=sgfpath.stem,
creator='sgf2ebook',
UUID=uuid,
svgpath=sorted(svgdirpath.glob('*.svg')),
enumerate=enumerate,
)
with Path(tmpdir, 'EPUB', 'content.opf').open('w') as fd:
fd.write(opf_content)
# Generate table of contents
print('Prepare table of contents')
template = jinja2.Template(
TEMPLATEDIR.joinpath('EPUB', 'toc.ncx').open().read())
toc_content = template.render(
title=sgfpath.stem,
UUID=uuid,
nb_moves=nb_moves,
range=range,
)
with Path(tmpdir, 'EPUB', 'toc.ncx').open('w') as fd:
fd.write(toc_content)
# zip all content in EPUB file
output_path.mkdir(exist_ok=True, parents=True)
output_name = f"{metadata.get('EV', 'unknown_event')}{'_' if 'RO' in metadata else ''}{metadata.get('RO', '')}.epub".replace(' ', '_')
with ZipFile(output_path.joinpath(output_name), 'w') as zf:
os.chdir(tmpdir)
# "The first file in the OCF ZIP Container MUST be the mimetype file"
zf.write('mimetype')
for root, dirs, files in os.walk('.'):
for file in sorted(files):
if file != 'mimetype':
zf.write(Path(root, file))
os.chdir(Path(__file__).parent)
print(f'{output_path.joinpath(output_name)} generated')
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='')
parser.add_argument('--input-path', '-i', help='Input files or directory')
parser.add_argument('--output-path', '-o', help='Output directory')
args = parser.parse_args()
path = Path(args.input_path)
outpath = Path(args.output_path)
if not path.exists():
print(f'Input path {path} not found')
sys.exit(1)
if path.is_file():
main(path, outpath)
if path.is_dir():
for filepath in sorted(path.rglob('*.sgf')):
main(filepath, outpath.joinpath(filepath.parent.relative_to(path)))
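# --- Usage sketch (illustrative only) ---------------------------------------------
# Typical command-line invocations; the paths are hypothetical, and the sgf-render
# binary must be available at SGF_RENDER_EXECUTABLE for the SVG step to succeed.
#
#   python3 sgf2ebook.py -i game.sgf -o ebooks/
#   python3 sgf2ebook.py -i sgf_collection/ -o ebooks/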
| 2.375 | 2 |
vmis_sql_python/evaluation/metrics/popularity.py | bolcom/serenade-experiments-sigmod | 0 | 2047 | class Popularity:
'''
Popularity( length=20 )
Used to iteratively calculate the average overall popularity of an algorithm's recommendations.
Parameters
-----------
length : int
Popularity@length
training_df : dataframe
determines how many distinct item_ids there are in the training data
'''
def __init__(self, length=20, training_df=None):
self.length = length;
self.sum = 0
self.tests = 0
self.train_actions = len(training_df.index)
#group the data by the itemIds
grp = training_df.groupby('ItemId')
#count the occurrence of every itemid in the training dataset
self.pop_scores = grp.size()
#sort it according to the score
self.pop_scores.sort_values(ascending=False, inplace=True)
#normalize
self.pop_scores = self.pop_scores / self.pop_scores[:1].values[0]
def add(self, result, next_items, for_item=0, session=0, pop_bin=None, position=None):
'''
Update the metric with a result set and the correct next item.
Result must be sorted correctly.
Parameters
--------
result: pandas.Series
Series of scores with the item id as the index
'''
#only keep the k- first predictions
recs = result[:self.length]
#take the unique values out of those top scorers
items = recs.index.unique()
self.sum += ( self.pop_scores[ items ].sum() / len( items ) )
self.tests += 1
def result(self):
'''
Return a tuple of a description string and the current averaged value
'''
return ("Popularity@" + str( self.length ) + ": "), ( self.sum / self.tests )
| 3.953125 | 4 |
dandeliondiary/household/urls.py | amberdiehl/dandeliondiary_project | 0 | 2048 | from django.conf.urls import include, url
from . import views
urlpatterns = [
url(r'^settings$', views.household_dashboard, name='household_dashboard'),
url(r'^myinfo$', views.my_info, name='my_info'),
url(r'^profile$', views.household_profile, name='maintain_household'),
url(r'^members$', views.household_members, name='maintain_members'),
url(r'^vehicles$', views.household_vehicles, name='maintain_vehicles'),
url(r'^ajax/models-by-make/(?P<make_id>\d+)/$', views.ajax_models_by_make),
url(r'^ajax/makes-by-type/(?P<type_id>\d+)/$', views.ajax_makes_by_type),
url(r'^ajax/add-make/(?P<type_key>\d+)/(?P<make>[\w ]{1,50})/$', views.ajax_add_make),
url(r'^ajax/add-model/(?P<make_key>\d+)/(?P<model>[\w -]{1,128})/$', views.ajax_add_model),
url(r'^ajax/delete-invite/$', views.ajax_delete_invite),
url(r'^ajax/change-member-status/$', views.ajax_change_member_status),
]
| 1.875 | 2 |
private/templates/NYC/config.py | devinbalkind/eden | 0 | 2049 | # -*- coding: utf-8 -*-
try:
# Python 2.7
from collections import OrderedDict
except:
# Python 2.6
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from gluon import current
from gluon.html import A, URL
from gluon.storage import Storage
from s3 import s3_fullname
T = current.T
settings = current.deployment_settings
"""
Template settings for NYC Prepared
"""
# Pre-Populate
settings.base.prepopulate = ("NYC",)
settings.base.system_name = T("NYC Prepared")
settings.base.system_name_short = T("NYC Prepared")
# Theme (folder to use for views/layout.html)
settings.base.theme = "NYC"
settings.ui.formstyle_row = "bootstrap"
settings.ui.formstyle = "bootstrap"
settings.ui.filter_formstyle = "table_inline"
settings.msg.parser = "NYC"
# Uncomment to Hide the language toolbar
settings.L10n.display_toolbar = False
# Default timezone for users
settings.L10n.utc_offset = "UTC -0500"
# Uncomment these to use US-style dates in English
settings.L10n.date_format = "%m-%d-%Y"
# Start week on Sunday
settings.L10n.firstDOW = 0
# Number formats (defaults to ISO 31-0)
# Decimal separator for numbers (defaults to ,)
settings.L10n.decimal_separator = "."
# Thousands separator for numbers (defaults to space)
settings.L10n.thousands_separator = ","
# Default Country Code for telephone numbers
settings.L10n.default_country_code = 1
# Enable this to change the label for 'Mobile Phone'
settings.ui.label_mobile_phone = "Cell Phone"
# Enable this to change the label for 'Postcode'
settings.ui.label_postcode = "ZIP Code"
# Uncomment to disable responsive behavior of datatables
# - Disabled until tested
settings.ui.datatables_responsive = False
# PDF to Letter
settings.base.paper_size = T("Letter")
# Restrict the Location Selector to just certain countries
# NB This can also be over-ridden for specific contexts later
# e.g. Activities filtered to those of parent Project
settings.gis.countries = ("US",)
settings.fin.currencies = {
"USD" : T("United States Dollars"),
}
settings.L10n.languages = OrderedDict([
("en", "English"),
("es", "Español"),
])
# Authentication settings
# These settings should be changed _after_ the 1st (admin) user is
# registered in order to secure the deployment
# Should users be allowed to register themselves?
settings.security.self_registration = "index"
# Do new users need to verify their email address?
settings.auth.registration_requires_verification = True
# Do new users need to be approved by an administrator prior to being able to login?
settings.auth.registration_requires_approval = True
# Always notify the approver of a new (verified) user, even if the user is automatically approved
#settings.auth.always_notify_approver = False
# Uncomment this to request the Mobile Phone when a user registers
settings.auth.registration_requests_mobile_phone = True
# Uncomment this to request the Organisation when a user registers
settings.auth.registration_requests_organisation = True
# Uncomment this to request the Site when a user registers
#settings.auth.registration_requests_site = True
# Roles that newly-registered users get automatically
#settings.auth.registration_roles = { 0: ["comms_dispatch"]}
#settings.auth.registration_link_user_to = {"staff":T("Staff"),
# #"volunteer":T("Volunteer")
# }
settings.auth.registration_link_user_to_default = "staff"
settings.security.policy = 5 # Controller, Function & Table ACLs
# Enable this to have Open links in IFrames open a full page in a new tab
settings.ui.iframe_opens_full = True
settings.ui.label_attachments = "Media"
settings.ui.update_label = "Edit"
# Uncomment to disable checking that LatLons are within boundaries of their parent
#settings.gis.check_within_parent_boundaries = False
# GeoNames username
settings.gis.geonames_username = "eden_nyc"
# Uncomment to show created_by/modified_by using Names not Emails
settings.ui.auth_user_represent = "name"
# Record Approval
settings.auth.record_approval = True
settings.auth.record_approval_required_for = ("org_organisation",)
# -----------------------------------------------------------------------------
# Audit
def audit_write(method, tablename, form, record, representation):
if not current.auth.user:
# Don't include prepop
return False
if tablename in ("cms_post",
"org_facility",
"org_organisation",
"req_req",
):
# Perform normal Audit
return True
else:
# Don't Audit non user-visible resources
return False
settings.security.audit_write = audit_write
# -----------------------------------------------------------------------------
# CMS
# Uncomment to use Bookmarks in Newsfeed
settings.cms.bookmarks = True
# Uncomment to use have Filter form in Newsfeed be open by default
settings.cms.filter_open = True
# Uncomment to adjust filters in Newsfeed when clicking on locations instead of opening the profile page
settings.cms.location_click_filters = True
# Uncomment to use organisation_id instead of created_by in Newsfeed
settings.cms.organisation = "post_organisation.organisation_id"
# Uncomment to use org_group_id in Newsfeed
settings.cms.organisation_group = "post_organisation_group.group_id"
# Uncomment to use person_id instead of created_by in Newsfeed
settings.cms.person = "person_id"
# Uncomment to use Rich Text editor in Newsfeed
settings.cms.richtext = True
# Uncomment to show Links in Newsfeed
settings.cms.show_links = True
# Uncomment to show Tags in Newsfeed
settings.cms.show_tags = True
# Uncomment to show post Titles in Newsfeed
settings.cms.show_titles = True
# -----------------------------------------------------------------------------
# Inventory Management
# Uncomment to customise the label for Facilities in Inventory Management
settings.inv.facility_label = "Facility"
# Uncomment if you need a simpler (but less accountable) process for managing stock levels
#settings.inv.direct_stock_edits = True
# Uncomment to call Stock Adjustments, 'Stock Counts'
settings.inv.stock_count = True
# Uncomment to not track pack values
settings.inv.track_pack_values = False
settings.inv.send_show_org = False
# Types common to both Send and Receive
settings.inv.shipment_types = {
1: T("Other Warehouse")
}
settings.inv.send_types = {
#21: T("Distribution")
}
settings.inv.send_type_default = 1
settings.inv.item_status = {
#0: current.messages["NONE"],
#1: T("Dump"),
#2: T("Sale"),
#3: T("Reject"),
#4: T("Surplus")
}
# -----------------------------------------------------------------------------
# Organisations
#
# Enable the use of Organisation Groups
settings.org.groups = "Network"
# Make Services Hierarchical
settings.org.services_hierarchical = True
# Set the label for Sites
settings.org.site_label = "Facility"
#settings.org.site_label = "Location"
# Uncomment to show the date when a Site (Facilities-only for now) was last contacted
settings.org.site_last_contacted = True
# Enable certain fields just for specific Organisations
# empty list => disabled for all (including Admin)
#settings.org.dependent_fields = { \
# "pr_person_details.mother_name" : [],
# "pr_person_details.father_name" : [],
# "pr_person_details.company" : [],
# "pr_person_details.affiliations" : [],
# "vol_volunteer.active" : [],
# "vol_volunteer_cluster.vol_cluster_type_id" : [],
# "vol_volunteer_cluster.vol_cluster_id" : [],
# "vol_volunteer_cluster.vol_cluster_position_id" : [],
# }
# Uncomment to use an Autocomplete for Site lookup fields
settings.org.site_autocomplete = True
# Extra fields to search in Autocompletes & display in Representations
settings.org.site_autocomplete_fields = ("organisation_id$name",
"location_id$addr_street",
)
# Uncomment to hide inv & req tabs from Sites
#settings.org.site_inv_req_tabs = True
# -----------------------------------------------------------------------------
def facility_marker_fn(record):
"""
Function to decide which Marker to use for Facilities Map
@ToDo: Legend
"""
db = current.db
s3db = current.s3db
table = db.org_facility_type
ltable = db.org_site_facility_type
query = (ltable.site_id == record.site_id) & \
(ltable.facility_type_id == table.id)
rows = db(query).select(table.name)
types = [row.name for row in rows]
# Use Marker in preferential order
if "Hub" in types:
marker = "warehouse"
elif "Medical Clinic" in types:
marker = "hospital"
elif "Food" in types:
marker = "food"
elif "Relief Site" in types:
marker = "asset"
elif "Residential Building" in types:
marker = "residence"
#elif "Shelter" in types:
# marker = "shelter"
else:
# Unknown
marker = "office"
if settings.has_module("req"):
# Colour code by open/priority requests
reqs = record.reqs
if reqs == 3:
# High
marker = "%s_red" % marker
elif reqs == 2:
# Medium
marker = "%s_yellow" % marker
elif reqs == 1:
# Low
marker = "%s_green" % marker
mtable = db.gis_marker
try:
marker = db(mtable.name == marker).select(mtable.image,
mtable.height,
mtable.width,
cache=s3db.cache,
limitby=(0, 1)
).first()
except:
marker = db(mtable.name == "office").select(mtable.image,
mtable.height,
mtable.width,
cache=s3db.cache,
limitby=(0, 1)
).first()
return marker
# -----------------------------------------------------------------------------
def org_facility_onvalidation(form):
"""
Default the name to the Street Address
"""
form_vars = form.vars
name = form_vars.get("name", None)
if name:
return
address = form_vars.get("address", None)
if address:
form_vars.name = address
else:
# We need a default
form_vars.name = current.db.org_facility.location_id.represent(form_vars.location_id)
# -----------------------------------------------------------------------------
def customise_org_facility_controller(**attr):
s3db = current.s3db
s3 = current.response.s3
# Tell the client to request per-feature markers
s3db.configure("org_facility", marker_fn=facility_marker_fn)
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
if r.method not in ("read", "update"):
types = r.get_vars.get("site_facility_type.facility_type_id__belongs", None)
if not types:
# Hide Private Residences
from s3 import FS
s3.filter = FS("site_facility_type.facility_type_id$name") != "Private Residence"
if r.interactive:
tablename = "org_facility"
table = s3db[tablename]
if not r.component and r.method in (None, "create", "update"):
from s3 import IS_LOCATION_SELECTOR2, S3LocationSelectorWidget2, S3MultiSelectWidget
field = table.location_id
if r.method in ("create", "update"):
field.label = "" # Gets replaced by widget
levels = ("L2", "L3")
field.requires = IS_LOCATION_SELECTOR2(levels=levels)
field.widget = S3LocationSelectorWidget2(levels=levels,
hide_lx=False,
reverse_lx=True,
show_address=True,
show_postcode=True,
)
table.organisation_id.widget = S3MultiSelectWidget(multiple=False)
if r.get_vars.get("format", None) == "popup":
# Coming from req/create form
# Hide most Fields
from s3 import S3SQLCustomForm, S3SQLInlineComponent
# We default this onvalidation
table.name.notnull = False
table.name.requires = None
crud_form = S3SQLCustomForm(S3SQLInlineComponent(
"site_facility_type",
label = T("Facility Type"),
fields = [("", "facility_type_id")],
multiple = False,
required = True,
),
"name",
"location_id",
)
s3db.configure(tablename,
crud_form = crud_form,
onvalidation = org_facility_onvalidation,
)
return True
s3.prep = custom_prep
return attr
settings.customise_org_facility_controller = customise_org_facility_controller
# -----------------------------------------------------------------------------
def customise_org_organisation_resource(r, tablename):
from gluon.html import DIV, INPUT
from s3 import S3MultiSelectWidget, S3SQLCustomForm, S3SQLInlineLink, S3SQLInlineComponent, S3SQLInlineComponentMultiSelectWidget
s3db = current.s3db
if r.tablename == "org_organisation":
if r.id:
# Update form
ctable = s3db.pr_contact
query = (ctable.pe_id == r.record.pe_id) & \
(ctable.contact_method == "RSS") & \
(ctable.deleted == False)
rss = current.db(query).select(ctable.poll,
limitby=(0, 1)
).first()
if rss and not rss.poll:
# Remember that we don't wish to import
rss_import = "on"
else:
# Default
rss_import = None
else:
# Create form: Default
rss_import = None
else:
# Component
if r.component_id:
# Update form
db = current.db
otable = s3db.org_organisation
org = db(otable.id == r.component_id).select(otable.pe_id,
limitby=(0, 1)
).first()
try:
pe_id = org.pe_id
except:
current.log.error("Org %s not found: cannot set rss_import correctly" % r.component_id)
# Default
rss_import = None
else:
ctable = s3db.pr_contact
query = (ctable.pe_id == pe_id) & \
(ctable.contact_method == "RSS") & \
(ctable.deleted == False)
rss = db(query).select(ctable.poll,
limitby=(0, 1)
).first()
if rss and not rss.poll:
# Remember that we don't wish to import
rss_import = "on"
else:
# Default
rss_import = None
else:
# Create form: Default
rss_import = None
mtable = s3db.org_group_membership
mtable.group_id.widget = S3MultiSelectWidget(multiple=False)
mtable.status_id.widget = S3MultiSelectWidget(multiple=False,
create=dict(c="org",
f="group_membership_status",
label=str(T("Add New Status")),
parent="group_membership",
child="status_id"
))
crud_form = S3SQLCustomForm(
"name",
"acronym",
S3SQLInlineLink(
"organisation_type",
field = "organisation_type_id",
label = T("Type"),
multiple = False,
#widget = "hierarchy",
),
S3SQLInlineComponentMultiSelectWidget(
# activate hierarchical org_service:
#S3SQLInlineLink(
"service",
label = T("Services"),
field = "service_id",
# activate hierarchical org_service:
#leafonly = False,
#widget = "hierarchy",
),
S3SQLInlineComponent(
"group_membership",
label = T("Network"),
fields = [("", "group_id"),
("", "status_id"),
],
),
S3SQLInlineComponent(
"address",
label = T("Address"),
multiple = False,
# This is just Text - put into the Comments box for now
# Ultimately should go into location_id$addr_street
fields = [("", "comments")],
),
S3SQLInlineComponentMultiSelectWidget(
"location",
label = T("Neighborhoods Served"),
field = "location_id",
filterby = dict(field = "level",
options = "L4"
),
# @ToDo: GroupedCheckbox Widget or Hierarchical MultiSelectWidget
#cols = 5,
),
"phone",
S3SQLInlineComponent(
"contact",
name = "phone2",
label = T("Phone2"),
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "WORK_PHONE"
)
),
S3SQLInlineComponent(
"contact",
name = "email",
label = T("Email"),
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "EMAIL"
)
),
"website",
S3SQLInlineComponent(
"contact",
comment = DIV(INPUT(_type="checkbox",
_name="rss_no_import",
value = rss_import,
),
T("Don't Import Feed")),
name = "rss",
label = T("RSS"),
multiple = False,
fields = [("", "value"),
#(T("Don't Import Feed"), "poll"),
],
filterby = dict(field = "contact_method",
options = "RSS"
)
),
S3SQLInlineComponent(
"document",
name = "iCal",
label = "iCAL",
multiple = False,
fields = [("", "url")],
filterby = dict(field = "name",
options="iCal"
)
),
S3SQLInlineComponent(
"document",
name = "data",
label = T("Data"),
multiple = False,
fields = [("", "url")],
filterby = dict(field = "name",
options="Data"
)
),
S3SQLInlineComponent(
"contact",
name = "twitter",
label = T("Twitter"),
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "TWITTER"
)
),
S3SQLInlineComponent(
"contact",
name = "facebook",
label = T("Facebook"),
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "FACEBOOK"
)
),
"comments",
postprocess = pr_contact_postprocess,
)
from s3 import S3LocationFilter, S3OptionsFilter, S3TextFilter
# activate hierarchical org_service:
#from s3 import S3LocationFilter, S3OptionsFilter, S3TextFilter, S3HierarchyFilter
filter_widgets = [
S3TextFilter(["name", "acronym"],
label = T("Name"),
_class = "filter-search",
),
S3OptionsFilter("group_membership.group_id",
label = T("Network"),
represent = "%(name)s",
#hidden = True,
),
S3LocationFilter("organisation_location.location_id",
label = T("Neighborhood"),
levels = ("L3", "L4"),
#hidden = True,
),
S3OptionsFilter("service_organisation.service_id",
#label = T("Service"),
#hidden = True,
),
# activate hierarchical org_service:
#S3HierarchyFilter("service_organisation.service_id",
# #label = T("Service"),
# #hidden = True,
# ),
S3OptionsFilter("organisation_organisation_type.organisation_type_id",
label = T("Type"),
#hidden = True,
),
]
list_fields = ["name",
(T("Type"), "organisation_organisation_type.organisation_type_id"),
(T("Services"), "service.name"),
"phone",
(T("Email"), "email.value"),
"website"
#(T("Neighborhoods Served"), "location.name"),
]
s3db.configure("org_organisation",
crud_form = crud_form,
filter_widgets = filter_widgets,
list_fields = list_fields,
)
settings.customise_org_organisation_resource = customise_org_organisation_resource
# -----------------------------------------------------------------------------
def customise_org_organisation_controller(**attr):
s3db = current.s3db
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
if r.interactive:
if r.component_name == "facility":
if r.method in (None, "create", "update"):
from s3 import IS_LOCATION_SELECTOR2, S3LocationSelectorWidget2
table = s3db.org_facility
field = table.location_id
if r.method in ("create", "update"):
field.label = "" # Gets replaced by widget
levels = ("L2", "L3")
field.requires = IS_LOCATION_SELECTOR2(levels=levels)
field.widget = S3LocationSelectorWidget2(levels=levels,
hide_lx=False,
reverse_lx=True,
show_address=True,
show_postcode=True,
)
elif r.component_name == "human_resource":
# Don't assume that user is from same org/site as Contacts they create
r.component.table.site_id.default = None
return result
s3.prep = custom_prep
# Custom postp
standard_postp = s3.postp
def custom_postp(r, output):
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
if r.interactive and isinstance(output, dict):
if "rheader" in output:
# Custom Tabs
tabs = [(T("Basic Details"), None),
(T("Contacts"), "human_resource"),
(T("Facilities"), "facility"),
(T("Projects"), "project"),
(T("Assets"), "asset"),
]
output["rheader"] = s3db.org_rheader(r, tabs=tabs)
return output
s3.postp = custom_postp
return attr
settings.customise_org_organisation_controller = customise_org_organisation_controller
# -----------------------------------------------------------------------------
def customise_org_group_controller(**attr):
s3db = current.s3db
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
if not r.component:
table = s3db.org_group
list_fields = ["name",
"mission",
"website",
"meetings",
]
s3db.configure("org_group",
list_fields = list_fields,
)
if r.interactive:
from gluon.html import DIV, INPUT
from s3 import S3SQLCustomForm, S3SQLInlineComponent
if r.method != "read":
from gluon.validators import IS_EMPTY_OR
from s3 import IS_LOCATION_SELECTOR2, S3LocationSelectorWidget2
field = table.location_id
field.label = "" # Gets replaced by widget
#field.requires = IS_LOCATION_SELECTOR2(levels = ("L2",))
field.requires = IS_EMPTY_OR(
IS_LOCATION_SELECTOR2(levels = ("L2",))
)
field.widget = S3LocationSelectorWidget2(levels = ("L2",),
points = True,
polygons = True,
)
# Default location to Manhattan
db = current.db
gtable = db.gis_location
query = (gtable.name == "New York") & \
(gtable.level == "L2")
manhattan = db(query).select(gtable.id,
limitby=(0, 1)).first()
if manhattan:
field.default = manhattan.id
table.mission.readable = table.mission.writable = True
table.meetings.readable = table.meetings.writable = True
if r.id:
# Update form
ctable = s3db.pr_contact
query = (ctable.pe_id == r.record.pe_id) & \
(ctable.contact_method == "RSS") & \
(ctable.deleted == False)
rss = current.db(query).select(ctable.poll,
limitby=(0, 1)
).first()
if rss and not rss.poll:
# Remember that we don't wish to import
rss_import = "on"
else:
# Default
rss_import = None
else:
# Create form: Default
rss_import = None
crud_form = S3SQLCustomForm(
"name",
"location_id",
"mission",
S3SQLInlineComponent(
"contact",
name = "phone",
label = T("Phone"),
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "WORK_PHONE"
)
),
S3SQLInlineComponent(
"contact",
name = "email",
label = T("Email"),
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "EMAIL"
)
),
"website",
S3SQLInlineComponent(
"contact",
comment = DIV(INPUT(_type="checkbox",
_name="rss_no_import",
value = rss_import,
),
T("Don't Import Feed")),
name = "rss",
label = T("RSS"),
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "RSS"
)
),
S3SQLInlineComponent(
"document",
name = "iCal",
label = "iCAL",
multiple = False,
fields = [("", "url")],
filterby = dict(field = "name",
options="iCal"
)
),
S3SQLInlineComponent(
"document",
name = "data",
label = T("Data"),
multiple = False,
fields = [("", "url")],
filterby = dict(field = "name",
options="Data"
)
),
S3SQLInlineComponent(
"contact",
name = "twitter",
label = T("Twitter"),
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "TWITTER"
)
),
S3SQLInlineComponent(
"contact",
name = "facebook",
label = T("Facebook"),
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "FACEBOOK"
)
),
"meetings",
"comments",
postprocess = pr_contact_postprocess,
)
s3db.configure("org_group",
crud_form = crud_form,
)
elif r.component_name == "pr_group":
list_fields = [#(T("Network"), "group_team.org_group_id"),
"name",
"description",
"meetings",
(T("Chairperson"), "chairperson"),
"comments",
]
s3db.configure("pr_group",
list_fields = list_fields,
)
elif r.component_name == "organisation":
# Add Network Status to List Fields
list_fields = s3db.get_config("org_organisation", "list_fields")
list_fields.insert(1, "group_membership.status_id")
return result
s3.prep = custom_prep
if current.auth.s3_logged_in():
        # Allow components with components (such as org/group) to break out from tabs
attr["native"] = True
return attr
settings.customise_org_group_controller = customise_org_group_controller
# -----------------------------------------------------------------------------
# Persons
# Uncomment to hide fields in S3AddPersonWidget
settings.pr.request_dob = False
settings.pr.request_gender = False
# Doesn't yet work (form fails to submit)
#settings.pr.select_existing = False
settings.pr.show_emergency_contacts = False
# -----------------------------------------------------------------------------
# Persons
def customise_pr_person_controller(**attr):
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
s3db = current.s3db
#if r.method == "validate":
# # Can't validate image without the file
# image_field = s3db.pr_image.image
# image_field.requires = None
if r.interactive or r.representation == "aadata":
if not r.component:
hr_fields = ["organisation_id",
"job_title_id",
"site_id",
]
if r.method in ("create", "update"):
get_vars = r.get_vars
                    # Context from a Profile page?
organisation_id = get_vars.get("(organisation)", None)
if organisation_id:
field = s3db.hrm_human_resource.organisation_id
field.default = organisation_id
field.readable = field.writable = False
hr_fields.remove("organisation_id")
site_id = get_vars.get("(site)", None)
if site_id:
field = s3db.hrm_human_resource.site_id
field.default = site_id
field.readable = field.writable = False
hr_fields.remove("site_id")
else:
s3db.hrm_human_resource.site_id.default = None
# ImageCrop widget doesn't currently work within an Inline Form
#image_field = s3db.pr_image.image
#from gluon.validators import IS_IMAGE
#image_field.requires = IS_IMAGE()
#image_field.widget = None
from s3 import S3SQLCustomForm, S3SQLInlineComponent
s3_sql_custom_fields = ["first_name",
#"middle_name",
"last_name",
S3SQLInlineComponent(
"human_resource",
name = "human_resource",
label = "",
multiple = False,
fields = hr_fields,
),
#S3SQLInlineComponent(
# "image",
# name = "image",
# label = T("Photo"),
# multiple = False,
# fields = [("", "image")],
# filterby = dict(field = "profile",
# options=[True]
# )
# ),
]
list_fields = [(current.messages.ORGANISATION, "human_resource.organisation_id"),
"first_name",
#"middle_name",
"last_name",
(T("Job Title"), "human_resource.job_title_id"),
(T("Office"), "human_resource.site_id"),
]
# Don't include Email/Phone for unauthenticated users
if current.auth.is_logged_in():
MOBILE = settings.get_ui_label_mobile_phone()
EMAIL = T("Email")
list_fields += [(MOBILE, "phone.value"),
(EMAIL, "email.value"),
]
s3_sql_custom_fields.insert(3,
S3SQLInlineComponent(
"contact",
name = "phone",
label = MOBILE,
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "SMS")),
)
s3_sql_custom_fields.insert(3,
S3SQLInlineComponent(
"contact",
name = "email",
label = EMAIL,
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "EMAIL")),
)
crud_form = S3SQLCustomForm(*s3_sql_custom_fields)
s3db.configure(r.tablename,
crud_form = crud_form,
list_fields = list_fields,
)
elif r.component_name == "group_membership":
s3db.pr_group_membership.group_head.label = T("Group Chairperson")
return result
s3.prep = custom_prep
# Custom postp
standard_postp = s3.postp
def custom_postp(r, output):
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
if r.interactive and isinstance(output, dict):
if "form" in output:
output["form"].add_class("pr_person")
elif "item" in output and hasattr(output["item"], "add_class"):
output["item"].add_class("pr_person")
return output
s3.postp = custom_postp
return attr
settings.customise_pr_person_controller = customise_pr_person_controller
# -----------------------------------------------------------------------------
# Groups
def chairperson(row):
"""
Virtual Field to show the chairperson of a group
"""
if hasattr(row, "pr_group"):
row = row.pr_group
try:
group_id = row.id
except:
# not available
return current.messages["NONE"]
db = current.db
mtable = current.s3db.pr_group_membership
ptable = db.pr_person
query = (mtable.group_id == group_id) & \
(mtable.group_head == True) & \
(mtable.person_id == ptable.id)
chair = db(query).select(ptable.first_name,
ptable.middle_name,
ptable.last_name,
ptable.id,
limitby=(0, 1)).first()
if chair:
# Only used in list view so HTML is OK
return A(s3_fullname(chair),
_href=URL(c="hrm", f="person", args=chair.id))
else:
return current.messages["NONE"]
# -----------------------------------------------------------------------------
def customise_pr_group_controller(**attr):
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
from s3 import S3Represent, S3TextFilter, S3OptionsFilter, S3SQLCustomForm, S3SQLInlineComponent
s3db = current.s3db
s3db.org_group_team.org_group_id.represent = S3Represent(lookup="org_group",
show_link=True)
crud_form = S3SQLCustomForm("name",
"description",
S3SQLInlineComponent("group_team",
label = T("Network"),
fields = [("", "org_group_id")],
# @ToDo: Make this optional?
multiple = False,
),
"meetings",
"comments",
)
filter_widgets = [
S3TextFilter(["name",
"description",
"comments",
"group_team.org_group_id$name",
],
label = T("Search"),
comment = T("You can search by by group name, description or comments and by network name. You may use % as wildcard. Press 'Search' without input to list all."),
#_class = "filter-search",
),
S3OptionsFilter("group_team.org_group_id",
label = T("Network"),
#hidden = True,
),
]
        # Need to re-do list_fields as they get overwritten by hrm_group_controller()
list_fields = [(T("Network"), "group_team.org_group_id"),
"name",
"description",
"meetings",
(T("Chairperson"), "chairperson"),
"comments",
]
s3db.configure("pr_group",
crud_form = crud_form,
filter_widgets = filter_widgets,
list_fields = list_fields,
)
s3db.pr_group_membership.group_head.label = T("Group Chairperson")
if r.component_name == "group_membership":
from s3layouts import S3AddResourceLink
s3db.pr_group_membership.person_id.comment = \
S3AddResourceLink(c="pr", f="person",
title=T("Create Person"),
tooltip=current.messages.AUTOCOMPLETE_HELP)
#else:
# # RHeader wants a simplified version, but don't want inconsistent across tabs
# s3db.pr_group_membership.group_head.label = T("Chairperson")
return True
s3.prep = custom_prep
return attr
settings.customise_pr_group_controller = customise_pr_group_controller
# -----------------------------------------------------------------------------
def customise_pr_group_resource(r, tablename):
"""
Customise pr_group resource (in group & org_group controllers)
- runs after controller customisation
- but runs before prep
"""
s3db = current.s3db
table = s3db.pr_group
field = table.group_type
field.default = 3 # Relief Team, to show up in hrm/group
field.readable = field.writable = False
table.name.label = T("Name")
table.description.label = T("Description")
table.meetings.readable = table.meetings.writable = True
# Increase size of widget
from s3 import s3_comments_widget
table.description.widget = s3_comments_widget
from gluon import Field
table.chairperson = Field.Method("chairperson", chairperson)
# Format for filter_widgets & imports
s3db.add_components("pr_group",
org_group_team = "group_id",
)
s3db.configure("pr_group",
# Redirect to member list when a new group has been created
create_next = URL(c="hrm", f="group",
args=["[id]", "group_membership"]),
)
settings.customise_pr_group_resource = customise_pr_group_resource
# -----------------------------------------------------------------------------
def pr_contact_postprocess(form):
"""
Import Organisation/Network RSS Feeds
"""
s3db = current.s3db
form_vars = form.vars
rss_url = form_vars.rsscontact_i_value_edit_0 or \
form_vars.rsscontact_i_value_edit_none
if not rss_url:
if form.record:
# Update form
old_rss = form.record.sub_rsscontact
import json
            data = json.loads(old_rss)["data"]
if data:
# RSS feed is being deleted, so we should disable it
old_rss = data[0]["value"]["value"]
table = s3db.msg_rss_channel
old = current.db(table.url == old_rss).select(table.channel_id,
table.enabled,
limitby = (0, 1)
).first()
if old and old.enabled:
s3db.msg_channel_disable("msg_rss_channel", old.channel_id)
return
else:
# Nothing to do :)
return
# Check if we already have a channel for this Contact
db = current.db
name = form_vars.name
table = s3db.msg_rss_channel
name_exists = db(table.name == name).select(table.id,
table.channel_id,
table.enabled,
table.url,
limitby = (0, 1)
).first()
no_import = current.request.post_vars.get("rss_no_import", None)
if name_exists:
if name_exists.url == rss_url:
# No change to either Contact Name or URL
if no_import:
if name_exists.enabled:
# Disable channel (& associated parsers)
s3db.msg_channel_disable("msg_rss_channel",
name_exists.channel_id)
return
elif name_exists.enabled:
# Nothing to do :)
return
else:
# Enable channel (& associated parsers)
s3db.msg_channel_enable("msg_rss_channel",
name_exists.channel_id)
return
# Check if we already have a channel for this URL
url_exists = db(table.url == rss_url).select(table.id,
table.channel_id,
table.enabled,
limitby = (0, 1)
).first()
if url_exists:
# We have 2 feeds: 1 for the Contact & 1 for the URL
# Disable the old Contact one and link the URL one to this Contact
# and ensure active or not as appropriate
# Name field is unique so rename old one
name_exists.update_record(name="%s (Old)" % name)
if name_exists.enabled:
# Disable channel (& associated parsers)
s3db.msg_channel_disable("msg_rss_channel",
name_exists.channel_id)
url_exists.update_record(name=name)
if no_import:
if url_exists.enabled:
# Disable channel (& associated parsers)
s3db.msg_channel_disable("msg_rss_channel",
url_exists.channel_id)
return
elif url_exists.enabled:
# Nothing to do :)
return
else:
# Enable channel (& associated parsers)
s3db.msg_channel_enable("msg_rss_channel",
url_exists.channel_id)
return
else:
# Update the URL
name_exists.update_record(url=rss_url)
if no_import:
if name_exists.enabled:
# Disable channel (& associated parsers)
s3db.msg_channel_disable("msg_rss_channel",
name_exists.channel_id)
return
elif name_exists.enabled:
# Nothing to do :)
return
else:
# Enable channel (& associated parsers)
s3db.msg_channel_enable("msg_rss_channel",
name_exists.channel_id)
return
else:
# Check if we already have a channel for this URL
url_exists = db(table.url == rss_url).select(table.id,
table.channel_id,
table.enabled,
limitby = (0, 1)
).first()
if url_exists:
# Either Contact has changed Name or this feed is associated with
# another Contact
# - update Feed name
url_exists.update_record(name=name)
if no_import:
if url_exists.enabled:
# Disable channel (& associated parsers)
s3db.msg_channel_disable("msg_rss_channel",
url_exists.channel_id)
return
elif url_exists.enabled:
# Nothing to do :)
return
else:
# Enable channel (& associated parsers)
s3db.msg_channel_enable("msg_rss_channel",
url_exists.channel_id)
return
elif no_import:
# Nothing to do :)
return
#else:
# # Create a new Feed
# pass
# Add RSS Channel
_id = table.insert(name=name, enabled=True, url=rss_url)
record = dict(id=_id)
s3db.update_super(table, record)
# Enable
channel_id = record["channel_id"]
s3db.msg_channel_enable("msg_rss_channel", channel_id)
# Setup Parser
table = s3db.msg_parser
_id = table.insert(channel_id=channel_id,
function_name="parse_rss",
enabled=True)
s3db.msg_parser_enable(_id)
# Check Now
async = current.s3task.async
async("msg_poll", args=["msg_rss_channel", channel_id])
async("msg_parse", args=[channel_id, "parse_rss"])
# -----------------------------------------------------------------------------
# Human Resource Management
# Uncomment to change the label for 'Staff'
settings.hrm.staff_label = "Contacts"
# Uncomment to allow Staff & Volunteers to be registered without an email address
settings.hrm.email_required = False
# Uncomment to allow Staff & Volunteers to be registered without an Organisation
settings.hrm.org_required = False
# Uncomment to show the Organisation name in HR represents
settings.hrm.show_organisation = True
# Uncomment to disable Staff experience
settings.hrm.staff_experience = False
# Uncomment to disable the use of HR Certificates
settings.hrm.use_certificates = False
# Uncomment to disable the use of HR Credentials
settings.hrm.use_credentials = False
# Uncomment to disable the use of HR Education
settings.hrm.use_education = False
# Uncomment to disable the use of HR Skills
#settings.hrm.use_skills = False
# Uncomment to disable the use of HR Trainings
settings.hrm.use_trainings = False
# Uncomment to disable the use of HR Description
settings.hrm.use_description = False
# Change the label of "Teams" to "Groups"
settings.hrm.teams = "Groups"
# Custom label for Organisations in HR module
#settings.hrm.organisation_label = "National Society / Branch"
settings.hrm.organisation_label = "Organization"
# -----------------------------------------------------------------------------
def customise_hrm_human_resource_controller(**attr):
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
if r.interactive or r.representation == "aadata":
if not r.component:
from s3 import S3TextFilter, S3OptionsFilter, S3LocationFilter
filter_widgets = [
S3TextFilter(["person_id$first_name",
"person_id$middle_name",
"person_id$last_name",
],
label = T("Name"),
),
S3OptionsFilter("organisation_id",
filter = True,
header = "",
hidden = True,
),
S3OptionsFilter("group_person.group_id",
label = T("Network"),
#filter = True,
#header = "",
hidden = True,
),
S3LocationFilter("location_id",
label = T("Location"),
levels = ("L1", "L2", "L3", "L4"),
hidden = True,
),
S3OptionsFilter("site_id",
hidden = True,
),
S3OptionsFilter("training.course_id",
label = T("Training"),
hidden = True,
),
S3OptionsFilter("group_membership.group_id",
label = T("Team"),
filter = True,
header = "",
hidden = True,
),
]
s3db = current.s3db
s3db.configure("hrm_human_resource",
filter_widgets = filter_widgets,
)
field = r.table.site_id
# Don't assume that user is from same org/site as Contacts they create
field.default = None
# Use a hierarchical dropdown instead of AC
field.widget = None
script = \
'''$.filterOptionsS3({
'trigger':'organisation_id',
'target':'site_id',
'lookupResource':'site',
'lookupURL':'/%s/org/sites_for_org/',
'optional':true
})''' % r.application
s3.jquery_ready.append(script)
return result
s3.prep = custom_prep
return attr
settings.customise_hrm_human_resource_controller = customise_hrm_human_resource_controller
# -----------------------------------------------------------------------------
def customise_hrm_human_resource_resource(r, tablename):
"""
Customise hrm_human_resource resource (in facility, human_resource, organisation & person controllers)
- runs after controller customisation
- but runs before prep
"""
s3db = current.s3db
from s3 import S3SQLCustomForm, S3SQLInlineComponent
crud_form = S3SQLCustomForm("person_id",
"organisation_id",
"site_id",
S3SQLInlineComponent(
"group_person",
label = T("Network"),
link = False,
fields = [("", "group_id")],
multiple = False,
),
"job_title_id",
"start_date",
)
list_fields = ["id",
"person_id",
"job_title_id",
"organisation_id",
(T("Network"), "group_person.group_id"),
(T("Groups"), "person_id$group_membership.group_id"),
"site_id",
#"site_contact",
(T("Email"), "email.value"),
(settings.get_ui_label_mobile_phone(), "phone.value"),
]
s3db.configure("hrm_human_resource",
crud_form = crud_form,
list_fields = list_fields,
)
settings.customise_hrm_human_resource_resource = customise_hrm_human_resource_resource
# -----------------------------------------------------------------------------
def customise_hrm_job_title_controller(**attr):
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
if r.interactive or r.representation == "aadata":
table = current.s3db.hrm_job_title
table.organisation_id.readable = table.organisation_id.writable = False
table.type.readable = table.type.writable = False
return result
s3.prep = custom_prep
return attr
settings.customise_hrm_job_title_controller = customise_hrm_job_title_controller
# -----------------------------------------------------------------------------
# Projects
# Use codes for projects (called 'blurb' in NYC)
settings.project.codes = True
# Uncomment this to use settings suitable for detailed Task management
settings.project.mode_task = False
# Uncomment this to use Activities for projects
settings.project.activities = True
# Uncomment this to use Milestones in project/task.
settings.project.milestones = False
# Uncomment this to disable Sectors in projects
settings.project.sectors = False
# Multiple partner organizations
settings.project.multiple_organisations = True
def customise_project_project_controller(**attr):
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
if not r.component and (r.interactive or r.representation == "aadata"):
from s3 import S3SQLCustomForm, S3SQLInlineComponent, S3SQLInlineComponentCheckbox
s3db = current.s3db
table = r.table
tablename = "project_project"
table.code.label = T("Project blurb (max. 100 characters)")
table.code.max_length = 100
table.comments.label = T("How people can help")
script = '''$('#project_project_code').attr('maxlength','100')'''
s3.jquery_ready.append(script)
crud_form = S3SQLCustomForm(
"organisation_id",
"name",
"code",
"description",
"status_id",
"start_date",
"end_date",
"calendar",
#"drr.hfa",
#"objectives",
"human_resource_id",
# Activities
S3SQLInlineComponent(
"location",
label = T("Location"),
fields = [("", "location_id")],
),
# Partner Orgs
S3SQLInlineComponent(
"organisation",
name = "partner",
label = T("Partner Organizations"),
fields = ["organisation_id",
"comments", # NB This is labelled 'Role' in DRRPP
],
filterby = dict(field = "role",
options = "2"
)
),
S3SQLInlineComponent(
"document",
name = "media",
label = T("URLs (media, fundraising, website, social media, etc."),
fields = ["document_id",
"name",
"url",
"comments",
],
filterby = dict(field = "name")
),
S3SQLInlineComponentCheckbox(
"activity_type",
label = T("Categories"),
field = "activity_type_id",
cols = 3,
# Filter Activity Type by Project
filter = {"linktable": "project_activity_type_project",
"lkey": "project_id",
"rkey": "activity_type_id",
},
),
#"budget",
#"currency",
"comments",
)
from s3 import S3TextFilter, S3OptionsFilter, S3LocationFilter, S3DateFilter
filter_widgets = [
S3TextFilter(["name",
"code",
"description",
"organisation.name",
"organisation.acronym",
],
label = T("Name"),
_class = "filter-search",
),
S3OptionsFilter("status_id",
label = T("Status"),
                                # Not translatable
#represent = "%(name)s",
cols = 3,
),
#S3OptionsFilter("theme_project.theme_id",
# label = T("Theme"),
# #hidden = True,
# ),
S3LocationFilter("location.location_id",
label = T("Location"),
levels = ("L1", "L2", "L3", "L4"),
#hidden = True,
),
# @ToDo: Widget to handle Start & End in 1!
S3DateFilter("start_date",
label = T("Start Date"),
hide_time = True,
#hidden = True,
),
S3DateFilter("end_date",
label = T("End Date"),
hide_time = True,
#hidden = True,
),
]
list_fields = ["id",
"name",
"code",
"organisation_id",
"start_date",
"end_date",
(T("Locations"), "location.location_id"),
]
s3db.configure(tablename,
crud_form = crud_form,
filter_widgets = filter_widgets,
list_fields = list_fields,
)
return result
s3.prep = custom_prep
return attr
settings.customise_project_project_controller = customise_project_project_controller
# -----------------------------------------------------------------------------
# Requests Management
settings.req.req_type = ["People", "Stock"]#, "Summary"]
settings.req.prompt_match = False
#settings.req.use_commit = False
settings.req.requester_optional = True
settings.req.date_writable = False
settings.req.item_quantities_writable = True
settings.req.skill_quantities_writable = True
settings.req.items_ask_purpose = False
#settings.req.use_req_number = False
# Label for Requester
settings.req.requester_label = "Site Contact"
# Filter Requester as being from the Site
settings.req.requester_from_site = True
# Label for Inventory Requests
settings.req.type_inv_label = "Supplies"
# Uncomment to enable Summary 'Site Needs' tab for Offices/Facilities
settings.req.summary = True
# -----------------------------------------------------------------------------
def req_req_postprocess(form):
"""
Runs after crud_form completes
- creates a cms_post in the newswire
- @ToDo: Send out Tweets
"""
req_id = form.vars.id
db = current.db
s3db = current.s3db
rtable = s3db.req_req
# Read the full record
row = db(rtable.id == req_id).select(rtable.type,
rtable.site_id,
rtable.requester_id,
rtable.priority,
rtable.date_required,
rtable.purpose,
rtable.comments,
limitby=(0, 1)
).first()
# Build Title & Body from the Request details
priority = rtable.priority.represent(row.priority)
date_required = row.date_required
if date_required:
date = rtable.date_required.represent(date_required)
title = "%(priority)s by %(date)s" % dict(priority=priority,
date=date)
else:
title = priority
body = row.comments
if row.type == 1:
# Items
ritable = s3db.req_req_item
items = db(ritable.req_id == req_id).select(ritable.item_id,
ritable.item_pack_id,
ritable.quantity)
item_represent = s3db.supply_item_represent
pack_represent = s3db.supply_item_pack_represent
for item in items:
item = "%s %s %s" % (item.quantity,
pack_represent(item.item_pack_id),
item_represent(item.item_id))
body = "%s\n%s" % (item, body)
else:
# Skills
body = "%s\n%s" % (row.purpose, body)
rstable = s3db.req_req_skill
skills = db(rstable.req_id == req_id).select(rstable.skill_id,
rstable.quantity)
skill_represent = s3db.hrm_multi_skill_represent
for skill in skills:
item = "%s %s" % (skill.quantity, skill_represent(skill.skill_id))
body = "%s\n%s" % (item, body)
# Lookup series_id
stable = s3db.cms_series
try:
series_id = db(stable.name == "Request").select(stable.id,
cache=s3db.cache,
limitby=(0, 1)
).first().id
except:
# Prepop hasn't been run
series_id = None
# Location is that of the site
otable = s3db.org_site
location_id = db(otable.site_id == row.site_id).select(otable.location_id,
limitby=(0, 1)
).first().location_id
# Create Post
ptable = s3db.cms_post
_id = ptable.insert(series_id=series_id,
title=title,
body=body,
location_id=location_id,
person_id=row.requester_id,
)
record = dict(id=_id)
s3db.update_super(ptable, record)
# Add source link
url = "%s%s" % (settings.get_base_public_url(),
URL(c="req", f="req", args=req_id))
s3db.doc_document.insert(doc_id=record["doc_id"],
url=url,
)
# -----------------------------------------------------------------------------
def customise_req_req_resource(r, tablename):
from s3layouts import S3AddResourceLink
current.s3db.req_req.site_id.comment = \
S3AddResourceLink(c="org", f="facility",
vars = dict(child="site_id"),
title=T("Create Facility"),
tooltip=current.messages.AUTOCOMPLETE_HELP)
current.response.s3.req_req_postprocess = req_req_postprocess
if not r.component and r.method in ("create", "update"):
script = \
'''$('#req_req_site_id').change(function(){
var url=$('#person_add').attr('href')
url=url.split('?')
var q=S3.queryString.parse(url[1])
q['(site)']=$(this).val()
url=url[0]+'?'+S3.queryString.stringify(q)
$('#person_add').attr('href',url)})'''
current.response.s3.jquery_ready.append(script)
settings.customise_req_req_resource = customise_req_req_resource
# -----------------------------------------------------------------------------
# Comment/uncomment modules here to disable/enable them
settings.modules = OrderedDict([
# Core modules which shouldn't be disabled
("default", Storage(
name_nice = T("Home"),
restricted = False, # Use ACLs to control access to this module
access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller
module_type = None # This item is not shown in the menu
)),
("admin", Storage(
name_nice = T("Admin"),
#description = "Site Administration",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("appadmin", Storage(
name_nice = T("Administration"),
#description = "Site Administration",
restricted = True,
module_type = None # No Menu
)),
("errors", Storage(
name_nice = T("Ticket Viewer"),
#description = "Needed for Breadcrumbs",
restricted = False,
module_type = None # No Menu
)),
("sync", Storage(
name_nice = T("Synchronization"),
#description = "Synchronization",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
# Uncomment to enable internal support requests
#("support", Storage(
# name_nice = T("Support"),
# #description = "Support Requests",
# restricted = True,
# module_type = None # This item is handled separately for the menu
# )),
("gis", Storage(
name_nice = T("Map"),
#description = "Situation Awareness & Geospatial Analysis",
restricted = True,
module_type = 9, # 8th item in the menu
)),
("pr", Storage(
name_nice = T("Person Registry"),
#description = "Central point to record details on People",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu (access to controller is possible to all still)
module_type = 10
)),
("org", Storage(
name_nice = T("Locations"),
#description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
restricted = True,
module_type = 4
)),
# All modules below here should be possible to disable safely
("hrm", Storage(
name_nice = T("Contacts"),
#description = "Human Resources Management",
restricted = True,
module_type = 3,
)),
#("vol", Storage(
# name_nice = T("Volunteers"),
# #description = "Human Resources Management",
# restricted = True,
# module_type = 2,
# )),
("cms", Storage(
name_nice = T("Content Management"),
#description = "Content Management System",
restricted = True,
module_type = 10,
)),
("doc", Storage(
name_nice = T("Documents"),
#description = "A library of digital resources, such as photos, documents and reports",
restricted = True,
module_type = None,
)),
("msg", Storage(
name_nice = T("Messaging"),
#description = "Sends & Receives Alerts via Email & SMS",
restricted = True,
        # The user-visible functionality of this module isn't normally required. Rather, its main purpose is to be accessed from other modules.
module_type = None,
)),
("supply", Storage(
name_nice = T("Supply Chain Management"),
#description = "Used within Inventory Management, Request Management and Asset Management",
restricted = True,
module_type = None, # Not displayed
)),
("inv", Storage(
name_nice = T("Inventory"),
#description = "Receiving and Sending Items",
restricted = True,
module_type = 10
)),
#("proc", Storage(
# name_nice = T("Procurement"),
# #description = "Ordering & Purchasing of Goods & Services",
# restricted = True,
# module_type = 10
# )),
("asset", Storage(
name_nice = T("Assets"),
#description = "Recording and Assigning Assets",
restricted = True,
module_type = 10,
)),
# Vehicle depends on Assets
#("vehicle", Storage(
# name_nice = T("Vehicles"),
# #description = "Manage Vehicles",
# restricted = True,
# module_type = 10,
# )),
("req", Storage(
name_nice = T("Requests"),
#description = "Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.",
restricted = True,
module_type = 1,
)),
("project", Storage(
name_nice = T("Projects"),
#description = "Tracking of Projects, Activities and Tasks",
restricted = True,
module_type = 10
)),
("assess", Storage(
name_nice = T("Assessments"),
#description = "Rapid Assessments & Flexible Impact Assessments",
restricted = True,
module_type = 5,
)),
("event", Storage(
name_nice = T("Events"),
#description = "Activate Events (e.g. from Scenario templates) for allocation of appropriate Resources (Human, Assets & Facilities).",
restricted = True,
module_type = 10,
)),
("survey", Storage(
name_nice = T("Surveys"),
#description = "Create, enter, and manage surveys.",
restricted = True,
module_type = 5,
)),
#("cr", Storage(
# name_nice = T("Shelters"),
# #description = "Tracks the location, capacity and breakdown of victims in Shelters",
# restricted = True,
# module_type = 10
# )),
#("dvr", Storage(
# name_nice = T("Disaster Victim Registry"),
# #description = "Allow affected individuals & households to register to receive compensation and distributions",
# restricted = False,
# module_type = 10,
# )),
#("member", Storage(
# name_nice = T("Members"),
# #description = "Membership Management System",
# restricted = True,
# module_type = 10,
# )),
# @ToDo: Rewrite in a modern style
#("budget", Storage(
# name_nice = T("Budgeting Module"),
# #description = "Allows a Budget to be drawn up",
# restricted = True,
# module_type = 10
# )),
# @ToDo: Port these Assessments to the Survey module
#("building", Storage(
# name_nice = T("Building Assessments"),
# #description = "Building Safety Assessments",
# restricted = True,
# module_type = 10,
# )),
])
| 2.09375 | 2 |
experiments/issue561/v2.py | nitinkaveriappa/downward | 4 | 2050 | <reponame>nitinkaveriappa/downward
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from main import main
main("issue561-v1", "issue561-v2")
| 1.375 | 1 |
q2_qemistree/tests/test_fingerprint.py | tgroth97/q2-qemistree | 0 | 2051 | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2018, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from unittest import TestCase, main
import qiime2
import os
from q2_qemistree import MGFDirFmt, SiriusDirFmt, ZodiacDirFmt, OutputDirs
from q2_qemistree import (compute_fragmentation_trees,
rerank_molecular_formulas,
predict_fingerprints)
from q2_qemistree._fingerprint import artifactory
class FingerprintTests(TestCase):
def setUp(self):
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
self.badsirpath = os.path.join(THIS_DIR, 'data/foo/bin')
self.goodsirpath = os.path.join(THIS_DIR, 'data/'
'sirius-linux64-headless-4.0.1/bin')
# MassSpectrometryFeatures
self.ions = qiime2.Artifact.load(os.path.join(THIS_DIR,
'data/sirius.mgf.qza'))
# SiriusFolder
self.sirout = qiime2.Artifact.load(os.path.join(THIS_DIR,
'data/sirFolder.qza'))
# ZodiacFolder
self.zodout = qiime2.Artifact.load(os.path.join(THIS_DIR,
'data/zodFolder.qza'))
def test_artifactory(self):
# everything is working fine
obs = os.environ.get('_JAVA_OPTIONS', '')
res = artifactory(self.goodsirpath, ['--help'],
constructor=OutputDirs, java_flags='-Xms2G')
self.assertEqual(obs, os.environ.get('_JAVA_OPTIONS'))
self.assertTrue(isinstance(res, OutputDirs))
# exceptions are raised
with self.assertRaises(OSError):
res = artifactory(self.badsirpath, ['--help'],
constructor=OutputDirs)
def test_fragmentation_trees(self):
ions = self.ions.view(MGFDirFmt)
result = compute_fragmentation_trees(sirius_path=self.goodsirpath,
features=ions,
ppm_max=15, profile='orbitrap')
contents = os.listdir(result.get_path())
self.assertTrue(('version.txt' in contents))
def test_fragmentation_trees_negative_ionization(self):
ions = self.ions.view(MGFDirFmt)
result = compute_fragmentation_trees(sirius_path=self.goodsirpath,
features=ions,
ppm_max=15, profile='orbitrap',
ionization_mode='negative')
contents = os.listdir(result.get_path())
self.assertTrue(('version.txt' in contents))
def test_fragmentation_trees_exception(self):
ions = self.ions.view(MGFDirFmt)
with self.assertRaises(ValueError):
compute_fragmentation_trees(sirius_path=self.goodsirpath,
features=ions,
ppm_max=15,
profile='orbitrap',
ionization_mode='n3gativ3')
def test_reranking(self):
ions = self.ions.view(MGFDirFmt)
sirout = self.sirout.view(SiriusDirFmt)
result = rerank_molecular_formulas(sirius_path=self.goodsirpath,
fragmentation_trees=sirout,
features=ions)
contents = os.listdir(result.get_path())
self.assertTrue(('zodiac_summary.csv' in contents))
def test_fingerid(self):
zodout = self.zodout.view(ZodiacDirFmt)
result = predict_fingerprints(sirius_path=self.goodsirpath,
molecular_formulas=zodout, ppm_max=15)
contents = os.listdir(result.get_path())
self.assertTrue(('summary_csi_fingerid.csv' in contents))
if __name__ == '__main__':
main()
| 1.6875 | 2 |
chroma-manager/tests/utils/__init__.py | GarimaVishvakarma/intel-chroma | 0 | 2052 | import time
import datetime
import contextlib
@contextlib.contextmanager
def patch(obj, **attrs):
"Monkey patch an object's attributes, restoring them after the block."
stored = {}
for name in attrs:
stored[name] = getattr(obj, name)
setattr(obj, name, attrs[name])
try:
yield
finally:
for name in stored:
setattr(obj, name, stored[name])
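# Illustrative usage sketch (the object and attribute names below are
# hypothetical, not part of this module): attributes are swapped for the
# duration of the block and restored afterwards, even if the block raises.
#
#     with patch(settings, DEBUG=True, TIMEOUT=5):
#         run_checks()          # sees the patched values
#     # settings.DEBUG / settings.TIMEOUT are back to their originals here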
@contextlib.contextmanager
def timed(msg='', threshold=0):
"Print elapsed time of a block, if over optional threshold."
start = time.time()
try:
yield
finally:
elapsed = time.time() - start
if elapsed >= threshold:
print datetime.timedelta(seconds=elapsed), msg
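# Illustrative usage sketch (names are hypothetical): nothing is printed
# unless the block takes at least `threshold` seconds.
#
#     with timed('rebuild cache', threshold=0.5):
#         rebuild_cache()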
| 3.390625 | 3 |
tempo/worker.py | rackerlabs/Tempo | 4 | 2053 | <gh_stars>1-10
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import kombu
from tempo import actions
from tempo import config
from tempo import db
from tempo import notifier
from tempo import queue as tempo_queue
from tempo.openstack.common import cfg
from tempo.openstack.common import exception as common_exception
CFG = config.CFG
logger = logging.getLogger('tempo.worker')
worker_opts = [
cfg.BoolOpt('daemonized',
default=False,
help='Run worker as a daemon'),
cfg.StrOpt('publisher_id',
default='host',
help='Where the notification came from')
]
worker_group = cfg.OptGroup(name='worker', title='Worker options')
CFG.register_group(worker_group)
CFG.register_opts(worker_opts, group=worker_group)
def _perform_task(task):
def _notify(event_type, exception=None):
payload = {'task_uuid': task_uuid}
if exception is not None:
payload['exception'] = exception
publisher_id = CFG.worker.publisher_id
priority = notifier.DEBUG
notifier.notify(publisher_id, event_type, priority, payload)
action = task.action
task_uuid = task.uuid
try:
func = getattr(actions, action)
except AttributeError:
logger.error("unrecognized action '%(action)s' for task task"
" '%(task_uuid)s'" % locals())
return
logger.debug("task '%(task_uuid)s' started: '%(action)s'" % locals())
_notify('Started Task')
try:
func(task)
except Exception as e:
logger.error("task '%(task_uuid)s' errored: %(e)s" % locals())
_notify('Errored Task', exception=e)
else:
logger.debug("task '%(task_uuid)s' finished: returned successfully" %
locals())
_notify('Finished Task')
def _process_message(body, message):
message.ack()
task_uuid = body['task_uuid']
try:
task = db.task_get(task_uuid)
except common_exception.NotFound:
logger.error("Task '%(task_uuid)s' not found" % locals())
return
_perform_task(task)
def _consume_messages(exchange, queue, key):
kombu_xchg = kombu.Exchange(exchange, 'direct', durable=True)
kombu_queue = kombu.Queue(queue, exchange=kombu_xchg, key=key)
connection = tempo_queue.get_connection()
consumer = kombu.Consumer(connection.channel(), kombu_queue)
consumer.register_callback(_process_message)
consumer.consume()
while True:
connection.drain_events()
def consume_messages(exchange, queue, key):
if CFG.worker.daemonized:
# TODO(mdietz): there's a cleaner way to do this, but this works well
# as a way of backgrounding the server for now
import daemon
with daemon.DaemonContext():
_consume_messages(exchange, queue, key)
else:
_consume_messages(exchange, queue, key)
| 1.78125 | 2 |
bin/basenji_motifs.py | AndyPJiang/basenji | 1 | 2054 | #!/usr/bin/env python
# Copyright 2017 Calico LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from __future__ import print_function
from optparse import OptionParser
import copy, os, pdb, random, shutil, subprocess, time
import h5py
import matplotlib
matplotlib.use('PDF')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.stats import spearmanr
import seaborn as sns
from sklearn import preprocessing
import tensorflow as tf
import basenji
'''
basenji_motifs.py
Collect statistics and make plots to explore the first convolution layer
of the given model using the given sequences.
'''
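# Example invocation (file names are hypothetical, not part of this script):
#
#   basenji_motifs.py -o motifs_out -s 1000 -t params.txt model.tf test_data.h5
#
# -o sets the output directory, -s samples test sequences, -t trims filter ends.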
weblogo_opts = '-X NO -Y NO --errorbars NO --fineprint ""'
weblogo_opts += ' -C "#CB2026" A A'
weblogo_opts += ' -C "#34459C" C C'
weblogo_opts += ' -C "#FBB116" G G'
weblogo_opts += ' -C "#0C8040" T T'
################################################################################
# main
################################################################################
def main():
usage = 'usage: %prog [options] <params_file> <model_file> <data_file>'
parser = OptionParser(usage)
parser.add_option(
'-a',
dest='act_t',
default=0.5,
type='float',
help=
'Activation threshold (as proportion of max) to consider for PWM [Default: %default]'
)
parser.add_option(
'-d',
dest='model_hdf5_file',
default=None,
help='Pre-computed model output as HDF5.')
parser.add_option('-o', dest='out_dir', default='.')
parser.add_option(
'-m',
dest='meme_db',
default='%s/data/motifs/Homo_sapiens.meme' % os.environ['BASENJIDIR'],
help='MEME database used to annotate motifs')
parser.add_option(
'-p',
dest='plot_heats',
default=False,
action='store_true',
help=
'Plot heat maps describing filter activations in the test sequences [Default: %default]'
)
parser.add_option(
'-s',
dest='sample',
default=None,
type='int',
help='Sample sequences from the test set [Default:%default]')
parser.add_option(
'-t',
dest='trim_filters',
default=False,
action='store_true',
help='Trim uninformative positions off the filter ends [Default: %default]'
)
(options, args) = parser.parse_args()
if len(args) != 3:
parser.error(
'Must provide Basenji parameters and model files and test data in HDF5'
' format.'
)
else:
params_file = args[0]
model_file = args[1]
data_file = args[2]
if not os.path.isdir(options.out_dir):
os.mkdir(options.out_dir)
#################################################################
# load data
data_open = h5py.File(data_file)
test_seqs1 = data_open['test_in']
test_targets = data_open['test_out']
try:
target_names = list(data_open['target_labels'])
except KeyError:
target_names = ['t%d' % ti for ti in range(test_targets.shape[1])]
if options.sample is not None:
# choose sampled indexes
sample_i = sorted(random.sample(range(test_seqs1.shape[0]), options.sample))
# filter
test_seqs1 = test_seqs1[sample_i]
test_targets = test_targets[sample_i]
# convert to letters
test_seqs = basenji.dna_io.hot1_dna(test_seqs1)
#################################################################
# model parameters and placeholders
job = basenji.dna_io.read_job_params(params_file)
job['seq_length'] = test_seqs1.shape[1]
job['seq_depth'] = test_seqs1.shape[2]
job['num_targets'] = test_targets.shape[2]
job['target_pool'] = int(np.array(data_open.get('pool_width', 1)))
t0 = time.time()
dr = basenji.seqnn.SeqNN()
dr.build(job)
print('Model building time %ds' % (time.time() - t0))
# adjust for fourier
job['fourier'] = 'train_out_imag' in data_open
if job['fourier']:
test_targets_imag = data_open['test_out_imag']
if options.valid:
test_targets_imag = data_open['valid_out_imag']
#################################################################
# predict
# initialize batcher
if job['fourier']:
batcher_test = basenji.batcher.BatcherF(
test_seqs1,
test_targets,
test_targets_imag,
batch_size=dr.batch_size,
pool_width=job['target_pool'])
else:
batcher_test = basenji.batcher.Batcher(
test_seqs1,
test_targets,
batch_size=dr.batch_size,
pool_width=job['target_pool'])
# initialize saver
saver = tf.train.Saver()
with tf.Session() as sess:
# load variables into session
saver.restore(sess, model_file)
# get weights
filter_weights = sess.run(dr.filter_weights[0])
filter_weights = np.transpose(np.squeeze(filter_weights), [2, 1, 0])
print(filter_weights.shape)
# test
t0 = time.time()
layer_filter_outs, _ = dr.hidden(sess, batcher_test, layers=[0])
filter_outs = layer_filter_outs[0]
print(filter_outs.shape)
# store useful variables
num_filters = filter_weights.shape[0]
filter_size = filter_weights.shape[2]
#################################################################
# individual filter plots
#################################################################
# also save information contents
filters_ic = []
meme_out = meme_intro('%s/filters_meme.txt' % options.out_dir, test_seqs)
for f in range(num_filters):
print('Filter %d' % f)
# plot filter parameters as a heatmap
plot_filter_heat(filter_weights[f, :, :],
'%s/filter%d_heat.pdf' % (options.out_dir, f))
# write possum motif file
filter_possum(filter_weights[f, :, :], 'filter%d' % f,
'%s/filter%d_possum.txt' % (options.out_dir,
f), options.trim_filters)
# plot weblogo of high scoring outputs
plot_filter_logo(
filter_outs[:, :, f],
filter_size,
test_seqs,
'%s/filter%d_logo' % (options.out_dir, f),
maxpct_t=options.act_t)
# make a PWM for the filter
filter_pwm, nsites = make_filter_pwm('%s/filter%d_logo.fa' %
(options.out_dir, f))
if nsites < 10:
# no information
filters_ic.append(0)
else:
# compute and save information content
filters_ic.append(info_content(filter_pwm))
# add to the meme motif file
meme_add(meme_out, f, filter_pwm, nsites, options.trim_filters)
meme_out.close()
#################################################################
# annotate filters
#################################################################
# run tomtom
subprocess.call(
'tomtom -dist pearson -thresh 0.1 -oc %s/tomtom %s/filters_meme.txt %s' %
(options.out_dir, options.out_dir, options.meme_db),
shell=True)
# read in annotations
filter_names = name_filters(
num_filters, '%s/tomtom/tomtom.txt' % options.out_dir, options.meme_db)
#################################################################
# print a table of information
#################################################################
table_out = open('%s/table.txt' % options.out_dir, 'w')
  # print header for later pandas reading
header_cols = ('', 'consensus', 'annotation', 'ic', 'mean', 'std')
print('%3s %19s %10s %5s %6s %6s' % header_cols, file=table_out)
for f in range(num_filters):
# collapse to a consensus motif
consensus = filter_motif(filter_weights[f, :, :])
# grab annotation
annotation = '.'
name_pieces = filter_names[f].split('_')
if len(name_pieces) > 1:
annotation = name_pieces[1]
# plot density of filter output scores
fmean, fstd = plot_score_density(
np.ravel(filter_outs[:, :, f]),
'%s/filter%d_dens.pdf' % (options.out_dir, f))
row_cols = (f, consensus, annotation, filters_ic[f], fmean, fstd)
print('%-3d %19s %10s %5.2f %6.4f %6.4f' % row_cols, file=table_out)
table_out.close()
#################################################################
# global filter plots
#################################################################
if options.plot_heats:
# plot filter-sequence heatmap
plot_filter_seq_heat(filter_outs, '%s/filter_seqs.pdf' % options.out_dir)
# plot filter-segment heatmap
plot_filter_seg_heat(filter_outs, '%s/filter_segs.pdf' % options.out_dir)
plot_filter_seg_heat(
filter_outs, '%s/filter_segs_raw.pdf' % options.out_dir, whiten=False)
# plot filter-target correlation heatmap
plot_target_corr(filter_outs, seq_targets, filter_names, target_names,
'%s/filter_target_cors_mean.pdf' % options.out_dir, 'mean')
plot_target_corr(filter_outs, seq_targets, filter_names, target_names,
'%s/filter_target_cors_max.pdf' % options.out_dir, 'max')
def get_motif_proteins(meme_db_file):
""" Hash motif_id's to protein names using the MEME DB file """
motif_protein = {}
for line in open(meme_db_file):
a = line.split()
if len(a) > 0 and a[0] == 'MOTIF':
if a[2][0] == '(':
motif_protein[a[1]] = a[2][1:a[2].find(')')]
else:
motif_protein[a[1]] = a[2]
return motif_protein
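# Note (illustrative, not from the original script): MEME database MOTIF lines
# usually look like "MOTIF MA0139.1 CTCF" or "MOTIF M00008 (SP1)"; the
# parenthesized form is why the '(' / ')' stripping above is needed.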
def info_content(pwm, transpose=False, bg_gc=0.415):
""" Compute PWM information content.
In the original analysis, I used a bg_gc=0.5. For any
future analysis, I ought to switch to the true hg19
value of 0.415.
"""
pseudoc = 1e-9
if transpose:
pwm = np.transpose(pwm)
bg_pwm = [1 - bg_gc, bg_gc, bg_gc, 1 - bg_gc]
ic = 0
for i in range(pwm.shape[0]):
for j in range(4):
# ic += 0.5 + pwm[i][j]*np.log2(pseudoc+pwm[i][j])
ic += -bg_pwm[j] * np.log2(
bg_pwm[j]) + pwm[i][j] * np.log2(pseudoc + pwm[i][j])
return ic
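# Quick sanity check (illustrative, not part of the original script): with the
# default bg_gc=0.415, a fully-certain PWM column contributes ~1.96 bits while
# a uniform column contributes ~-0.04 bits, e.g.
#   info_content(np.array([[1.00, 0.00, 0.00, 0.00]]))  # ~1.96
#   info_content(np.array([[0.25, 0.25, 0.25, 0.25]]))  # ~-0.04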
def make_filter_pwm(filter_fasta):
""" Make a PWM for this filter from its top hits """
nts = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
pwm_counts = []
nsites = 4 # pseudocounts
for line in open(filter_fasta):
if line[0] != '>':
seq = line.rstrip()
nsites += 1
if len(pwm_counts) == 0:
# initialize with the length
for i in range(len(seq)):
pwm_counts.append(np.array([1.0] * 4))
# count
for i in range(len(seq)):
try:
pwm_counts[i][nts[seq[i]]] += 1
except KeyError:
pwm_counts[i] += np.array([0.25] * 4)
# normalize
pwm_freqs = []
for i in range(len(pwm_counts)):
pwm_freqs.append([pwm_counts[i][j] / float(nsites) for j in range(4)])
return np.array(pwm_freqs), nsites - 4
def meme_add(meme_out, f, filter_pwm, nsites, trim_filters=False):
""" Print a filter to the growing MEME file
Attrs:
meme_out : open file
f (int) : filter index #
filter_pwm (array) : filter PWM array
nsites (int) : number of filter sites
"""
if not trim_filters:
ic_start = 0
ic_end = filter_pwm.shape[0] - 1
else:
ic_t = 0.2
# trim PWM of uninformative prefix
ic_start = 0
while ic_start < filter_pwm.shape[0] and info_content(
filter_pwm[ic_start:ic_start + 1]) < ic_t:
ic_start += 1
# trim PWM of uninformative suffix
ic_end = filter_pwm.shape[0] - 1
while ic_end >= 0 and info_content(filter_pwm[ic_end:ic_end + 1]) < ic_t:
ic_end -= 1
if ic_start < ic_end:
print('MOTIF filter%d' % f, file=meme_out)
print(
'letter-probability matrix: alength= 4 w= %d nsites= %d' %
(ic_end - ic_start + 1, nsites),
file=meme_out)
for i in range(ic_start, ic_end + 1):
print('%.4f %.4f %.4f %.4f' % tuple(filter_pwm[i]), file=meme_out)
print('', file=meme_out)
def meme_intro(meme_file, seqs):
""" Open MEME motif format file and print intro
Attrs:
meme_file (str) : filename
seqs [str] : list of strings for obtaining background freqs
Returns:
mem_out : open MEME file
"""
nts = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
# count
nt_counts = [1] * 4
for i in range(len(seqs)):
for nt in seqs[i]:
try:
nt_counts[nts[nt]] += 1
except KeyError:
pass
# normalize
nt_sum = float(sum(nt_counts))
nt_freqs = [nt_counts[i] / nt_sum for i in range(4)]
# open file for writing
meme_out = open(meme_file, 'w')
# print intro material
print('MEME version 4', file=meme_out)
print('', file=meme_out)
print('ALPHABET= ACGT', file=meme_out)
print('', file=meme_out)
print('Background letter frequencies:', file=meme_out)
print('A %.4f C %.4f G %.4f T %.4f' % tuple(nt_freqs), file=meme_out)
print('', file=meme_out)
return meme_out
def name_filters(num_filters, tomtom_file, meme_db_file):
""" Name the filters using Tomtom matches.
Attrs:
num_filters (int) : total number of filters
tomtom_file (str) : filename of Tomtom output table.
meme_db_file (str) : filename of MEME db
Returns:
filter_names [str] :
"""
# name by number
filter_names = ['f%d' % fi for fi in range(num_filters)]
# name by protein
if tomtom_file is not None and meme_db_file is not None:
motif_protein = get_motif_proteins(meme_db_file)
# hash motifs and q-value's by filter
filter_motifs = {}
tt_in = open(tomtom_file)
tt_in.readline()
for line in tt_in:
a = line.split()
fi = int(a[0][6:])
motif_id = a[1]
qval = float(a[5])
filter_motifs.setdefault(fi, []).append((qval, motif_id))
tt_in.close()
# assign filter's best match
for fi in filter_motifs:
top_motif = sorted(filter_motifs[fi])[0][1]
filter_names[fi] += '_%s' % motif_protein[top_motif]
return np.array(filter_names)
################################################################################
# plot_target_corr
#
# Plot a clustered heatmap of correlations between filter activations and
# targets.
#
# Input
# filter_outs:
# filter_names:
# target_names:
# out_pdf:
################################################################################
def plot_target_corr(filter_outs, seq_targets, filter_names, target_names, out_pdf, seq_op='mean'):
num_seqs = filter_outs.shape[0]
num_targets = len(target_names)
if seq_op == 'mean':
filter_outs_seq = filter_outs.mean(axis=2)
else:
filter_outs_seq = filter_outs.max(axis=2)
# std is sequence by filter.
filter_seqs_std = filter_outs_seq.std(axis=0)
filter_outs_seq = filter_outs_seq[:, filter_seqs_std > 0]
filter_names_live = filter_names[filter_seqs_std > 0]
filter_target_cors = np.zeros((len(filter_names_live), num_targets))
for fi in range(len(filter_names_live)):
for ti in range(num_targets):
cor, p = spearmanr(filter_outs_seq[:, fi], seq_targets[:num_seqs, ti])
filter_target_cors[fi, ti] = cor
cor_df = pd.DataFrame(
filter_target_cors, index=filter_names_live, columns=target_names)
sns.set(font_scale=0.3)
plt.figure()
sns.clustermap(cor_df, cmap='BrBG', center=0, figsize=(8, 10))
plt.savefig(out_pdf)
plt.close()
################################################################################
# plot_filter_seq_heat
#
# Plot a clustered heatmap of filter activations in
#
# Input
# param_matrix: np.array of the filter's parameter matrix
# out_pdf:
################################################################################
def plot_filter_seq_heat(filter_outs, out_pdf, whiten=True, drop_dead=True):
# compute filter output means per sequence
filter_seqs = filter_outs.mean(axis=2)
# whiten
if whiten:
filter_seqs = preprocessing.scale(filter_seqs)
# transpose
filter_seqs = np.transpose(filter_seqs)
if drop_dead:
filter_stds = filter_seqs.std(axis=1)
filter_seqs = filter_seqs[filter_stds > 0]
# downsample sequences
seqs_i = np.random.randint(0, filter_seqs.shape[1], 500)
hmin = np.percentile(filter_seqs[:, seqs_i], 0.1)
hmax = np.percentile(filter_seqs[:, seqs_i], 99.9)
sns.set(font_scale=0.3)
plt.figure()
sns.clustermap(
filter_seqs[:, seqs_i],
row_cluster=True,
col_cluster=True,
linewidths=0,
xticklabels=False,
vmin=hmin,
vmax=hmax)
plt.savefig(out_pdf)
#out_png = out_pdf[:-2] + 'ng'
#plt.savefig(out_png, dpi=300)
plt.close()
################################################################################
# plot_filter_seg_heat
#
# Plot a clustered heatmap of filter activations in sequence segments.
#
# Mean doesn't work well for the smaller segments for some reason, but taking
# the max looks OK. Still, similar motifs don't cluster quite as well as you
# might expect.
#
# Input
# filter_outs
################################################################################
def plot_filter_seg_heat(filter_outs, out_pdf, whiten=True, drop_dead=True):
b = filter_outs.shape[0]
f = filter_outs.shape[1]
l = filter_outs.shape[2]
  s = 5
  # find the smallest segment count >= 5 that divides the length evenly
  while l % s != 0:
    s += 1
  print('%d segments of length %d' % (s, l // s))
  # split into multiple segments
  filter_outs_seg = np.reshape(filter_outs, (b, f, s, l // s))
  # max across the segments (mean washes out smaller segments; see note above)
  filter_outs_mean = filter_outs_seg.max(axis=3)
# break each segment into a new instance
filter_seqs = np.reshape(np.swapaxes(filter_outs_mean, 2, 1), (s * b, f))
# whiten
if whiten:
filter_seqs = preprocessing.scale(filter_seqs)
# transpose
filter_seqs = np.transpose(filter_seqs)
if drop_dead:
filter_stds = filter_seqs.std(axis=1)
filter_seqs = filter_seqs[filter_stds > 0]
# downsample sequences
seqs_i = np.random.randint(0, filter_seqs.shape[1], 500)
hmin = np.percentile(filter_seqs[:, seqs_i], 0.1)
hmax = np.percentile(filter_seqs[:, seqs_i], 99.9)
sns.set(font_scale=0.3)
if whiten:
dist = 'euclidean'
else:
dist = 'cosine'
plt.figure()
sns.clustermap(
filter_seqs[:, seqs_i],
metric=dist,
row_cluster=True,
col_cluster=True,
linewidths=0,
xticklabels=False,
vmin=hmin,
vmax=hmax)
plt.savefig(out_pdf)
#out_png = out_pdf[:-2] + 'ng'
#plt.savefig(out_png, dpi=300)
plt.close()
################################################################################
# filter_motif
#
# Collapse the filter parameter matrix to a single DNA motif.
#
# Input
# param_matrix: np.array of the filter's parameter matrix
# out_pdf:
################################################################################
def filter_motif(param_matrix):
nts = 'ACGT'
motif_list = []
for v in range(param_matrix.shape[1]):
max_n = 0
for n in range(1, 4):
if param_matrix[n, v] > param_matrix[max_n, v]:
max_n = n
if param_matrix[max_n, v] > 0:
motif_list.append(nts[max_n])
else:
motif_list.append('N')
return ''.join(motif_list)
################################################################################
# filter_possum
#
# Write a Possum-style motif
#
# Input
# param_matrix: np.array of the filter's parameter matrix
# out_pdf:
################################################################################
def filter_possum(param_matrix, motif_id, possum_file, trim_filters=False, mult=200):
# possible trim
trim_start = 0
trim_end = param_matrix.shape[1] - 1
trim_t = 0.3
if trim_filters:
# trim PWM of uninformative prefix
while trim_start < param_matrix.shape[1] and np.max(
param_matrix[:, trim_start]) - np.min(
param_matrix[:, trim_start]) < trim_t:
trim_start += 1
# trim PWM of uninformative suffix
while trim_end >= 0 and np.max(param_matrix[:, trim_end]) - np.min(
param_matrix[:, trim_end]) < trim_t:
trim_end -= 1
if trim_start < trim_end:
possum_out = open(possum_file, 'w')
print('BEGIN GROUP', file=possum_out)
print('BEGIN FLOAT', file=possum_out)
print('ID %s' % motif_id, file=possum_out)
print('AP DNA', file=possum_out)
print('LE %d' % (trim_end + 1 - trim_start), file=possum_out)
for ci in range(trim_start, trim_end + 1):
print(
'MA %s' % ' '.join(['%.2f' % (mult * n)
for n in param_matrix[:, ci]]),
file=possum_out)
print('END', file=possum_out)
print('END', file=possum_out)
possum_out.close()
################################################################################
# plot_filter_heat
#
# Plot a heatmap of the filter's parameters.
#
# Input
# param_matrix: np.array of the filter's parameter matrix
# out_pdf:
################################################################################
def plot_filter_heat(param_matrix, out_pdf):
param_range = abs(param_matrix).max()
sns.set(font_scale=2)
plt.figure(figsize=(param_matrix.shape[1], 4))
sns.heatmap(
param_matrix,
cmap='PRGn',
linewidths=0.2,
vmin=-param_range,
vmax=param_range)
ax = plt.gca()
ax.set_xticklabels(range(1, param_matrix.shape[1] + 1))
ax.set_yticklabels('TGCA', rotation='horizontal') # , size=10)
plt.savefig(out_pdf)
plt.close()
################################################################################
# plot_filter_logo
#
# Plot a weblogo of the filter's occurrences
#
# Input
# param_matrix: np.array of the filter's parameter matrix
# out_pdf:
################################################################################
def plot_filter_logo(filter_outs, filter_size, seqs, out_prefix, raw_t=0, maxpct_t=None):
if maxpct_t:
all_outs = np.ravel(filter_outs)
all_outs_mean = all_outs.mean()
all_outs_norm = all_outs - all_outs_mean
raw_t = maxpct_t * all_outs_norm.max() + all_outs_mean
left_pad = (filter_size - 1) // 2
right_pad = filter_size - left_pad
# print fasta file of positive outputs
filter_fasta_out = open('%s.fa' % out_prefix, 'w')
filter_count = 0
for i in range(filter_outs.shape[0]):
for j in range(filter_outs.shape[1]):
if filter_outs[i, j] > raw_t:
# construct kmer
kmer = ''
# determine boundaries, considering padding
fstart = j - left_pad
fend = fstart + filter_size
# if it starts in left_pad
if fstart < 0:
kmer += 'N' * (-fstart)
fstart = 0
# add primary sequence
kmer += seqs[i][fstart:fend]
# if it ends in right_pad
if fend > len(seqs[i]):
kmer += 'N' * (fend - len(seqs[i]))
# output
print('>%d_%d' % (i, j), file=filter_fasta_out)
print(kmer, file=filter_fasta_out)
filter_count += 1
filter_fasta_out.close()
# make weblogo
if filter_count > 0:
weblogo_cmd = 'weblogo %s < %s.fa > %s.eps' % (weblogo_opts, out_prefix,
out_prefix)
subprocess.call(weblogo_cmd, shell=True)
################################################################################
# plot_score_density
#
# Plot the score density and print to the stats table.
#
# Input
# param_matrix: np.array of the filter's parameter matrix
# out_pdf:
################################################################################
def plot_score_density(f_scores, out_pdf):
sns.set(font_scale=1.3)
plt.figure()
sns.distplot(f_scores, kde=False)
plt.xlabel('ReLU output')
plt.savefig(out_pdf)
plt.close()
return f_scores.mean(), f_scores.std()
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
main()
# pdb.runcall(main)
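# Example invocation (illustrative; the script name and file paths below are
# placeholders for the Basenji parameters file, trained model, and HDF5 test
# data):
#   python basenji_motifs.py -s 1000 -t params.txt model.tf test_data.h5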
| 2 | 2 |
apps/shop/urls.py | Joetib/jshop | 1 | 2055 | from django.urls import path
from . import views
app_name = "shop"
urlpatterns = [
path('', views.HomePage.as_view(), name="home-page"),
path('shop/', views.ProductListView.as_view(), name="product-list"),
path('shop/<int:category_pk>/', views.ProductListView.as_view(), name="product-list"),
path('shop/products/<int:pk>/', views.ProductDetailView.as_view(), name="product-detail"),
path('cart/', views.cart_view, name="cart"),
path('cart/add/<int:product_pk>/', views.add_product_to_order, name="add-product-to-cart"),
path('cart/add/<int:product_pk>/json/', views.add_product_to_cart_json, name="add-product-to-cart-json"),
path('checkout/', views.CheckOut.as_view(), name="checkout"),
path('checkout/<int:address_pk>/', views.CheckOut.as_view(), name="checkout"),
path('payment/', views.PaymentChoice.as_view(), name="payment-choice"),
path('payment/order/<int:pk>/', views.MomoPayment.as_view(), name="momo-payment"),
path('payment/momo/<int:pk>/confirm/', views.ConfirmMomoPayment.as_view(), name="confirm-momo-payment"),
path('orders/', views.OrderList.as_view(), name="order-list"),
path('orders/<int:pk>/', views.OrderDetail.as_view(), name="order-detail"),
path('orders/<int:order_id>/items/<int:pk>/', views.OrderItemDetail.as_view(), name="order-item-detail"),
]
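# Usage note (illustrative): because app_name is "shop", these routes are
# reversed through the namespace, e.g.
#   reverse("shop:product-detail", kwargs={"pk": 1}) -> "/shop/products/1/"
# assuming this URLconf is included at the site root.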
| 2 | 2 |
surpyval/parametric/expo_weibull.py | dfm/SurPyval | 0 | 2056 | <gh_stars>0
import autograd.numpy as np
from scipy.stats import uniform
from autograd import jacobian
from numpy import euler_gamma
from scipy.special import gamma as gamma_func
from scipy.special import ndtri as z
from scipy import integrate
from scipy.optimize import minimize
from surpyval import parametric as para
from surpyval import nonparametric as nonp
from surpyval.parametric.parametric_fitter import ParametricFitter
from .fitters.mpp import mpp
class ExpoWeibull_(ParametricFitter):
def __init__(self, name):
self.name = name
self.k = 3
self.bounds = ((0, None), (0, None), (0, None),)
self.support = (0, np.inf)
self.plot_x_scale = 'log'
self.y_ticks = [0.0001, 0.0002, 0.0003, 0.001, 0.002,
0.003, 0.005, 0.01, 0.02, 0.03, 0.05,
0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8,
0.9, 0.95, 0.99, 0.999, 0.9999]
self.param_names = ['alpha', 'beta', 'mu']
self.param_map = {
'alpha' : 0,
'beta' : 1,
'mu' : 2
}
def _parameter_initialiser(self, x, c=None, n=None, offset=False):
log_x = np.log(x)
log_x[np.isnan(log_x)] = 0
gumb = para.Gumbel.fit(log_x, c, n, how='MLE')
if not gumb.res.success:
gumb = para.Gumbel.fit(log_x, c, n, how='MPP')
mu, sigma = gumb.params
alpha, beta = np.exp(mu), 1. / sigma
if (np.isinf(alpha) | np.isnan(alpha)):
alpha = np.median(x)
if (np.isinf(beta) | np.isnan(beta)):
beta = 1.
if offset:
gamma = np.min(x) - (np.max(x) - np.min(x))/10.
return gamma, alpha, beta, 1.
else:
return alpha, beta, 1.
def sf(self, x, alpha, beta, mu):
r"""
Survival (or reliability) function for the ExpoWeibull Distribution:
.. math::
R(x) = 1 - \left [ 1 - e^{-\left ( \frac{x}{\alpha} \right )^\beta} \right ]^{\mu}
Parameters
----------
x : numpy array or scalar
The values at which the function will be calculated
alpha : numpy array or scalar
scale parameter for the ExpoWeibull distribution
beta : numpy array or scalar
shape parameter for the ExpoWeibull distribution
mu : numpy array or scalar
shape parameter for the ExpoWeibull distribution
Returns
-------
sf : scalar or numpy array
The value(s) of the reliability function at x.
Examples
--------
>>> import numpy as np
>>> from surpyval import ExpoWeibull
>>> x = np.array([1, 2, 3, 4, 5])
>>> ExpoWeibull.sf(x, 3, 4, 1.2)
array([9.94911330e-01, 8.72902497e-01, 4.23286791e-01, 5.06674866e-02,
5.34717283e-04])
"""
return 1 - np.power(1 - np.exp(-(x / alpha)**beta), mu)
def ff(self, x, alpha, beta, mu):
r"""
Failure (CDF or unreliability) function for the ExpoWeibull Distribution:
.. math::
F(x) = \left [ 1 - e^{-\left ( \frac{x}{\alpha} \right )^\beta} \right ]^{\mu}
Parameters
----------
x : numpy array or scalar
The values at which the function will be calculated
alpha : numpy array or scalar
scale parameter for the ExpoWeibull distribution
beta : numpy array or scalar
shape parameter for the ExpoWeibull distribution
mu : numpy array or scalar
shape parameter for the ExpoWeibull distribution
Returns
-------
sf : scalar or numpy array
The value(s) of the failure function at x.
Examples
--------
>>> import numpy as np
>>> from surpyval import ExpoWeibull
>>> x = np.array([1, 2, 3, 4, 5])
>>> ExpoWeibull.ff(x, 3, 4, 1.2)
array([0.00508867, 0.1270975 , 0.57671321, 0.94933251, 0.99946528])
"""
return np.power(1 - np.exp(-(x / alpha)**beta), mu)
def cs(self, x, X, alpha, beta, mu):
r"""
Conditional survival (or reliability) function for the ExpoWeibull Distribution:
.. math::
R(x, X) = \frac{R(x + X)}{R(X)}
Parameters
----------
        x : numpy array or scalar
            The values at which the function will be calculated
        X : numpy array or scalar
            The value(s) of the existing survival time(s) on which to condition
        alpha : numpy array or scalar
scale parameter for the ExpoWeibull distribution
beta : numpy array or scalar
shape parameter for the ExpoWeibull distribution
mu : numpy array or scalar
shape parameter for the ExpoWeibull distribution
Returns
-------
sf : scalar or numpy array
The value(s) of the reliability function at x.
Examples
--------
>>> import numpy as np
>>> from surpyval import ExpoWeibull
>>> x = np.array([1, 2, 3, 4, 5])
        >>> ExpoWeibull.cs(x, 1, 3, 4, 1.2)
array([8.77367129e-01, 4.25451775e-01, 5.09266354e-02, 5.37452200e-04,
1.35732908e-07])
"""
return self.sf(x + X, alpha, beta, mu) / self.sf(X, alpha, beta, mu)
def df(self, x, alpha, beta, mu):
r"""
Density function for the ExpoWeibull Distribution:
.. math::
f(x) = \mu \left ( \frac{\beta}{\alpha} \right ) \left ( \frac{x}{\alpha} \right )^{\beta - 1} \left [ 1 - e^{-\left ( \frac{x}{\alpha} \right )^\beta} \right ]^{\mu - 1} e^{- \left ( \frac{x}{\alpha} \right )^\beta}
Parameters
----------
x : numpy array or scalar
The values at which the function will be calculated
alpha : numpy array or scalar
scale parameter for the ExpoWeibull distribution
beta : numpy array or scalar
shape parameter for the ExpoWeibull distribution
mu : numpy array or scalar
shape parameter for the ExpoWeibull distribution
Returns
-------
df : scalar or numpy array
The value(s) of the density function at x.
Examples
--------
>>> import numpy as np
>>> from surpyval import ExpoWeibull
>>> x = np.array([1, 2, 3, 4, 5])
>>> ExpoWeibull.df(x, 3, 4, 1.2)
array([0.02427515, 0.27589838, 0.53701385, 0.15943643, 0.00330058])
"""
return (beta * mu * x**(beta - 1)) / (alpha**beta) \
* (1 - np.exp(-(x/alpha)**beta))**(mu - 1) \
* np.exp(-(x/alpha)**beta)
def hf(self, x, alpha, beta, mu):
r"""
Instantaneous hazard rate for the ExpoWeibull Distribution:
.. math::
h(x) = \frac{f(x)}{R(x)}
Parameters
----------
x : numpy array or scalar
The values at which the function will be calculated
alpha : numpy array or scalar
scale parameter for the ExpoWeibull distribution
beta : numpy array or scalar
shape parameter for the ExpoWeibull distribution
mu : numpy array or scalar
shape parameter for the ExpoWeibull distribution
Returns
-------
hf : scalar or numpy array
The value(s) of the instantaneous hazard rate at x.
Examples
--------
>>> import numpy as np
>>> from surpyval import ExpoWeibull
>>> x = np.array([1, 2, 3, 4, 5])
>>> ExpoWeibull.hf(x, 3, 4, 1.2)
array([0.02439931, 0.3160701 , 1.26867613, 3.14672068, 6.17256436])
"""
return self.df(x, alpha, beta, mu) / self.sf(x, alpha, beta, mu)
def Hf(self, x, alpha, beta, mu):
r"""
        Cumulative hazard rate for the ExpoWeibull Distribution:
.. math::
H(x) = -\ln \left ( R(x) \right )
Parameters
----------
x : numpy array or scalar
The values at which the function will be calculated
alpha : numpy array or scalar
scale parameter for the ExpoWeibull distribution
beta : numpy array or scalar
shape parameter for the ExpoWeibull distribution
mu : numpy array or scalar
shape parameter for the ExpoWeibull distribution
Returns
-------
Hf : scalar or numpy array
The value(s) of the cumulative hazard rate at x.
Examples
--------
>>> import numpy as np
>>> from surpyval import ExpoWeibull
>>> x = np.array([1, 2, 3, 4, 5])
>>> ExpoWeibull.Hf(x, 3, 4, 1.2)
array([5.10166141e-03, 1.35931416e-01, 8.59705336e-01, 2.98247086e+00,
7.53377239e+00])
"""
return -np.log(self.sf(x, alpha, beta, mu))
def qf(self, p, alpha, beta, mu):
r"""
        Quantile function for the ExpoWeibull Distribution:
        .. math::
            q(p) = \alpha \left [ -\ln \left ( 1 - p^{1/\mu} \right ) \right ]^{1/\beta}
Parameters
----------
p : numpy array or scalar
The percentiles at which the quantile will be calculated
alpha : numpy array or scalar
scale parameter for the ExpoWeibull distribution
beta : numpy array or scalar
shape parameter for the ExpoWeibull distribution
mu : numpy array or scalar
shape parameter for the ExpoWeibull distribution
Returns
-------
Q : scalar or numpy array
            The quantiles of the ExpoWeibull distribution at each value p
Examples
--------
>>> import numpy as np
>>> from surpyval import ExpoWeibull
>>> p = np.array([.1, .2, .3, .4, .5])
>>> ExpoWeibull.qf(p, 3, 4, 1.2)
array([1.89361341, 2.2261045 , 2.46627621, 2.66992747, 2.85807988])
"""
return alpha * (-np.log(1 - p**(1./mu)))**(1/beta)
def mean(self, alpha, beta, mu):
func = lambda x : x * self.df(x, alpha, beta, mu)
top = 2 * self.qf(0.999, alpha, beta, mu)
return integrate.quadrature(func, 0, top)[0]
def random(self, size, alpha, beta, mu):
U = uniform.rvs(size=size)
return self.qf(U, alpha, beta, mu)
def mpp_x_transform(self, x, gamma=0):
return np.log(x - gamma)
def mpp_y_transform(self, y, *params):
mu = params[-1]
mask = ((y == 0) | (y == 1))
out = np.zeros_like(y)
out[~mask] = np.log(-np.log((1 - y[~mask]**(1./mu))))
out[mask] = np.nan
return out
def mpp_inv_y_transform(self, y, *params):
i = len(params)
mu = params[i-1]
return (1 - np.exp(-np.exp(y)))**mu
def unpack_rr(self, params, rr):
#UPDATE ME
if rr == 'y':
beta = params[0]
alpha = np.exp(params[1]/-beta)
elif rr == 'x':
beta = 1./params[0]
alpha = np.exp(params[1] / (beta * params[0]))
return alpha, beta, 1.
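# Minimal usage sketch (illustrative; assumes the standard surpyval fitter API
# inherited from ParametricFitter):
# >>> from surpyval import ExpoWeibull
# >>> model = ExpoWeibull.fit([1.5, 2.1, 3.4, 4.0, 5.2])
# >>> model.params  # fitted (alpha, beta, mu)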
ExpoWeibull = ExpoWeibull_('ExpoWeibull') | 1.976563 | 2 |
tests/test_base_table.py | stjordanis/datar | 110 | 2057 | <filename>tests/test_base_table.py<gh_stars>100-1000
import pytest
from datar import stats
from datar.base import *
from datar import f
from datar.datasets import warpbreaks, state_division, state_region, airquality
from .conftest import assert_iterable_equal
def test_table():
# https://www.rdocumentation.org/packages/base/versions/3.6.2/topics/table
z = stats.rpois(100, 5)
x = table(z)
assert sum(x.values.flatten()) == 100
#-----------------
with data_context(warpbreaks) as _:
tab = table(f.wool, f.tension)
assert tab.columns.tolist() == ['H', 'L', 'M']
assert tab.index.tolist() == ['A', 'B']
assert_iterable_equal(tab.values.flatten(), [9] * 6)
tab = table(warpbreaks.loc[:, ['wool', 'tension']])
assert tab.columns.tolist() == ['H', 'L', 'M']
assert tab.index.tolist() == ['A', 'B']
assert_iterable_equal(tab.values.flatten(), [9] * 6)
#-----------------
tab = table(state_division, state_region)
assert tab.loc['New England', 'Northeast'] == 6
#-----------------
with data_context(airquality) as _:
qt = stats.quantile(f.Temp)
ct = cut(f.Temp, qt)
tab = table(ct, f.Month)
assert tab.iloc[0,0] == 24
#-----------------
a = letters[:3]
tab = table(a, sample(a))
assert sum(tab.values.flatten()) == 3
#-----------------
tab = table(a, sample(a), dnn=['x', 'y'])
assert tab.index.name == 'x'
assert tab.columns.name == 'y'
#-----------------
a = c(NA, Inf, (1.0/(i+1) for i in range(3)))
a = a * 10
# tab = table(a)
# assert_iterable_equal(tab.values.flatten(), [10] * 4)
tab = table(a, exclude=None)
assert_iterable_equal(tab.values.flatten(), [10] * 5)
#------------------
b = as_factor(rep(c("A","B","C"), 10))
tab = table(b)
assert tab.shape == (1, 3)
assert_iterable_equal(tab.values.flatten(), [10] * 3)
tab = table(b, exclude="B")
assert tab.shape == (1, 2)
assert_iterable_equal(tab.values.flatten(), [10] * 2)
assert 'B' not in tab.columns
#-------------------
d = factor(rep(c("A","B","C"), 10), levels=c("A","B","C","D","E"))
tab = table(d, exclude="B", dnn=['x'])
assert_iterable_equal(tab.columns.to_list(), ["A", "C", "D", "E"])
assert_iterable_equal(tab.values.flatten(), [10, 10, 0, 0])
d2 = factor(rep(c("A","B","C"), 10), levels=c("A","B","C","D","E"))
tab = table(d, d2, exclude="B")
assert tab.shape == (4, 4)
tab = table("abc", "cba", dnn='x')
assert tab.shape == (3,3)
assert sum(tab.values.flatten()) == 3
with data_context(airquality) as _:
tab = table(f.Ozone, f.Solar_R, exclude=None)
assert '<NA>' in tab.columns
assert '<NA>' in tab.index
def test_table_error():
from datar.datasets import iris, warpbreaks
with pytest.raises(ValueError):
table(iris)
with pytest.raises(ValueError):
table(warpbreaks, iris)
with pytest.raises(ValueError):
table(warpbreaks.wool, iris)
with pytest.raises(ValueError):
table(iris.iloc[:, []])
with pytest.raises(ValueError):
table(iris.iloc[:, [1,2]], iris)
with pytest.raises(ValueError):
table(iris.iloc[:, [1]], iris, iris)
with pytest.raises(ValueError):
table(iris.iloc[:, [1]], iris.iloc[:, []])
| 2.15625 | 2 |
cqlengine/tests/statements/test_update_statement.py | dokai/cqlengine | 0 | 2058 | <reponame>dokai/cqlengine
from unittest import TestCase
from cqlengine.statements import UpdateStatement, WhereClause, AssignmentClause
from cqlengine.operators import *
class UpdateStatementTests(TestCase):
def test_table_rendering(self):
""" tests that fields are properly added to the select statement """
us = UpdateStatement('table')
self.assertTrue(unicode(us).startswith('UPDATE table SET'), unicode(us))
self.assertTrue(str(us).startswith('UPDATE table SET'), str(us))
def test_rendering(self):
us = UpdateStatement('table')
us.add_assignment_clause(AssignmentClause('a', 'b'))
us.add_assignment_clause(AssignmentClause('c', 'd'))
us.add_where_clause(WhereClause('a', EqualsOperator(), 'x'))
self.assertEqual(unicode(us), 'UPDATE table SET "a" = :0, "c" = :1 WHERE "a" = :2', unicode(us))
def test_context(self):
us = UpdateStatement('table')
us.add_assignment_clause(AssignmentClause('a', 'b'))
us.add_assignment_clause(AssignmentClause('c', 'd'))
us.add_where_clause(WhereClause('a', EqualsOperator(), 'x'))
self.assertEqual(us.get_context(), {'0': 'b', '1': 'd', '2': 'x'})
def test_context_update(self):
us = UpdateStatement('table')
us.add_assignment_clause(AssignmentClause('a', 'b'))
us.add_assignment_clause(AssignmentClause('c', 'd'))
us.add_where_clause(WhereClause('a', EqualsOperator(), 'x'))
us.update_context_id(3)
self.assertEqual(unicode(us), 'UPDATE table SET "a" = :4, "c" = :5 WHERE "a" = :3')
self.assertEqual(us.get_context(), {'4': 'b', '5': 'd', '3': 'x'})
def test_additional_rendering(self):
us = UpdateStatement('table', ttl=60)
us.add_assignment_clause(AssignmentClause('a', 'b'))
us.add_where_clause(WhereClause('a', EqualsOperator(), 'x'))
self.assertIn('USING TTL 60', unicode(us))
| 2.78125 | 3 |
packages/facilities/diagnostics/py/custom_checkbox.py | Falcons-Robocup/code | 2 | 2059 | <reponame>Falcons-Robocup/code
# Copyright 2020 <NAME> (Falcons)
# SPDX-License-Identifier: Apache-2.0
#!/usr/bin/python
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
class Checkbox():
def __init__(self, name, position, default=False, label=None, rsize=0.6, enabled=True):
        self.name = name # unique ID associated with this checkbox
# label to display next to the checkbox
if label == None:
self.label = name # reuse
else:
self.label = label
self.callback = None
self.enabled = enabled
self.ticked = default
self.ax = plt.axes(position) # position is a tuple (x,y,w,h)
self.ax.axis('off')
self.canvas = self.ax.figure.canvas
# draw text
if len(self.label):
self.text = self.ax.text(-0.15, 0.5, self.label, horizontalalignment='right', verticalalignment='center')
# draw a rectangle, add a bit of spacing
self.ax.add_patch(Rectangle((0,(1.0-rsize)/2), rsize, rsize, fill=True))
# setup event handling
self.canvas.mpl_connect('button_release_event', self._handle_event)
self.redraw()
def __repr__(self):
s = 'checkbox:' + self.name + '=' + str(self.ticked)
if not self.enabled:
s += ' (disabled)'
return s
def on_changed(self, cb):
self.callback = cb
def _handle_event(self, e):
if self.enabled and e.inaxes == self.ax: # TODO: exclude spacing margin for inaxes calculation
self.ticked = not self.ticked
self.redraw()
if self.callback != None:
self.callback(self.name, self.ticked)
def redraw(self):
col = 'grey'
if self.enabled:
col = ['lightgoldenrodyellow', 'blue'][self.ticked]
self.ax.patches[0].set_facecolor(col)
self.ax.figure.canvas.draw()
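# Minimal usage sketch (illustrative; not part of the original module):
if __name__ == '__main__':
    def _on_change(name, ticked):
        # simple callback: report which checkbox changed and its new state
        print(name, '->', ticked)
    demo = Checkbox('demo', (0.4, 0.45, 0.2, 0.1), default=True, label='enable')
    demo.on_changed(_on_change)
    plt.show()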
| 2.984375 | 3 |
generator.py | Axonny/HexagonalHitori | 0 | 2060 | from hitori_generator import Generator
from argparse import ArgumentParser
def generate(n: int, output_file: str) -> None:
if n < 3 or n > 8:
print("It isn't valid size")
exit(4)
generator = Generator(n)
data = generator.generate()
lines = map(lambda x: ' '.join(map(str, x)), data)
with open(output_file, 'w', encoding='utf-8') as f:
f.write('\n'.join(lines))
def main():
p = ArgumentParser()
p.add_argument('filename', type=str, help='Path to output file')
p.add_argument('-s', "--size", type=int, default=3, help='Generate SxS field. size must be in [3, 8]. Default is 3')
args = p.parse_args()
generate(args.size, args.filename)
if __name__ == '__main__':
main()
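# Example CLI usage (illustrative):
#   python generator.py puzzle.txt -s 5
# writes a 5x5 field to puzzle.txt, one space-separated row per line.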
| 3.375 | 3 |
opaflib/xmlast.py | feliam/opaf | 2 | 2061 | <filename>opaflib/xmlast.py
from lxml import etree
from opaflib.filters import defilterData
#Logging facility
import logging,code
logger = logging.getLogger("OPAFXML")
class PDFXML(etree.ElementBase):
''' Base pdf-xml class. Every pdf token xml representation will
have a span wich indicates where the original token layed in the file
'''
def _getspan(self):
return tuple([int(i) for i in self.get('span').split('~')])
def _setspan(self, value):
self.set('span',"%d~%d"%value)
def span_move(self,offset, recursive=True):
begin,end = self.span
self.span = (begin+offset,end+offset)
if recursive:
for child in self.getchildren():
child.span_move(offset)
def span_expand(self,span):
begin,end = self.span
self.span = (min(begin,span[0]),max(end,span[1]))
def clear_span(self, recursive=True):
del self.attrib['span']
for child in self.getchildren():
child.clear_span()
span = property(_getspan,_setspan)
def _to_xml(self):
return etree.tostring(self)
xml = property(_to_xml)
def _from_python(self, value):
self.from_python(value)
def _to_python(self):
return self.to_python()
value = property(_to_python,_from_python)
def __getattr__(self, name):
tags = set([e.tag for e in self])
if name in tags:
return self.xpath('./%s'%name)
return getattr(super(PDFXML,self),name)
def get_numgen(self):
''' Search the object and generation number of any pdf element '''
if self.tag.startswith('indirect'):
return self.id
else:
return self.getparent().get_numgen()
#leaf
class PDFString(PDFXML):
def from_python(self, value):
self.text = value.encode('string_escape')
def to_python(self):
return self.text.decode('string_escape')
class PDFName(PDFString):
pass
class PDFData(PDFString):
pass
class PDFBool(PDFString):
def from_python(self, value):
assert type(value) == bool, 'Value must be a boolean'
self.text = ['false','true'][int(value)]
def to_python(self):
return {'false': False, 'true': True}[self.text]
class PDFNull(PDFString):
def from_python(self, value):
assert value is None, 'Value must be None'
self.text = 'null'
def to_python(self):
assert self.text == 'null', 'PDFNull xml not initialized'
return None
class PDFR(PDFString):
def from_python(self, (n,g)):
assert type(n) == int and type(g) == int, 'R must be two numbers, n and g'
assert n >= 0 and n < 65535 , 'Invalid object number (%d)'%n
assert g >= 0 and g < 65535 , 'Invalid generation number (%d)'%g
self.text = "%d %d"%(n,g)
def to_python(self):
return tuple([int(i) for i in self.text.split(' ')])
def solve(self):
''' search the referenced indirect object in the containing pdf '''
pdf = self.xpath('/*')[0]
return pdf.getIndirectObject(self.value)
class PDFNumber(PDFXML):
def from_python(self, value):
assert type(value) in [int, float], 'Wrong type for a number'
self.text = str(value)
def to_python(self):
x = self.text
return float(int(float(x))) == float(x) and int(float(x)) or float(x)
class PDFStartxref(PDFString):
def from_python(self, value):
assert type(value) == int , 'Wrong type for startxref'
self.text = str(value).encode('string_escape')
def to_python(self):
return int(self.text.decode('string_escape'))
class PDFHeader(PDFString):
pass
#tree
class PDFEntry(PDFXML):
def to_python(self):
return tuple([e.value for e in self.getchildren()])
def _getkey(self):
return self[0]
def _setkey(self, key):
assert key.tag == 'name'
self[0] = key
key = property(_getkey,_setkey,None)
def _getval(self):
return self[1]
def _setval(self, val):
self[1] = val
val = property(_getval,_setval,None)
class PDFDictionary(PDFXML):
def to_python(self):
return dict([e.value for e in self.getchildren()])
def has_key(self,key):
return len(self.xpath('./entry/name[position()=1 and text()="%s"]'%key))>0
def __getitem__(self, i):
if str == type(i):
return self.xpath('./entry/name[position()=1 and text()="%s"]/../*[position()=2]'%i)[0]
return super(PDFDictionary,self).__getitem__(i)
def __delitem__(self, i):
if str == type(i):
return self.remove(self.xpath('./entry/name[position()=1 and text()="%s"]/..'%i)[0])
return super(PDFDictionary,self).__delitem__(i)
def __setitem__(self, key, val):
if str == type(key):
self.xpath('./entry/name[position()=1 and text()="%s"]/..'%key)[0].val=val
else:
super(PDFDictionary,self).__setitem__(key,val)
class PDFStream(PDFXML):
def to_python(self):
return {'dictionary':self[0].value, 'data':self[1].value}
def _getdictionary(self):
return self[0]
def _setdictionary(self, d):
        assert d.tag == 'dictionary'
self[0] = d
dictionary = property(_getdictionary,_setdictionary,None)
def _getdata(self):
return self[1]
def _setdata(self, data):
assert data.tag == 'data'
self[1] = data
data = property(_getdata,_setdata,None)
def isFiltered(self):
''' Check if stream is filtered '''
return self.dictionary.has_key('Filter')
def getFilters(self):
val = self.dictionary.value
filters = val.get('Filter',None)
params = val.get('DecodeParams',None)
assert any([type(filters) == list and (type(params) == list or params==None ),
type(filters) != list and (type(params) == dict or params==None ) ]), 'Filter/DecodeParms wrong type'
if type(filters) != list:
filters=[filters]
params=params and [params] or [{}]
if params == None:
params = [{}]*len(filters)
assert all([type(x)==str for x in filters]), 'Filter shall be a names'
assert all([type(x)==dict for x in params]), 'Params should be a dictionary.. or null?'
assert len(filters) == len(params),'Number of Decodeparams should match Filters'
return zip(filters,params)
def popFilter(self):
dictionary = self.dictionary
assert dictionary.has_key('Filter'), 'Stream not Filtered!'
selected_filter = None
selected_params = None
deletion_list = []
if dictionary['Length'].value != len(self.data.value):
logger.info("Length field of object %s does not match the actual data size (%d != %d)"%(str(self.get_numgen()),dictionary['Length'].value,len(self.data.value)))
if type(dictionary['Filter']) == PDFArray:
selected_filter = dictionary['Filter'][0]
del dictionary['Filter'][0]
if dictionary.has_key('DecodeParms'):
                assert type(dictionary['DecodeParms']) == PDFArray, 'Array of filters needs an array of decoding params'
selected_params = dictionary['DecodeParms'][0]
deletion_list.append((dictionary['DecodeParms'],0))
#del dictionary['DecodeParms'][0]
else:
selected_filter = dictionary['Filter']
del dictionary['Filter']
if dictionary.has_key('DecodeParms'):
selected_params = dictionary['DecodeParms']
deletion_list.append((dictionary, 'DecodeParms'))
#del dictionary['DecodeParms']
if dictionary.has_key('Filter') and \
type(dictionary['Filter']) == PDFArray and \
len(dictionary['Filter']) == 0:
deletion_list.append((dictionary, 'Filter'))
#del dictionary['Filter']
if dictionary.has_key('DecodeParms') and \
type(dictionary['DecodeParms']) == PDFArray and \
len(dictionary['DecodeParms']) == 0:
deletion_list.append((dictionary, 'DecodeParms'))
#del dictionary['DecodeParms']
#FIX recode defilterData .. make it register/unregister able.
#(think /Crypt 7.4.10 Crypt Filter )
self.data.value = defilterData(selected_filter.value,self.data.value, selected_params and selected_params.value or selected_params)
for v,i in deletion_list:
del v[i]
dictionary['Length'].value = len(self.data.value)
def defilter(self):
try:
while self.isFiltered():
self.popFilter()
except Exception,e:
logger.debug("Couldn't defilter <%s> stream (exception %s)."%(self.value,str(e)))
logger.info("Couldn't defilter <%s> stream."%str(self.get_numgen()))
def isObjStm(self):
''' Return true if this is an object stream (ObjStml) '''
return self.dictionary.has_key('Type') and self.dictionary['Type'].value == 'ObjStm'
def expandObjStm(self):
'''
This parses the ObjStm structure and replace it with all the new
indirect objects.
'''
from opaflib.parser import parse
assert not self.isFiltered(), "ObjStm should not be compressed at this point"
assert self.dictionary.has_key('N'), "N is mandatory in ObjStm dictionary"
assert self.dictionary.has_key('First'), "First is mandatory in ObjStm dictionary"
dictionary = self.dictionary
data = self.data.value
first = dictionary["First"].value
pointers = [int(x) for x in data[:first].split()]
assert len(pointers)%2 == 0 , "Wrong number of integer in the ObjStm begining"
pointers = dict([(pointers[i+1]+first,pointers[i]) for i in range(0,len(pointers),2) ])
positions = sorted(pointers.keys() + [len(data)])
parsed_objects = []
for p in range(0,len(positions)-1):
logger.info("Adding new object %s from objectstream"%repr((pointers[positions[p]],0)))
io = PDF.indirect_object(parse('object', data[positions[p]:positions[p+1]]+" "))
io.id = (pointers[positions[p]],0)
parsed_objects.append(io)
return parsed_objects
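    # Illustrative ObjStm layout (not taken from this file): with First=20 and
    # a prelude of "11 0 12 52", object 11 starts at byte 20+0 and object 12 at
    # byte 20+52 of the decoded stream data; expandObjStm() parses exactly
    # these (object number, offset) pairs before slicing out each object.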
class PDFArray(PDFXML):
def to_python(self):
return [e.value for e in self]
class PDFIndirect(PDFXML):
def to_python(self):
assert len(self.getchildren())==1, "Wrong number of children in indirect object"
return (self.id, self.object.value)
def _getobject(self):
return self[0]
def _setobject(self, o):
self[0] = o
object = property(_getobject,_setobject,None)
def _getid(self):
return tuple([int(i) for i in self.get('id').split(' ')])
def _setid(self, o):
self.set('id', "%d %d"%o)
id = property(_getid,_setid,None)
def isStream(self):
return len(self.xpath('./stream'))==1
class PDFPdf(PDFXML):
def to_python(self):
return [e.value for e in self]
def getStartxref(self):
''' Get the last startxref pointer (should be at least one) '''
return self.pdf_update[-1].startxref[-1]
#FIX move all this to pdf_update and do the wrapper here
def getObjectAt(self, pos):
''' Get the object found at certain byte position '''
return self.xpath('//*[starts-with(@span,"%d~")]'%pos)[0]
def getTrailer(self, startxref=None):
''' Get the Trailer dictionary (should be at least one) '''
if startxref == None:
startxref = self.getStartxref().value
xref = self.getObjectAt(startxref)
assert xref.tag in ['xref', 'stream'] and xref[0].tag == 'dictionary'
return xref[0]
def getID(self, startxref=None):
''' Get the pdf ID from the trailer dictionary '''
trailer = self.getTrailer(startxref).value
if trailer.has_key('ID'):
return trailer['ID']
else:
return ['','']
def getIndirectObject(self, ref):
''' Search for an indirect object '''
for u in self.pdf_update:
if u.has_key(ref):
return u[ref]
def getRoot(self):
''' Get the pdf Root node. '''
return self.getIndirectObject(self.getTrailer()['Root'].value).object
def isEncrypted(self):
''' Return true if pdf is encrypted '''
return self.getTrailer().has_key('Encrypt')
def countObjStm(self):
''' Count number of 'compressed' object streams '''
return len(self.xpath('//stream/dictionary/entry/name[position()=1 and text()="Type"]/../name[position()=2 and text()="ObjStm"]/../../..'))
def countIObj(self):
''' Count number of 'compressed' object streams '''
return len(self.xpath('//indirect_object'))
    def graph(self, dot='default.dot'):
''' Generate a .dot graph of the pdf '''
dotdata = "digraph {\n"
nodes_added = set()
        for io in self.xpath('.//indirect_object'):
references = io.xpath(".//R")
orig = "%d %d"%io.id
if len(references) == 0:
dotdata += '\t"%s";\n'%x
nodes_added.add(orig)
else:
for r in references:
dest = "%d %d"%r.value
dotdata += '\t"%s" -> "%s";\n'%(orig, dest)
nodes_added.add(orig)
nodes_added.add(dest)
try:
root = "%d %d"%self.getRoot()
dotdata += '\t"trailer" -> "%s";\n'%root
except Exception,e :
pass
dotdata += '}\n'
logger.info("Writing graph to %s(a dot file). Download graphviz or try this http://rise4fun.com/Agl for render it."%dot)
file(dot,"w").write(dotdata)
def expandAllObjStm(self):
''' Find all object streams and expand them. Each ObjStm will be replaced
by its childs '''
for u in self.pdf_update:
for ref in u.findAllObjStm():
u.expandObjStm(ref)
def defilterAll(self):
''' Find all object streams and expand them. Each ObjStm will be replaced
by its childs '''
for u in self.pdf_update:
for io in u[:]:
if type(io) == PDFIndirect and io.isStream() and io.object.isFiltered():
io.object.defilter()
def decrypt(self):
''' This will try to decrypt V:4 null password encryption '''
import hashlib, struct
from Crypto.Cipher import AES
from Crypto.Util import randpool
import base64
def rc4crypt(data, key):
x = 0
box = range(256)
for i in range(256):
x = (x + box[i] + ord(key[i % len(key)])) % 256
box[i], box[x] = box[x], box[i]
x = 0
y = 0
out = []
for char in data:
x = (x + 1) % 256
y = (y + box[x]) % 256
box[x], box[y] = box[y], box[x]
out.append(chr(ord(char) ^ box[(box[x] + box[y]) % 256]))
return ''.join(out)
block_size = 16
key_size = 32
def encrypt(plain_text,key_bytes):
assert len(key_bytes) == key_size
mode = AES.MODE_CBC
pad = block_size - len(plain_text) % block_size
data = plain_text + pad * chr(pad)
iv_bytes = randpool.RandomPool(512).get_bytes(block_size)
encrypted_bytes = iv_bytes + AES.new(key_bytes, mode, iv_bytes).encrypt(data)
return encrypted_bytes
def decrypt(encrypted_bytes,key_bytes):
#assert len(key_bytes) == key_size
mode = AES.MODE_CBC
iv_bytes = encrypted_bytes[:block_size]
plain_text = AES.new(key_bytes, mode, iv_bytes).decrypt(encrypted_bytes[block_size:])
pad = ord(plain_text[-1])
return plain_text[:-pad]
assert self.isEncrypted()
#Get and print the encryption dictionary
encrypt = self.getTrailer()['Encrypt'].solve().object
print "It's ENCRYPTED!"
encrypt_py = encrypt.value
print encrypt_py
#Ok try to decrypt it ...
assert encrypt_py['V'] == 4, "Sorry only Version 4 supported"
assert encrypt_py['R'] == 4, "Sorry only Version 4 supported"
#password length
n = encrypt_py['Length']/8
print "N:",n
#a) Pad or truncate the password string to exactly 32 bytes.
user_password = ""
pad = "<PASSWORD>".decode('hex')
print "PASSWORD: ", user_password.encode('hex')
print "PAD: ", pad.encode('hex')
#b) Initialize the MD5 hash function and pass the result of step (a) as input to this function.
m = hashlib.md5()
m.update((user_password+pad)[:32])
print "MD5 update 1", ((user_password+pad)[:32]).encode('hex')
#c) Pass the value of the encryption dictionary's O entry to the MD5 hash function.
m.update (encrypt_py['O'][:32])
print "MD5 update 2", (encrypt_py['O'][:32]).encode('hex')
#d) Convert the integer value of the P entry to a 32-bit unsigned binary number and pass these bytes to the
# MD5 hash function, low-order byte first. WTF!!??
print "MD5 update 3", struct.pack("<L", 0xffffffff&encrypt_py['P']).encode('hex')
m.update (struct.pack("<L", 0xffffffff&encrypt_py['P'] ))
#e) append ID ?
#TODO, get the ID from the trailer..
ID = ''
m.update (ID)
print "MD5 update 4", ID.encode('hex')
#f) If document metadata is not being encrypted, pass 4 bytes with the value 0xFFFFFFFF to the MD5 hash function.
        if encrypt_py.has_key('EncryptMetadata') and encrypt_py['EncryptMetadata'] == False:
m.update('\xff'*4)
print "MD5 update 5", ('\xff'*4).encode('hex')
print "1rst DIGEST:", m.digest().encode('hex')
h = m.digest()[:n]
for i in range(0,50):
h = hashlib.md5(h[:n]).digest()
print "Encryption KEY(%d)"%i, h.encode('hex')
key = h[:n]
print "Encryption KEY", key.encode('hex')
print "Try to authenticate"
_buf = hashlib.md5(pad + ID).digest()
print "MD5(padding+ID):",_buf.encode('hex')
for i in range(0,20):
_key = ''.join([chr(ord(k)^i) for k in list(key)])
_buf1 = rc4crypt(_buf,_key)
print "RC4 iter(%d) Encrypt data <%s> with key <%s> and it gives data <%s>"%(i,_buf.encode('hex'),_key.encode('hex'),_buf1.encode('hex'))
_buf = _buf1
assert _buf == encrypt_py['U'][:16]
print "Authenticated! (An actual pass is not needed. Using null pass '' )"
print "U", encrypt_py['U'].encode('hex')
print "O", encrypt_py['O'].encode('hex')
def decrypt_xml(xml_element):
n,g = xml_element.get_numgen()
m = hashlib.md5()
m.update(key)
m.update(chr(n&0xff))
m.update(chr((n>>8)&0xff))
m.update(chr((n>>16)&0xff))
m.update(chr(g&0xff))
m.update(chr((g>>8)&0xff))
m.update("sAlT")
real_key = m.digest()
            pld = xml_element.value
if pld.endswith("\x0d\x0a"):
pld = pld[:-2]
pld = decrypt(pld,real_key)
            xml_element.value = pld
#decrypt every string and stream in place...
for e in self.xpath('//stream/data'):
decrypt_xml(e)
for e in self.xpath('//string'):
decrypt_xml(e)
class PDFUpdate(PDFXML):
def to_python(self):
return dict([e.value for e in self.xpath('./indirect_object')])
def has_key(self,key):
key = "%d %d"%key
return len(self.xpath('./indirect_object[@id="%s"]'%key))>0
def __getitem__(self, key):
if tuple == type(key):
key = "%d %d"%key
return self.xpath('./indirect_object[@id="%s"]'%key)[0]
return super(PDFUpdate,self).__getitem__(key)
def __delitem__(self, key):
if tuple == type(key):
key = "%d %d"%key
return self.remove(self.xpath('./indirect_object[@id="%s"]'%key)[0])
return super(PDFUpdate,self).__delitem__(key)
def __setitem__(self, key, val):
if str == type(key):
            self.xpath('./indirect_object[@id="%s"]'%key)[0][:]=[val] #mmm
        else:
            super(PDFUpdate,self).__setitem__(key,val)
def getObjectAt(self, pos):
''' Get the object found at certain byte position (only in this update!)'''
return self.xpath('.//*[starts-with(@span,"%d~")]'%pos)[0]
def getTrailer(self, startxref=None):
''' Get the Trailer dictionary (of this update!)'''
if startxref == None:
startxref = self.getStartxref().value
xref = self.getObjectAt(startxref)
return xref.dictionary
def getRoot(self):
''' Get the pdf Root node of this update. '''
return self[self.getTrailer()['Root'].value].object
def countObjStm(self):
''' Count number of 'compressed' object streams '''
return len(self.xpath('.//stream/dictionary/entry/name[position()=1 and text()="Type"]/../name[position()=2 and text()="ObjStm"]/../../..'))
def expandObjStm(self, ref):
io_objstm = self[ref]
assert io_objstm.object.dictionary['Type'].value == 'ObjStm'
#completelly defilter the object stream
while io_objstm.object.isFiltered():
io_objstm.object.popFilter()
#parse the indirect simpe objects inside it
expanded_iobjects = io_objstm.object.expandObjStm()
#replace the object stream by its childs
for new_io in expanded_iobjects:
io_objstm.addnext(new_io)
self.remove(io_objstm)
def findAllObjStm(self):
''' Search 'compressed' object streams ids/refs'''
return [io.id for io in self.xpath('.//stream/dictionary/entry/name[position()=1 and text()="Type"]/../name[position()=2 and text()="ObjStm"]/../../../..')]
def expandAllObjStm(self):
for ref in self.findAllObjStm():
self.expandObjStm(ref)
#Factory
class PDFXMLFactory():
def __init__(self):
self.parser = etree.XMLParser()
fallback = etree.ElementDefaultClassLookup(PDFXML)
lookup = etree.ElementNamespaceClassLookup(fallback)
namespace = lookup.get_namespace(None)
#leafs
namespace['name'] = PDFName
namespace['string'] = PDFString
namespace['number'] = PDFNumber
namespace['null'] = PDFNull
namespace['bool'] = PDFBool
namespace['R'] = PDFR
namespace['header'] = PDFHeader
namespace['startxref'] = PDFStartxref
namespace['data'] = PDFData
#trees
namespace['entry'] = PDFEntry
namespace['dictionary'] = PDFDictionary
namespace['stream'] = PDFStream
namespace['pdf'] = PDFPdf
namespace['pdf_update'] = PDFUpdate
namespace['indirect_object'] = PDFIndirect
namespace['array'] = PDFArray
self.parser.set_element_class_lookup(lookup)
#leaf
def create_leaf(self, tag, value,**attribs):
assert tag in ['number','string','name','R','startxref','header','data','null','bool'], "Got wrong leaf tag: %s"%tag
xml = self.parser.makeelement(tag)
xml.value=value
xml.span=attribs.setdefault('span', (0xffffffff,-1))
del attribs['span']
for attr_key, attr_val in attribs.items():
xml.set(attr_key, str(attr_val))
return xml
#Tree
def create_tree(self, tag, *childs, **attribs):
assert tag in ['indirect_object','dictionary', 'entry', 'array', 'stream', 'xref', 'pdf', 'pdf_update'], "Got wrong tree tag: %s"%tag
xml = self.parser.makeelement(tag)
xml.span=attribs.setdefault('span', (0xffffffff,-1))
del attribs['span']
for attr_key, attr_val in attribs.items():
xml.set(attr_key, str(attr_val))
for child in childs:
xml.append(child)
return xml
def __getattr__(self,tag, *args,**kwargs):
if tag in ['number','string','name','R','startxref','header','data','null','bool']:
return lambda payload, **my_kwargs: self.create_leaf(tag, payload, **my_kwargs)
elif tag in ['indirect_object','dictionary', 'entry', 'array', 'stream', 'xref', 'pdf', 'pdf_update']:
return lambda payload, **my_kwargs: self.create_tree(tag, *payload, **my_kwargs)
return super(PDFXMLFactory,self).__getattr__(tag,*args,**kwargs)
PDF = PDFXMLFactory()
def create_leaf(tag, value, **kwargs):
return PDF.create_leaf(tag, value,**kwargs)
def create_tree(tag, childs, **kwargs):
return PDF.create_tree(tag, *childs, **kwargs)
if __name__=="__main__":
name = create_leaf('name', "Name")
string = create_leaf('string', "Felipe")
entry = create_tree('entry',[name,string])
dictionary = create_tree('dictionary',[entry])
stream_data = create_leaf('data',"A"*100)
stream = create_tree('stream',[dictionary,stream_data])
indirect = create_tree('indirect_object', [stream], obj=(1,0))
array = create_tree('array', [create_leaf('number', i) for i in range(0,10)])
xml=indirect
print etree.tostring(xml), xml.value
import code
code.interact(local=locals())
| 2.671875 | 3 |
course-code/imooc-tf-mnist-flask/mnist/module.py | le3t/ko-repo | 30 | 2062 | <filename>course-code/imooc-tf-mnist-flask/mnist/module.py
import tensorflow as tf
# y=ax+b linear model
def regression(x):
a = tf.Variable(tf.zeros([784, 10]), name="a")
b = tf.Variable(tf.zeros([10]), name="b")
y = tf.nn.softmax(tf.matmul(x, a) + b)
return y, [a, b]
# 定义卷积模型
def convolutional(x, keep_prob):
def conv2d(x, w):
return tf.nn.conv2d(x, w, [1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(
x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
x_image = tf.reshape(x, [-1, 28, 28, 1])
w_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
w_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
# 全连接层
w_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
w_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y = tf.nn.softmax(tf.matmul(h_fc1_drop, w_fc2))
    return y, [w_conv1, b_conv1, w_conv2, b_conv2, w_fc1, b_fc1, w_fc2, b_fc2]
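# Minimal usage sketch (illustrative; assumes TF 1.x graph mode as used above):
#   x = tf.placeholder(tf.float32, [None, 784])
#   keep_prob = tf.placeholder(tf.float32)
#   y, variables = convolutional(x, keep_prob)
#   # y has shape [batch, 10]; `variables` can be passed to tf.train.Saver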
| 3.234375 | 3 |
src/sol/handle_metaplex.py | terra-dashboard/staketaxcsv | 140 | 2063 | from common.make_tx import make_swap_tx
from sol.handle_simple import handle_unknown_detect_transfers
def handle_metaplex(exporter, txinfo):
transfers_in, transfers_out, _ = txinfo.transfers_net
if len(transfers_in) == 1 and len(transfers_out) == 1:
sent_amount, sent_currency, _, _ = transfers_out[0]
received_amount, received_currency, _, _ = transfers_in[0]
row = make_swap_tx(txinfo, sent_amount, sent_currency, received_amount, received_currency)
exporter.ingest_row(row)
else:
handle_unknown_detect_transfers(exporter, txinfo)
def is_nft_mint(txinfo):
log_instructions = txinfo.log_instructions
transfers_in, transfers_out, _ = txinfo.transfers_net
if "MintTo" in log_instructions and len(transfers_out) == 1 and len(transfers_in) == 0:
return True
elif ("MintTo" in log_instructions
and len(transfers_out) == 1
and len(transfers_in) == 1
and transfers_in[0][0] == 1):
return True
else:
return False
def handle_nft_mint(exporter, txinfo):
transfers_in, transfers_out, transfers_unknown = txinfo.transfers_net
if len(transfers_in) == 1 and len(transfers_out) == 1:
sent_amount, sent_currency, _, _ = transfers_out[0]
received_amount, received_currency, _, _ = transfers_in[0]
row = make_swap_tx(txinfo, sent_amount, sent_currency, received_amount, received_currency)
exporter.ingest_row(row)
return
handle_unknown_detect_transfers(exporter, txinfo)
| 2.171875 | 2 |
dcor/independence.py | lemiceterieux/dcor | 0 | 2064 | """
Functions for testing independence of several distributions.
The functions in this module provide methods for testing if
the samples generated from two random vectors are independent.
"""
import numpy as np
import scipy.stats
from . import _dcor_internals, _hypothesis
from ._dcor import u_distance_correlation_sqr
from ._utils import _random_state_init, _transform_to_2d
def distance_covariance_test(
x,
y,
*,
num_resamples=0,
exponent=1,
random_state=None,
n_jobs=1,
):
"""
Test of distance covariance independence.
Compute the test of independence based on the distance
covariance, for two random vectors.
The test is a permutation test where the null hypothesis is that the two
random vectors are independent.
Parameters
----------
x: array_like
First random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
y: array_like
Second random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
exponent: float
Exponent of the Euclidean distance, in the range :math:`(0, 2)`.
Equivalently, it is twice the Hurst parameter of fractional Brownian
motion.
num_resamples: int
Number of permutations resamples to take in the permutation test.
random_state: {None, int, array_like, numpy.random.RandomState}
Random state to generate the permutations.
Returns
-------
HypothesisTest
Results of the hypothesis test.
See Also
--------
distance_covariance
Examples
--------
>>> import numpy as np
>>> import dcor
>>> a = np.array([[1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12],
... [13, 14, 15, 16]])
>>> b = np.array([[1, 0, 0, 1],
... [0, 1, 1, 1],
... [1, 1, 1, 1],
... [1, 1, 0, 1]])
>>> dcor.independence.distance_covariance_test(a, a)
HypothesisTest(p_value=1.0, statistic=208.0)
>>> dcor.independence.distance_covariance_test(a, b)
... # doctest: +ELLIPSIS
HypothesisTest(p_value=1.0, statistic=11.75323056...)
>>> dcor.independence.distance_covariance_test(b, b)
HypothesisTest(p_value=1.0, statistic=1.3604610...)
>>> dcor.independence.distance_covariance_test(a, b,
... num_resamples=5, random_state=0)
HypothesisTest(p_value=0.5, statistic=11.7532305...)
>>> dcor.independence.distance_covariance_test(a, b,
... num_resamples=5, random_state=13)
HypothesisTest(p_value=0.3333333..., statistic=11.7532305...)
>>> dcor.independence.distance_covariance_test(a, a,
... num_resamples=7, random_state=0)
HypothesisTest(p_value=0.125, statistic=208.0)
"""
x = _transform_to_2d(x)
y = _transform_to_2d(y)
_dcor_internals._check_same_n_elements(x, y)
random_state = _random_state_init(random_state)
    # Compute double-centered distance matrices
u_x = _dcor_internals._distance_matrix_generic(
x,
centering=_dcor_internals.double_centered,
exponent=exponent)
u_y = _dcor_internals._distance_matrix_generic(
y,
centering=_dcor_internals.double_centered,
exponent=exponent)
# Use the dcov statistic
def statistic_function(distance_matrix):
return u_x.shape[0] * _dcor_internals.mean_product(
distance_matrix, u_y)
return _hypothesis._permutation_test_with_sym_matrix(
u_x,
statistic_function=statistic_function,
num_resamples=num_resamples,
random_state=random_state,
n_jobs=n_jobs)
def partial_distance_covariance_test(
x,
y,
z,
*,
num_resamples=0,
exponent=1,
random_state=None,
n_jobs=1,
):
"""
Test of partial distance covariance independence.
Compute the test of independence based on the partial distance
covariance, for two random vectors conditioned on a third.
The test is a permutation test where the null hypothesis is that the first
two random vectors are independent given the third one.
Parameters
----------
x: array_like
First random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
y: array_like
Second random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
z: array_like
Observed random vector. The columns correspond with the individual
random variables while the rows are individual instances of the random
vector.
num_resamples: int
Number of permutations resamples to take in the permutation test.
random_state: {None, int, array_like, numpy.random.RandomState}
Random state to generate the permutations.
Returns
-------
HypothesisTest
Results of the hypothesis test.
See Also
--------
partial_distance_covariance
Examples
--------
>>> import numpy as np
>>> import dcor
>>> a = np.array([[1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12],
... [13, 14, 15, 16]])
>>> b = np.array([[1, 0, 0, 1],
... [0, 1, 1, 1],
... [1, 1, 1, 1],
... [1, 1, 0, 1]])
>>> c = np.array([[1000, 0, 0, 1000],
... [0, 1000, 1000, 1000],
... [1000, 1000, 1000, 1000],
... [1000, 1000, 0, 1000]])
>>> dcor.independence.partial_distance_covariance_test(a, a, b)
... # doctest: +ELLIPSIS
HypothesisTest(p_value=1.0, statistic=142.6664416...)
>>> dcor.independence.partial_distance_covariance_test(a, b, c)
... # doctest: +ELLIPSIS
HypothesisTest(p_value=1.0, statistic=7.2690070...e-15)
>>> dcor.independence.partial_distance_covariance_test(b, b, c)
... # doctest: +ELLIPSIS
HypothesisTest(p_value=1.0, statistic=2.2533380...e-30)
>>> dcor.independence.partial_distance_covariance_test(a, b, c,
... num_resamples=5, random_state=0)
HypothesisTest(p_value=0.1666666..., statistic=7.2690070...e-15)
>>> dcor.independence.partial_distance_covariance_test(a, b, c,
... num_resamples=5, random_state=13)
HypothesisTest(p_value=0.1666666..., statistic=7.2690070...e-15)
>>> dcor.independence.partial_distance_covariance_test(a, c, b,
... num_resamples=7, random_state=0)
HypothesisTest(p_value=1.0, statistic=-7.5701764...e-12)
"""
random_state = _random_state_init(random_state)
# Compute U-centered matrices
u_x = _dcor_internals._u_distance_matrix(x, exponent=exponent)
u_y = _dcor_internals._u_distance_matrix(y, exponent=exponent)
u_z = _dcor_internals._u_distance_matrix(z, exponent=exponent)
# Compute projections
proj = _dcor_internals.u_complementary_projection(u_z)
p_xz = proj(u_x)
p_yz = proj(u_y)
# Use the pdcor statistic
def statistic_function(distance_matrix):
return u_x.shape[0] * _dcor_internals.u_product(
distance_matrix, p_yz)
return _hypothesis._permutation_test_with_sym_matrix(
p_xz,
statistic_function=statistic_function,
num_resamples=num_resamples,
random_state=random_state,
n_jobs=n_jobs)
def distance_correlation_t_statistic(x, y):
"""
Transformation of the bias corrected version of distance correlation used
in :func:`distance_correlation_t_test`.
Parameters
----------
x: array_like
First random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
y: array_like
Second random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
Returns
-------
numpy scalar
T statistic.
See Also
--------
distance_correlation_t_test
Examples
--------
>>> import numpy as np
>>> import dcor
>>> a = np.array([[1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12],
... [13, 14, 15, 16]])
>>> b = np.array([[1, 0, 0, 1],
... [0, 1, 1, 1],
... [1, 1, 1, 1],
... [1, 1, 0, 1]])
>>> with np.errstate(divide='ignore'):
... dcor.independence.distance_correlation_t_statistic(a, a)
inf
>>> dcor.independence.distance_correlation_t_statistic(a, b)
... # doctest: +ELLIPSIS
-0.4430164...
>>> with np.errstate(divide='ignore'):
... dcor.independence.distance_correlation_t_statistic(b, b)
inf
"""
bcdcor = u_distance_correlation_sqr(x, y)
n = x.shape[0]
v = n * (n - 3) / 2
return np.sqrt(v - 1) * bcdcor / np.sqrt(1 - bcdcor**2)
def distance_correlation_t_test(x, y):
"""
Test of independence for high dimension based on convergence to a Student t
distribution. The null hypothesis is that the two random vectors are
independent.
Parameters
----------
x: array_like
First random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
y: array_like
Second random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
Returns
-------
HypothesisTest
Results of the hypothesis test.
See Also
--------
distance_correlation_t_statistic
Examples
--------
>>> import numpy as np
>>> import dcor
>>> a = np.array([[1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12],
... [13, 14, 15, 16]])
>>> b = np.array([[1, 0, 0, 1],
... [0, 1, 1, 1],
... [1, 1, 1, 1],
... [1, 1, 0, 1]])
>>> with np.errstate(divide='ignore'):
... dcor.independence.distance_correlation_t_test(a, a)
... # doctest: +ELLIPSIS
HypothesisTest(p_value=0.0, statistic=inf)
>>> dcor.independence.distance_correlation_t_test(a, b)
... # doctest: +ELLIPSIS
HypothesisTest(p_value=0.6327451..., statistic=-0.4430164...)
>>> with np.errstate(divide='ignore'):
... dcor.independence.distance_correlation_t_test(b, b)
... # doctest: +ELLIPSIS
HypothesisTest(p_value=0.0, statistic=inf)
"""
t_test = distance_correlation_t_statistic(x, y)
n = x.shape[0]
v = n * (n - 3) / 2
df = v - 1
p_value = 1 - scipy.stats.t.cdf(t_test, df=df)
return _hypothesis.HypothesisTest(p_value=p_value, statistic=t_test)
| 3.609375 | 4 |
cinemasci/cis/__init__.py | cinemascience/cinemasc | 0 | 2065 | <reponame>cinemascience/cinemasc
from . import imageview
from . import cisview
from . import renderer
from . import convert
from . import image      # needed by add_image() below
from . import colormap   # needed by add_colormap() below
class cis:
"""Composible Image Set Class
The data structure to hold properties of a Composible Image Set.
"""
def __init__(self, filename):
""" The constructor. """
self.fname = filename
self.classname = "COMPOSABLE_IMAGE_SET"
self.dims = [0,0]
self.flags = "CONSTANT_CHANNELS"
self.version = "1.0"
self.parameterlist = []
self.parametertable = None
self.variables = {}
self.images = {}
self.colormaps = {}
def debug_print(self):
""" Debug print statement for CIS properties. """
print("printing cis")
print(" fname: {}".format(self.fname))
print(" classname: {}".format(self.classname))
print(" dims: {}".format(self.dims))
print(" flags: {}".format(self.flags))
print(" version: {}".format(self.version))
print(" colormaps: ")
for m in self.colormaps:
print(m)
for i in self.get_images():
print(" image: {}".format(self.get_image(i).name))
for l in self.get_image(i).get_layers():
print(" layer: {}".format(self.get_image(i).get_layer(l).name))
print("\n")
def get_images(self):
""" Returns all images. """
for i in self.images:
yield i
def get_image_names(self):
""" Returns list of image names. """
return list(self.images.keys())
def set_parameter_table(self, table):
""" Set parameter table using a deep copy. """
self.parametertable = table.copy(deep=True)
def add_parameter(self, name, type):
""" Add a parameter to the list of parameters for the CIS. """
# check for duplicates
self.parameterlist.append([name, type])
def add_variable(self, name, type, min, max):
""" Add a variable to the set of variables. """
# check for duplicates
self.variables[name] = {'type':type, 'min':min, 'max':max}
def add_image(self, name):
""" Add an image to the set of images in the CIS. """
# check for duplicates
self.images[name] = image.image(name)
return self.images[name]
def get_variables(self):
""" Return all variables. """
for i in self.variables:
yield i
def get_variable(self, name):
""" Return a variable. """
variable = None
if name in self.variables:
variable = self.variables[name]
return variable
def get_image(self,name):
""" Return an image. """
image = None
if name in self.images:
image = self.images[name]
return image
def get_colormap(self,name):
""" Return a colormap. """
colormap = None
if name in self.colormaps:
colormap = self.colormaps[name]
return colormap
def add_colormap(self, name, path):
""" Add a colormap to the set of colormaps. """
#if colormap not in dict
if (name not in self.colormaps):
self.colormaps[name] = colormap.colormap(path)
def remove_colormap(self, name):
""" Remove a colormap from the set of colormaps. """
self.colormaps.pop(name)
def get_colormaps(self):
""" Return all colormaps. """
for i in self.colormaps:
yield i
def set_dims(self, w, h):
""" Set the dimensions of the CIS given a width and height. """
self.dims = [w, h]
| 2.625 | 3 |
applications/spaghetti.py | fos/fos-legacy | 2 | 2066 | <gh_stars>1-10
import numpy as np
import nibabel as nib
import os.path as op
import pyglet
#pyglet.options['debug_gl'] = True
#pyglet.options['debug_x11'] = True
#pyglet.options['debug_gl_trace'] = True
#pyglet.options['debug_texture'] = True
#fos modules
from fos.actor.axes import Axes
from fos import World, Window, WindowManager
from labeler import TrackLabeler
from fos.actor.slicer import Slicer
#dipy modules
from dipy.segment.quickbundles import QuickBundles
from dipy.io.dpy import Dpy
from dipy.io.pickles import load_pickle,save_pickle
from dipy.viz.colormap import orient2rgb
import copy
if __name__ == '__main__':
subject = 5
seeds = 1
qb_dist = 30
#load T1 volume registered in MNI space
img = nib.load('data/subj_'+("%02d" % subject)+'/MPRAGE_32/T1_flirt_out.nii.gz')
data = img.get_data()
affine = img.get_affine()
#load the tracks registered in MNI space
fdpyw = 'data/subj_'+("%02d" % subject)+'/101_32/DTI/tracks_gqi_'+str(seeds)+'M_linear.dpy'
dpr = Dpy(fdpyw, 'r')
T = dpr.read_tracks()
dpr.close()
#load initial QuickBundles with threshold 30mm
fpkl = 'data/subj_'+("%02d" % subject)+'/101_32/DTI/qb_gqi_'+str(seeds)+'M_linear_'+str(qb_dist)+'.pkl'
#qb=QuickBundles(T,30.,12)
qb=load_pickle(fpkl)
#create the interaction system for tracks
tl = TrackLabeler(qb,qb.downsampled_tracks(),vol_shape=data.shape,tracks_alpha=1)
#add a interactive slicing/masking tool
sl = Slicer(affine,data)
#add one way communication between tl and sl
tl.slicer=sl
#OpenGL coordinate system axes
ax = Axes(100)
x,y,z=data.shape
#add the actors to the world
w=World()
w.add(tl)
w.add(sl)
#w.add(ax)
#create a window
wi = Window(caption="Interactive Spaghetti using Diffusion Imaging in Python (dipy.org) and Free On Shades (fos.me)",\
bgcolor=(0.3,0.3,0.6,1),width=1200,height=800)
#attach the world to the window
wi.attach(w)
#create a manager which can handle multiple windows
wm = WindowManager()
wm.add(wi)
wm.run()
print('Everything is running ;-)')
| 1.664063 | 2 |
faceai/gender.py | dlzdy/faceai | 1 | 2067 | #coding=utf-8
# Gender recognition
import cv2
from keras.models import load_model
import numpy as np
import chineseText
img = cv2.imread("img/gather.png")
face_classifier = cv2.CascadeClassifier(
"d:\Python36\Lib\site-packages\opencv-master\data\haarcascades\haarcascade_frontalface_default.xml"
)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_classifier.detectMultiScale(
gray, scaleFactor=1.2, minNeighbors=3, minSize=(140, 140))
gender_classifier = load_model(
"classifier/gender_models/simple_CNN.81-0.96.hdf5")
gender_labels = {0: '女', 1: '男'}
color = (255, 255, 255)
for (x, y, w, h) in faces:
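    # Crop each detected face with extra margin (60 px above/below, 30 px left/right),
    # then resize to the 48x48 input expected by the gender classifier.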
face = img[(y - 60):(y + h + 60), (x - 30):(x + w + 30)]
face = cv2.resize(face, (48, 48))
face = np.expand_dims(face, 0)
face = face / 255.0
gender_label_arg = np.argmax(gender_classifier.predict(face))
gender = gender_labels[gender_label_arg]
cv2.rectangle(img, (x, y), (x + h, y + w), color, 2)
img = chineseText.cv2ImgAddText(img, gender, x + h, y, color, 30)
cv2.imshow("Image", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 3.15625 | 3 |
csm_web/scheduler/tests/utils.py | mudit2103/csm_web | 0 | 2068 | from django.test import TestCase
from os import path
from rest_framework import status
from rest_framework.test import APIClient
import random
from scheduler.models import Profile
from scheduler.factories import (
CourseFactory,
SpacetimeFactory,
UserFactory,
ProfileFactory,
SectionFactory,
AttendanceFactory,
OverrideFactory,
create_attendances_for,
)
random.seed(0)
COURSE_NAMES = ("CS88", "CS61A", "CS61B", "CS70", "CS61C", "EE16A")
ROLE_MAP = Profile.ROLE_MAP
BASE_PATH = "/scheduler"
# ----- REQUEST UTILITIES -----
def fail_msg(ep, resp):
return "Endpoint: {}\nResponse Content: {}".format(ep, resp.content)
class APITestCase(TestCase):
def get_client_for(self, user):
"""Returns an APIClient object that is logged in as the provided user."""
client = APIClient()
client.force_authenticate(user)
return client
def request(self, method, endpoint, exp_code=None, data=None):
"""
Performs a request to the specified endpoint and returns the response object.
Also checks if the status code of the response is exp_code, if provided.
The method parameter should be a get/post/etc from an APIClient object.
"""
resp = method(path.join(BASE_PATH, endpoint.strip("/")), follow=True, data=data)
if exp_code is not None:
self.assertEqual(resp.status_code, exp_code, msg=fail_msg(endpoint, resp))
return resp
def req_fails_perms(self, method, endpoint, data=None):
"""
Performs a request to the specified endpoint, and checks that it fails
due to the user lacking proper permissions.
The method parameter should be a get/post/etc from an APIClient object.
Returns the response object afterwards.
"""
return self.request(
method, endpoint, exp_code=status.HTTP_403_FORBIDDEN, data=data
)
def req_fails_method(self, method, endpoint, data=None):
"""
Performs a request to the specified endpoint, and checks that it fails
due to the endpoint not supporting the provided method.
Returns the response object.
"""
return self.request(
method, endpoint, exp_code=status.HTTP_405_METHOD_NOT_ALLOWED, data=data
)
def req_succeeds(self, method, endpoint, data=None):
"""
Performs a request to the specified endpoint, and checks that it succeeds.
The method parameter should be a get/post/etc from an APIClient object.
Returns the response object.
"""
return self.request(method, endpoint, exp_code=status.HTTP_200_OK, data=data)
# ----- MODEL GENERATION -----
def random_objs(clazz, n=1):
"""
Generates N instances of the provided class, retrieved from the database.
"""
src = clazz.objects.all()
for _ in range(n):
yield random.choice(src)
def make_test_courses():
"""Creates course objects and persists them to database."""
return [CourseFactory.create(name=name) for name in COURSE_NAMES]
def make_test_users(n):
"""Creates N test users and persists them to database."""
return UserFactory.create_batch(n)
def give_role(user, role, course):
"""
Creates a profile for USER in a given ROLE for the provided COURSE, and
saves the profile to database.
"""
return ProfileFactory.create(
user=user, course=course, leader=None, section=None, role=role
)
def create_empty_section_for(mentor):
"""
Creates a section for MENTOR without populated students.
"""
return SectionFactory.create(course=mentor.course, mentor=mentor)
def enroll_user_as_student(user, section):
"""
Creates a student profile for USER, and assigns them to the given SECTION.
Also creates blank attendances as necessary.
Returns the created profile.
"""
student = give_role(user, Profile.STUDENT, section.course)
student.section = section
student.leader = section.leader
create_attendances_for(student)
return student
def gen_test_data(cls, NUM_USERS=300):
"""
Adds NUM_USERS users to the database and initializes profiles for them as follows:
- 2 coords per course
- 4 SMs per coord, each with a section of 3-6 students
- 3 JMs per SM, each with a section of 3-6 students
"""
users = iter(make_test_users(NUM_USERS))
courses = make_test_courses()
# for sanity tests, everyone only has one role for now
num_courses = len(courses)
coords, seniors, juniors, students = [], [], [], []
COORD_COUNT = 2
SM_COUNT = 4
JM_COUNT = 3
def assign(role, leader, c, lst):
# returns the profile created
profile = give_role(next(users), role, c)
profile.leader = leader
lst.append(profile)
return profile
try:
for c in courses:
# coords
for i in range(COORD_COUNT):
coord = assign(Profile.COORDINATOR, None, c, coords)
# SMs
for j in range(SM_COUNT):
sm = assign(Profile.SENIOR_MENTOR, coord, c, seniors)
section = create_empty_section_for(sm)
for k in range(random.randint(3, 6)):
students.append(enroll_user_as_student(next(users), section))
# JMs
for k in range(JM_COUNT):
jm = assign(Profile.JUNIOR_MENTOR, sm, c, juniors)
for _ in range(random.randint(3, 6)):
students.append(
enroll_user_as_student(next(users), section)
)
except StopIteration:
pass
cls.users = users
cls.courses = courses
cls.coords = coords
cls.seniors = seniors
cls.juniors = juniors
cls.students = students
| 2.203125 | 2 |
coldtype/beziers.py | tallpauley/coldtype | 0 | 2069 | import math
from fontTools.pens.recordingPen import RecordingPen, replayRecording
from fontTools.misc.bezierTools import calcCubicArcLength, splitCubicAtT
from coldtype.geometry import Rect, Point
def raise_quadratic(start, a, b):
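    # Degree elevation: rewrite the quadratic Bezier (start, a, b) as an equivalent
    # cubic by placing the two inner control points 2/3 of the way from each
    # endpoint towards the quadratic control point a.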
c0 = start
c1 = (c0[0] + (2/3)*(a[0] - c0[0]), c0[1] + (2/3)*(a[1] - c0[1]))
c2 = (b[0] + (2/3)*(a[0] - b[0]), b[1] + (2/3)*(a[1] - b[1]))
c3 = (b[0], b[1])
return [c1, c2, c3]
__length_cache = {}
__split_cache = {}
def splitCubicAtT_cached(a, b, c, d, t):
global __split_cache
abcdt = (a, b, c, d, t)
sc = __split_cache.get(abcdt)
if sc:
return sc
else:
s = splitCubicAtT(a, b, c, d, t)
__split_cache[abcdt] = s
return s
def calcCubicArcLength_cached(a, b, c, d):
#return calcCubicArcLength(a, b, c, d)
global __length_cache
abcd = (a, b, c, d)
lc = __length_cache.get(abcd)
if lc:
return lc
else:
l = calcCubicArcLength(a, b, c, d)
__length_cache[abcd] = l
return l
class CurveCutter():
def __init__(self, g, inc=0.0015):
if isinstance(g, RecordingPen):
self.pen = g
else:
self.pen = RecordingPen()
g.draw(self.pen)
self.inc = inc
self.length = self.calcCurveLength()
def calcCurveLength(self):
length = 0
for i, (t, pts) in enumerate(self.pen.value):
if t == "curveTo":
p1, p2, p3 = pts
p0 = self.pen.value[i-1][-1][-1]
length += calcCubicArcLength_cached(p0, p1, p2, p3)
elif t == "lineTo":
pass # todo
return length
def subsegment(self, start=None, end=None):
global __cut_cache
inc = self.inc
length = self.length
ended = False
_length = 0
out = []
for i, (t, pts) in enumerate(self.pen.value):
if t == "curveTo":
p1, p2, p3 = pts
p0 = self.pen.value[i-1][-1][-1]
length_arc = calcCubicArcLength_cached(p0, p1, p2, p3)
if _length + length_arc < end:
_length += length_arc
else:
t = inc
tries = 0
while not ended:
a, b = splitCubicAtT_cached(p0, p1, p2, p3, t)
length_a = calcCubicArcLength_cached(*a)
if _length + length_a > end:
ended = True
out.append(("curveTo", a[1:]))
else:
t += inc
tries += 1
if t == "lineTo":
pass # TODO
if not ended:
out.append((t, pts))
if out[-1][0] != "endPath":
out.append(("endPath",[]))
return out
def subsegmentPoint(self, start=0, end=1):
inc = self.inc
subsegment = self.subsegment(start=start, end=end)
try:
t, (a, b, c) = subsegment[-2]
tangent = math.degrees(math.atan2(c[1] - b[1], c[0] - b[0]) + math.pi*.5)
return c, tangent
except ValueError:
return None, None | 2.515625 | 3 |
p1_navigation/train.py | nick0lay/deep-reinforcement-learning | 0 | 2070 | """
Project for Udacity Nanodegree in Deep Reinforcement Learning
This script trains an agent to navigate (and collect bananas!) in a large, square world.
A reward of +1 is provided for collecting a yellow banana, and a reward of -1 is provided for collecting a blue banana. Thus, the goal of your agent is to collect as many yellow bananas as possible while avoiding blue bananas.
The state space has 37 dimensions and contains the agent's velocity, along with ray-based perception of objects around the agent's forward direction. Given this information, the agent has to learn how to best select actions. Four discrete actions are available, corresponding to:
0 - move forward.
1 - move backward.
2 - turn left.
3 - turn right.
The task is episodic, and in order to solve the environment, your agent must get an average score of +13 over 100 consecutive episodes.
"""
from unityagents import UnityEnvironment
import numpy as np
from collections import deque
from dqn_agent import Agent
import torch
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
"""
Unity environment configuration
Mac: "path/to/Banana.app"
Windows (x86): "path/to/Banana_Windows_x86/Banana.exe"
Windows (x86_64): "path/to/Banana_Windows_x86_64/Banana.exe"
Linux (x86): "path/to/Banana_Linux/Banana.x86"
Linux (x86_64): "path/to/Banana_Linux/Banana.x86_64"
Linux (x86, headless): "path/to/Banana_Linux_NoVis/Banana.x86"
Linux (x86_64, headless): "path/to/Banana_Linux_NoVis/Banana.x86_64"
"""
# start Unity environment
env = UnityEnvironment(file_name="Banana.app")
# get the default brain
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
env_info = env.reset(train_mode=False)[brain_name]
action_size = brain.vector_action_space_size
state_size = len(env_info.vector_observations[0])
# initialize agent
agent = Agent(state_size=state_size, action_size=action_size, seed=0, device=device)
def train(n_episodes=2000, eps_start=1.0, eps_end=0.05, eps_decay=0.99):
"""Deep Q-Learning.
Params
======
n_episodes (int): maximum number of training episodes
eps_start (float): starting value of epsilon, for epsilon-greedy action selection
eps_end (float): minimum value of epsilon
eps_decay (float): multiplicative factor (per episode) for decreasing epsilon
"""
scores = [] # list containing scores from each episode
scores_window = deque(maxlen=100) # last 100 scores
eps = eps_start # initialize epsilon
for i_episode in range(1, n_episodes+1):
# reset environment
env_info = env.reset(train_mode=True)[brain_name]
# get initial state
state = env_info.vector_observations[0]
# set initial score
score = 0
while True:
action = agent.act(state, eps)
env_info = env.step(action)[brain_name]
next_state, reward, done = env_info.vector_observations[0], env_info.rewards[0], env_info.local_done[0]
agent.step(state, action, reward, next_state, done)
state = next_state
score += reward
if done:
break
scores_window.append(score) # save most recent score
scores.append(score) # save most recent score
eps = max(eps_end, eps_decay*eps) # decrease epsilon
print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end="")
if i_episode % 100 == 0:
print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))
if np.mean(scores_window)>=14:
print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_window)))
torch.save(agent.qnetwork_local.state_dict(), 'checkpoint.pth')
break
return scores
train() | 4.09375 | 4 |
models/model_factory.py | jac99/Egonn | 9 | 2071 | # Warsaw University of Technology
from layers.eca_block import ECABasicBlock
from models.minkgl import MinkHead, MinkTrunk, MinkGL
from models.minkloc import MinkLoc
from third_party.minkloc3d.minkloc import MinkLoc3D
from misc.utils import ModelParams
def model_factory(model_params: ModelParams):
in_channels = 1
if model_params.model == 'MinkLoc':
model = MinkLoc(in_channels=in_channels, feature_size=model_params.feature_size,
output_dim=model_params.output_dim, planes=model_params.planes,
layers=model_params.layers, num_top_down=model_params.num_top_down,
conv0_kernel_size=model_params.conv0_kernel_size, block=model_params.block,
pooling_method=model_params.pooling)
elif model_params.model == 'MinkLoc3D':
model = MinkLoc3D()
elif 'egonn' in model_params.model:
model = create_egonn_model(model_params)
else:
raise NotImplementedError('Model not implemented: {}'.format(model_params.model))
return model
def create_egonn_model(model_params: ModelParams):
model_name = model_params.model
global_normalize = False
local_normalize = True
if model_name == 'egonn':
# THIS IS OUR BEST MODEL
block = ECABasicBlock
planes = [32, 64, 64, 128, 128, 128, 128]
layers = [1, 1, 1, 1, 1, 1, 1]
global_in_levels = [5, 6, 7]
global_map_channels = 128
global_descriptor_size = 256
local_in_levels = [3, 4]
local_map_channels = 64
local_descriptor_size = 128
else:
raise NotImplementedError(f'Unknown model: {model_name}')
# Planes list number of channels for level 1 and above
global_in_channels = [planes[i-1] for i in global_in_levels]
head_global = MinkHead(global_in_levels, global_in_channels, global_map_channels)
if len(local_in_levels) > 0:
local_in_channels = [planes[i-1] for i in local_in_levels]
head_local = MinkHead(local_in_levels, local_in_channels, local_map_channels)
else:
head_local = None
min_out_level = len(planes)
if len(global_in_levels) > 0:
min_out_level = min(min_out_level, min(global_in_levels))
if len(local_in_levels) > 0:
min_out_level = min(min_out_level, min(local_in_levels))
trunk = MinkTrunk(in_channels=1, planes=planes, layers=layers, conv0_kernel_size=5, block=block,
min_out_level=min_out_level)
net = MinkGL(trunk, local_head=head_local, local_descriptor_size=local_descriptor_size,
local_normalize=local_normalize, global_head=head_global,
global_descriptor_size=global_descriptor_size, global_pool_method='GeM',
global_normalize=global_normalize, quantizer=model_params.quantizer)
return net | 2.28125 | 2 |
mdns/Phidget22Python/Phidget22/Phidget.py | rabarar/phidget_docker | 0 | 2072 | import sys
import ctypes
from Phidget22.PhidgetSupport import PhidgetSupport
from Phidget22.Async import *
from Phidget22.ChannelClass import ChannelClass
from Phidget22.ChannelSubclass import ChannelSubclass
from Phidget22.DeviceClass import DeviceClass
from Phidget22.DeviceID import DeviceID
from Phidget22.ErrorEventCode import ErrorEventCode
from Phidget22.PhidgetException import PhidgetException
class Phidget:
def __init__(self):
self.handle = ctypes.c_void_p()
if sys.platform == 'win32':
self._AttachFactory = ctypes.WINFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p)
else:
self._AttachFactory = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p)
self._Attach = None
self._onAttach = None
if sys.platform == 'win32':
self._DetachFactory = ctypes.WINFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p)
else:
self._DetachFactory = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p)
self._Detach = None
self._onDetach = None
if sys.platform == 'win32':
self._ErrorFactory = ctypes.WINFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_char_p)
else:
self._ErrorFactory = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_char_p)
self._Error = None
self._onError = None
if sys.platform == 'win32':
self._PropertyChangeFactory = ctypes.WINFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_char_p)
else:
self._PropertyChangeFactory = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_char_p)
self._PropertyChange = None
self._onPropertyChange = None
def __eq__(self, other):
return hasattr(other, 'handle') and self.handle.value == other.handle.value
def __hash__(self):
return self.handle.value
def __str__(self):
_value = (ctypes.c_char * 65536)()
_valueLen = ctypes.c_int32(65536)
if self.getIsChannel():
__func = PhidgetSupport.getDll().channelInfo
else:
__func = PhidgetSupport.getDll().deviceInfo
result = __func(self.handle, ctypes.byref(_value), _valueLen)
        return _value.value.decode('utf-8')
def __del__(self):
__func = PhidgetSupport.getDll().Phidget_delete
__func.restype = ctypes.c_int32
res = __func(ctypes.byref(self.handle))
self.handle = None
if res > 0:
raise PhidgetException(res)
def _localAttachEvent(self, handle, userPtr):
if self._Attach == None:
return
self._Attach(self)
def setOnAttachHandler(self, handler):
if handler == None:
self._Attach = None
self._onAttach = None
else:
self._Attach = handler
self._onAttach = self._AttachFactory(self._localAttachEvent)
try:
__func = PhidgetSupport.getDll().Phidget_setOnAttachHandler
__func.restype = ctypes.c_int32
res = __func(self.handle, self._onAttach, None)
except RuntimeError:
self._Attach = None
self._onAttach = None
def _localDetachEvent(self, handle, userPtr):
if self._Detach == None:
return
self._Detach(self)
def setOnDetachHandler(self, handler):
if handler == None:
self._Detach = None
self._onDetach = None
else:
self._Detach = handler
self._onDetach = self._DetachFactory(self._localDetachEvent)
try:
__func = PhidgetSupport.getDll().Phidget_setOnDetachHandler
__func.restype = ctypes.c_int32
res = __func(self.handle, self._onDetach, None)
except RuntimeError:
self._Detach = None
self._onDetach = None
def _localErrorEvent(self, handle, userPtr, Code, Description):
if self._Error == None:
return
Description = Description.decode('utf-8')
self._Error(self, Code, Description)
def setOnErrorHandler(self, handler):
if handler == None:
self._Error = None
self._onError = None
else:
self._Error = handler
self._onError = self._ErrorFactory(self._localErrorEvent)
try:
__func = PhidgetSupport.getDll().Phidget_setOnErrorHandler
__func.restype = ctypes.c_int32
res = __func(self.handle, self._onError, None)
except RuntimeError:
self._Error = None
self._onError = None
def _localPropertyChangeEvent(self, handle, userPtr, propertyName):
if self._PropertyChange == None:
return
propertyName = propertyName.decode('utf-8')
self._PropertyChange(self, propertyName)
def setOnPropertyChangeHandler(self, handler):
if handler == None:
self._PropertyChange = None
self._onPropertyChange = None
else:
self._PropertyChange = handler
self._onPropertyChange = self._PropertyChangeFactory(self._localPropertyChangeEvent)
try:
__func = PhidgetSupport.getDll().Phidget_setOnPropertyChangeHandler
__func.restype = ctypes.c_int32
res = __func(self.handle, self._onPropertyChange, None)
except RuntimeError:
self._PropertyChange = None
self._onPropertyChange = None
@staticmethod
def finalize(flags):
_flags = ctypes.c_int32(flags)
__func = PhidgetSupport.getDll().Phidget_finalize
__func.restype = ctypes.c_int32
result = __func(_flags)
if result > 0:
raise PhidgetException(result)
@staticmethod
def getLibraryVersion():
_LibraryVersion = ctypes.c_char_p()
__func = PhidgetSupport.getDll().Phidget_getLibraryVersion
__func.restype = ctypes.c_int32
result = __func(ctypes.byref(_LibraryVersion))
if result > 0:
raise PhidgetException(result)
return _LibraryVersion.value.decode('utf-8')
@staticmethod
def getLibraryVersionNumber():
_LibraryVersionNumber = ctypes.c_char_p()
__func = PhidgetSupport.getDll().Phidget_getLibraryVersionNumber
__func.restype = ctypes.c_int32
result = __func(ctypes.byref(_LibraryVersionNumber))
if result > 0:
raise PhidgetException(result)
return _LibraryVersionNumber.value.decode('utf-8')
@staticmethod
def resetLibrary():
__func = PhidgetSupport.getDll().Phidget_resetLibrary
__func.restype = ctypes.c_int32
result = __func()
if result > 0:
raise PhidgetException(result)
def getAttached(self):
_Attached = ctypes.c_int()
__func = PhidgetSupport.getDll().Phidget_getAttached
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_Attached))
if result > 0:
raise PhidgetException(result)
return _Attached.value
def getChannel(self):
_Channel = ctypes.c_int()
__func = PhidgetSupport.getDll().Phidget_getChannel
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_Channel))
if result > 0:
raise PhidgetException(result)
return _Channel.value
def setChannel(self, Channel):
_Channel = ctypes.c_int(Channel)
__func = PhidgetSupport.getDll().Phidget_setChannel
__func.restype = ctypes.c_int32
result = __func(self.handle, _Channel)
if result > 0:
raise PhidgetException(result)
def getChannelClass(self):
_ChannelClass = ctypes.c_int()
__func = PhidgetSupport.getDll().Phidget_getChannelClass
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_ChannelClass))
if result > 0:
raise PhidgetException(result)
return _ChannelClass.value
def getChannelClassName(self):
_ChannelClassName = ctypes.c_char_p()
__func = PhidgetSupport.getDll().Phidget_getChannelClassName
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_ChannelClassName))
if result > 0:
raise PhidgetException(result)
return _ChannelClassName.value.decode('utf-8')
def getChannelName(self):
_ChannelName = ctypes.c_char_p()
__func = PhidgetSupport.getDll().Phidget_getChannelName
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_ChannelName))
if result > 0:
raise PhidgetException(result)
return _ChannelName.value.decode('utf-8')
def getChannelSubclass(self):
_ChannelSubclass = ctypes.c_int()
__func = PhidgetSupport.getDll().Phidget_getChannelSubclass
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_ChannelSubclass))
if result > 0:
raise PhidgetException(result)
return _ChannelSubclass.value
def close(self):
__func = PhidgetSupport.getDll().Phidget_close
__func.restype = ctypes.c_int32
result = __func(self.handle)
if result > 0:
raise PhidgetException(result)
def getDeviceChannelCount(self, cls):
_cls = ctypes.c_int(cls)
_count = ctypes.c_uint32()
__func = PhidgetSupport.getDll().Phidget_getDeviceChannelCount
__func.restype = ctypes.c_int32
result = __func(self.handle, _cls, ctypes.byref(_count))
if result > 0:
raise PhidgetException(result)
return _count.value
def getDeviceClass(self):
_DeviceClass = ctypes.c_int()
__func = PhidgetSupport.getDll().Phidget_getDeviceClass
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_DeviceClass))
if result > 0:
raise PhidgetException(result)
return _DeviceClass.value
def getDeviceClassName(self):
_DeviceClassName = ctypes.c_char_p()
__func = PhidgetSupport.getDll().Phidget_getDeviceClassName
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_DeviceClassName))
if result > 0:
raise PhidgetException(result)
return _DeviceClassName.value.decode('utf-8')
def getDeviceID(self):
_DeviceID = ctypes.c_int()
__func = PhidgetSupport.getDll().Phidget_getDeviceID
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_DeviceID))
if result > 0:
raise PhidgetException(result)
return _DeviceID.value
def getDeviceLabel(self):
_DeviceLabel = ctypes.c_char_p()
__func = PhidgetSupport.getDll().Phidget_getDeviceLabel
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_DeviceLabel))
if result > 0:
raise PhidgetException(result)
return _DeviceLabel.value.decode('utf-8')
def setDeviceLabel(self, DeviceLabel):
_DeviceLabel = ctypes.create_string_buffer(DeviceLabel.encode('utf-8'))
__func = PhidgetSupport.getDll().Phidget_setDeviceLabel
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_DeviceLabel))
if result > 0:
raise PhidgetException(result)
def getDeviceName(self):
_DeviceName = ctypes.c_char_p()
__func = PhidgetSupport.getDll().Phidget_getDeviceName
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_DeviceName))
if result > 0:
raise PhidgetException(result)
return _DeviceName.value.decode('utf-8')
def getDeviceSerialNumber(self):
_DeviceSerialNumber = ctypes.c_int32()
__func = PhidgetSupport.getDll().Phidget_getDeviceSerialNumber
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_DeviceSerialNumber))
if result > 0:
raise PhidgetException(result)
return _DeviceSerialNumber.value
def setDeviceSerialNumber(self, DeviceSerialNumber):
_DeviceSerialNumber = ctypes.c_int32(DeviceSerialNumber)
__func = PhidgetSupport.getDll().Phidget_setDeviceSerialNumber
__func.restype = ctypes.c_int32
result = __func(self.handle, _DeviceSerialNumber)
if result > 0:
raise PhidgetException(result)
def getDeviceSKU(self):
_DeviceSKU = ctypes.c_char_p()
__func = PhidgetSupport.getDll().Phidget_getDeviceSKU
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_DeviceSKU))
if result > 0:
raise PhidgetException(result)
return _DeviceSKU.value.decode('utf-8')
def getDeviceVersion(self):
_DeviceVersion = ctypes.c_int()
__func = PhidgetSupport.getDll().Phidget_getDeviceVersion
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_DeviceVersion))
if result > 0:
raise PhidgetException(result)
return _DeviceVersion.value
def getHub(self):
_Hub = ctypes.c_void_p()
__func = PhidgetSupport.getDll().Phidget_getHub
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_Hub))
if result > 0:
raise PhidgetException(result)
__Hub = Phidget()
__Hub.handle = _Hub
return __Hub
def getHubPort(self):
_HubPort = ctypes.c_int()
__func = PhidgetSupport.getDll().Phidget_getHubPort
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_HubPort))
if result > 0:
raise PhidgetException(result)
return _HubPort.value
def setHubPort(self, HubPort):
_HubPort = ctypes.c_int(HubPort)
__func = PhidgetSupport.getDll().Phidget_setHubPort
__func.restype = ctypes.c_int32
result = __func(self.handle, _HubPort)
if result > 0:
raise PhidgetException(result)
def getHubPortCount(self):
_HubPortCount = ctypes.c_int()
__func = PhidgetSupport.getDll().Phidget_getHubPortCount
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_HubPortCount))
if result > 0:
raise PhidgetException(result)
return _HubPortCount.value
def getHubPortSpeed(self):
_HubPortSpeed = ctypes.c_uint32()
__func = PhidgetSupport.getDll().Phidget_getHubPortSpeed
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_HubPortSpeed))
if result > 0:
raise PhidgetException(result)
return _HubPortSpeed.value
def setHubPortSpeed(self, HubPortSpeed):
_HubPortSpeed = ctypes.c_uint32(HubPortSpeed)
__func = PhidgetSupport.getDll().Phidget_setHubPortSpeed
__func.restype = ctypes.c_int32
result = __func(self.handle, _HubPortSpeed)
if result > 0:
raise PhidgetException(result)
def getMaxHubPortSpeed(self):
_MaxHubPortSpeed = ctypes.c_uint32()
__func = PhidgetSupport.getDll().Phidget_getMaxHubPortSpeed
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_MaxHubPortSpeed))
if result > 0:
raise PhidgetException(result)
return _MaxHubPortSpeed.value
def getHubPortSupportsSetSpeed(self):
_HubPortSupportsSetSpeed = ctypes.c_int()
__func = PhidgetSupport.getDll().Phidget_getHubPortSupportsSetSpeed
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_HubPortSupportsSetSpeed))
if result > 0:
raise PhidgetException(result)
return _HubPortSupportsSetSpeed.value
def getIsChannel(self):
_IsChannel = ctypes.c_int()
__func = PhidgetSupport.getDll().Phidget_getIsChannel
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_IsChannel))
if result > 0:
raise PhidgetException(result)
return _IsChannel.value
def getIsHubPortDevice(self):
_IsHubPortDevice = ctypes.c_int()
__func = PhidgetSupport.getDll().Phidget_getIsHubPortDevice
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_IsHubPortDevice))
if result > 0:
raise PhidgetException(result)
return _IsHubPortDevice.value
def setIsHubPortDevice(self, IsHubPortDevice):
_IsHubPortDevice = ctypes.c_int(IsHubPortDevice)
__func = PhidgetSupport.getDll().Phidget_setIsHubPortDevice
__func.restype = ctypes.c_int32
result = __func(self.handle, _IsHubPortDevice)
if result > 0:
raise PhidgetException(result)
def getIsLocal(self):
_IsLocal = ctypes.c_int()
__func = PhidgetSupport.getDll().Phidget_getIsLocal
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_IsLocal))
if result > 0:
raise PhidgetException(result)
return _IsLocal.value
def setIsLocal(self, IsLocal):
_IsLocal = ctypes.c_int(IsLocal)
__func = PhidgetSupport.getDll().Phidget_setIsLocal
__func.restype = ctypes.c_int32
result = __func(self.handle, _IsLocal)
if result > 0:
raise PhidgetException(result)
def getIsRemote(self):
_IsRemote = ctypes.c_int()
__func = PhidgetSupport.getDll().Phidget_getIsRemote
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_IsRemote))
if result > 0:
raise PhidgetException(result)
return _IsRemote.value
def setIsRemote(self, IsRemote):
_IsRemote = ctypes.c_int(IsRemote)
__func = PhidgetSupport.getDll().Phidget_setIsRemote
__func.restype = ctypes.c_int32
result = __func(self.handle, _IsRemote)
if result > 0:
raise PhidgetException(result)
def open(self):
__func = PhidgetSupport.getDll().Phidget_open
__func.restype = ctypes.c_int32
result = __func(self.handle)
if result > 0:
raise PhidgetException(result)
def openWaitForAttachment(self, timeout):
_timeout = ctypes.c_uint32(timeout)
__func = PhidgetSupport.getDll().Phidget_openWaitForAttachment
__func.restype = ctypes.c_int32
result = __func(self.handle, _timeout)
if result > 0:
raise PhidgetException(result)
def getParent(self):
_Parent = ctypes.c_void_p()
__func = PhidgetSupport.getDll().Phidget_getParent
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_Parent))
if result > 0:
raise PhidgetException(result)
__Parent = Phidget()
__Parent.handle = _Parent
return __Parent
def getServerHostname(self):
_ServerHostname = ctypes.c_char_p()
__func = PhidgetSupport.getDll().Phidget_getServerHostname
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_ServerHostname))
if result > 0:
raise PhidgetException(result)
return _ServerHostname.value.decode('utf-8')
def getServerName(self):
_ServerName = ctypes.c_char_p()
__func = PhidgetSupport.getDll().Phidget_getServerName
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_ServerName))
if result > 0:
raise PhidgetException(result)
return _ServerName.value.decode('utf-8')
def setServerName(self, ServerName):
_ServerName = ctypes.create_string_buffer(ServerName.encode('utf-8'))
__func = PhidgetSupport.getDll().Phidget_setServerName
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_ServerName))
if result > 0:
raise PhidgetException(result)
def getServerPeerName(self):
_ServerPeerName = ctypes.c_char_p()
__func = PhidgetSupport.getDll().Phidget_getServerPeerName
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_ServerPeerName))
if result > 0:
raise PhidgetException(result)
return _ServerPeerName.value.decode('utf-8')
def getServerUniqueName(self):
_ServerUniqueName = ctypes.c_char_p()
__func = PhidgetSupport.getDll().Phidget_getServerUniqueName
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_ServerUniqueName))
if result > 0:
raise PhidgetException(result)
return _ServerUniqueName.value.decode('utf-8')
def getMaxVINTDeviceSpeed(self):
_MaxVINTDeviceSpeed = ctypes.c_uint32()
__func = PhidgetSupport.getDll().Phidget_getMaxVINTDeviceSpeed
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_MaxVINTDeviceSpeed))
if result > 0:
raise PhidgetException(result)
return _MaxVINTDeviceSpeed.value
def getVINTDeviceSupportsSetSpeed(self):
_VINTDeviceSupportsSetSpeed = ctypes.c_int()
__func = PhidgetSupport.getDll().Phidget_getVINTDeviceSupportsSetSpeed
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_VINTDeviceSupportsSetSpeed))
if result > 0:
raise PhidgetException(result)
return _VINTDeviceSupportsSetSpeed.value
def writeDeviceLabel(self, deviceLabel):
_deviceLabel = ctypes.create_string_buffer(deviceLabel.encode('utf-8'))
__func = PhidgetSupport.getDll().Phidget_writeDeviceLabel
__func.restype = ctypes.c_int32
result = __func(self.handle, ctypes.byref(_deviceLabel))
if result > 0:
raise PhidgetException(result)
ANY_SERIAL_NUMBER = -1
ANY_HUB_PORT = -1
ANY_CHANNEL = -1
ANY_LABEL = None
INFINITE_TIMEOUT = 0
DEFAULT_TIMEOUT = 1000
| 2.015625 | 2 |
openprocurement/auctions/geb/tests/blanks/create.py | oleksiyVeretiuk/openprocurement.auctions.geb | 0 | 2073 | def create_auction(self):
expected_http_status = '201 Created'
request_data = {"data": self.auction}
entrypoint = '/auctions'
response = self.app.post_json(entrypoint, request_data)
self.assertEqual(response.status, expected_http_status)
def create_auction_check_minNumberOfQualifiedBids(self):
expected_minNumberOfQualifiedBids = 2
request_data = {"data": self.auction}
entrypoint = '/auctions'
response = self.app.post_json(entrypoint, request_data)
self.assertEqual(response.json['data']['minNumberOfQualifiedBids'],
expected_minNumberOfQualifiedBids)
def create_auction_check_auctionParameters(self):
expected_auctionParameters = {'type': 'texas'}
request_data = {"data": self.auction}
entrypoint = '/auctions'
response = self.app.post_json(entrypoint, request_data)
self.assertEqual(response.json['data']['auctionParameters'],
expected_auctionParameters)
def create_auction_invalid_auctionPeriod(self):
expected_http_status = '422 Unprocessable Entity'
auction = self.auction
auction.pop('auctionPeriod')
request_data = {"data": self.auction}
entrypoint = '/auctions'
response = self.app.post_json(entrypoint, request_data, status=422)
self.assertEqual(response.status, expected_http_status)
entrypoint = '/auctions'
auction['auctionPeriod'] = {'startDate': None}
response = self.app.post_json(entrypoint, request_data, status=422)
self.assertEqual(response.status, expected_http_status)
def create_auction_dump(self):
request_data = {"data": self.auction}
entrypoint = '/auctions'
response = self.app.post_json(entrypoint, request_data)
filename = 'docs/source/tutorial/create_auction.http'
self.dump(response.request, response, filename)
| 2.640625 | 3 |
tests/integration/test_celery.py | crossscreenmedia/scout_apm_python | 0 | 2074 | # coding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from contextlib import contextmanager
import celery
import pytest
from celery.signals import setup_logging
import scout_apm.celery
from scout_apm.api import Config
# http://docs.celeryproject.org/en/latest/userguide/testing.html#py-test
skip_unless_celery_4_plus = pytest.mark.skipif(
celery.VERSION < (4, 0), reason="pytest fixtures added in Celery 4.0"
)
@setup_logging.connect
def do_nothing(**kwargs):
# Just by connecting to this signal, we prevent Celery from setting up
# logging - and stop it from interfering with global state
# http://docs.celeryproject.org/en/v4.3.0/userguide/signals.html#setup-logging
pass
@contextmanager
def app_with_scout(app=None, config=None):
"""
Context manager that configures a Celery app with Scout installed.
"""
if app is None:
app = celery.Celery("tasks", broker="memory://")
# Enable Scout by default in tests.
if config is None:
config = {"monitor": True}
# Disable running the agent.
config["core_agent_launch"] = False
@app.task
def hello():
return "Hello World!"
# Setup according to https://docs.scoutapm.com/#celery
Config.set(**config)
scout_apm.celery.install()
try:
yield app
finally:
scout_apm.celery.uninstall()
# Reset Scout configuration.
Config.reset_all()
def test_hello_eager(tracked_requests):
with app_with_scout() as app:
result = app.tasks["tests.integration.test_celery.hello"].apply()
assert result.result == "Hello World!"
assert len(tracked_requests) == 1
tracked_request = tracked_requests[0]
assert "task_id" in tracked_request.tags
assert tracked_request.tags["is_eager"] is True
assert tracked_request.tags["exchange"] == "unknown"
assert tracked_request.tags["routing_key"] == "unknown"
assert tracked_request.tags["queue"] == "unknown"
assert tracked_request.active_spans == []
assert len(tracked_request.complete_spans) == 1
span = tracked_request.complete_spans[0]
assert span.operation == "Job/tests.integration.test_celery.hello"
@skip_unless_celery_4_plus
def test_hello_worker(celery_app, celery_worker, tracked_requests):
with app_with_scout(app=celery_app) as app:
result = app.tasks["tests.integration.test_celery.hello"].delay().get()
assert result == "Hello World!"
assert len(tracked_requests) == 1
tracked_request = tracked_requests[0]
assert "task_id" in tracked_request.tags
assert tracked_request.tags["is_eager"] is False
assert tracked_request.tags["exchange"] == ""
assert tracked_request.tags["routing_key"] == "celery"
assert tracked_request.tags["queue"] == "unknown"
assert (
0.0 <= tracked_request.tags["queue_time"] < 60.0
) # Assume test took <60 seconds
assert tracked_request.active_spans == []
assert len(tracked_request.complete_spans) == 1
span = tracked_request.complete_spans[0]
assert span.operation == "Job/tests.integration.test_celery.hello"
@skip_unless_celery_4_plus
def test_hello_worker_header_preset(celery_app, celery_worker, tracked_requests):
with app_with_scout(app=celery_app) as app:
result = (
app.tasks["tests.integration.test_celery.hello"]
.apply_async(headers={"scout_task_start": "an evil string"})
.get()
)
assert result == "Hello World!"
assert len(tracked_requests) == 1
tracked_request = tracked_requests[0]
assert tracked_request.active_spans == []
assert len(tracked_request.complete_spans) == 1
span = tracked_request.complete_spans[0]
assert span.operation == "Job/tests.integration.test_celery.hello"
assert "queue_time" not in span.tags
@skip_unless_celery_4_plus
def test_hello_worker_chain(celery_app, celery_worker, tracked_requests):
with app_with_scout(app=celery_app) as app:
hello = app.tasks["tests.integration.test_celery.hello"]
result = (hello.si() | hello.si()).apply_async().get()
assert result == "Hello World!"
assert len(tracked_requests) == 2
assert [t.complete_spans[0].operation for t in tracked_requests] == [
"Job/tests.integration.test_celery.hello",
"Job/tests.integration.test_celery.hello",
]
assert "parent_task_id" not in tracked_requests[0].tags
first_task_id = tracked_requests[0].tags["task_id"]
assert tracked_requests[1].tags["parent_task_id"] == first_task_id
def test_no_monitor(tracked_requests):
# With an empty config, "monitor" defaults to False.
with app_with_scout(config={}) as app:
result = app.tasks["tests.integration.test_celery.hello"].apply()
assert result.result == "Hello World!"
assert tracked_requests == []
| 1.804688 | 2 |
molly/apps/places/migrations/0001_initial.py | mollyproject/mollyproject | 7 | 2075 | <gh_stars>1-10
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Source'
db.create_table('places_source', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('module_name', self.gf('django.db.models.fields.CharField')(max_length=128)),
('name', self.gf('django.db.models.fields.CharField')(max_length=128)),
('last_updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal('places', ['Source'])
# Adding model 'EntityType'
db.create_table('places_entitytype', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=50, db_index=True)),
('article', self.gf('django.db.models.fields.CharField')(max_length=2)),
('verbose_name', self.gf('django.db.models.fields.TextField')()),
('verbose_name_plural', self.gf('django.db.models.fields.TextField')()),
('show_in_nearby_list', self.gf('django.db.models.fields.BooleanField')(default=False)),
('show_in_category_list', self.gf('django.db.models.fields.BooleanField')(default=False)),
('note', self.gf('django.db.models.fields.TextField')(null=True)),
))
db.send_create_signal('places', ['EntityType'])
# Adding M2M table for field subtype_of on 'EntityType'
db.create_table('places_entitytype_subtype_of', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('from_entitytype', models.ForeignKey(orm['places.entitytype'], null=False)),
('to_entitytype', models.ForeignKey(orm['places.entitytype'], null=False))
))
db.create_unique('places_entitytype_subtype_of', ['from_entitytype_id', 'to_entitytype_id'])
# Adding M2M table for field subtype_of_completion on 'EntityType'
db.create_table('places_entitytype_subtype_of_completion', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('from_entitytype', models.ForeignKey(orm['places.entitytype'], null=False)),
('to_entitytype', models.ForeignKey(orm['places.entitytype'], null=False))
))
db.create_unique('places_entitytype_subtype_of_completion', ['from_entitytype_id', 'to_entitytype_id'])
# Adding model 'Identifier'
db.create_table('places_identifier', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('scheme', self.gf('django.db.models.fields.CharField')(max_length=32)),
('value', self.gf('django.db.models.fields.CharField')(max_length=256)),
))
db.send_create_signal('places', ['Identifier'])
# Adding model 'Entity'
db.create_table('places_entity', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.TextField')(blank=True)),
('source', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.Source'])),
('primary_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.EntityType'], null=True)),
('location', self.gf('django.contrib.gis.db.models.fields.PointField')(null=True)),
('geometry', self.gf('django.contrib.gis.db.models.fields.GeometryField')(null=True)),
('_metadata', self.gf('django.db.models.fields.TextField')(default='{}')),
('absolute_url', self.gf('django.db.models.fields.TextField')()),
('parent', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.Entity'], null=True)),
('is_sublocation', self.gf('django.db.models.fields.BooleanField')(default=False)),
('is_stack', self.gf('django.db.models.fields.BooleanField')(default=False)),
('identifier_scheme', self.gf('django.db.models.fields.CharField')(max_length=32)),
('identifier_value', self.gf('django.db.models.fields.CharField')(max_length=256)),
))
db.send_create_signal('places', ['Entity'])
# Adding M2M table for field all_types on 'Entity'
db.create_table('places_entity_all_types', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('entity', models.ForeignKey(orm['places.entity'], null=False)),
('entitytype', models.ForeignKey(orm['places.entitytype'], null=False))
))
db.create_unique('places_entity_all_types', ['entity_id', 'entitytype_id'])
# Adding M2M table for field all_types_completion on 'Entity'
db.create_table('places_entity_all_types_completion', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('entity', models.ForeignKey(orm['places.entity'], null=False)),
('entitytype', models.ForeignKey(orm['places.entitytype'], null=False))
))
db.create_unique('places_entity_all_types_completion', ['entity_id', 'entitytype_id'])
# Adding M2M table for field _identifiers on 'Entity'
db.create_table('places_entity__identifiers', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('entity', models.ForeignKey(orm['places.entity'], null=False)),
('identifier', models.ForeignKey(orm['places.identifier'], null=False))
))
db.create_unique('places_entity__identifiers', ['entity_id', 'identifier_id'])
def backwards(self, orm):
# Deleting model 'Source'
db.delete_table('places_source')
# Deleting model 'EntityType'
db.delete_table('places_entitytype')
# Removing M2M table for field subtype_of on 'EntityType'
db.delete_table('places_entitytype_subtype_of')
# Removing M2M table for field subtype_of_completion on 'EntityType'
db.delete_table('places_entitytype_subtype_of_completion')
# Deleting model 'Identifier'
db.delete_table('places_identifier')
# Deleting model 'Entity'
db.delete_table('places_entity')
# Removing M2M table for field all_types on 'Entity'
db.delete_table('places_entity_all_types')
# Removing M2M table for field all_types_completion on 'Entity'
db.delete_table('places_entity_all_types_completion')
# Removing M2M table for field _identifiers on 'Entity'
db.delete_table('places_entity__identifiers')
models = {
'places.entity': {
'Meta': {'ordering': "('title',)", 'object_name': 'Entity'},
'_identifiers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['places.Identifier']", 'symmetrical': 'False'}),
'_metadata': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
'absolute_url': ('django.db.models.fields.TextField', [], {}),
'all_types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'entities'", 'blank': 'True', 'to': "orm['places.EntityType']"}),
'all_types_completion': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'entities_completion'", 'blank': 'True', 'to': "orm['places.EntityType']"}),
'geometry': ('django.contrib.gis.db.models.fields.GeometryField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier_scheme': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'identifier_value': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'is_stack': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_sublocation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'location': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Entity']", 'null': 'True'}),
'primary_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.EntityType']", 'null': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Source']"}),
'title': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'places.entitytype': {
'Meta': {'ordering': "('verbose_name',)", 'object_name': 'EntityType'},
'article': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'show_in_category_list': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show_in_nearby_list': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'subtype_of': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'subtypes'", 'blank': 'True', 'to': "orm['places.EntityType']"}),
'subtype_of_completion': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'subtypes_completion'", 'blank': 'True', 'to': "orm['places.EntityType']"}),
'verbose_name': ('django.db.models.fields.TextField', [], {}),
'verbose_name_plural': ('django.db.models.fields.TextField', [], {})
},
'places.identifier': {
'Meta': {'object_name': 'Identifier'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'scheme': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'places.source': {
'Meta': {'object_name': 'Source'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'module_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
}
}
complete_apps = ['places']
| 2.078125 | 2 |
sdk/python/pulumi_azure_native/servicebus/v20210601preview/get_subscription.py | polivbr/pulumi-azure-native | 0 | 2076 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetSubscriptionResult',
'AwaitableGetSubscriptionResult',
'get_subscription',
]
@pulumi.output_type
class GetSubscriptionResult:
"""
Description of subscription resource.
"""
def __init__(__self__, accessed_at=None, auto_delete_on_idle=None, client_affine_properties=None, count_details=None, created_at=None, dead_lettering_on_filter_evaluation_exceptions=None, dead_lettering_on_message_expiration=None, default_message_time_to_live=None, duplicate_detection_history_time_window=None, enable_batched_operations=None, forward_dead_lettered_messages_to=None, forward_to=None, id=None, is_client_affine=None, lock_duration=None, max_delivery_count=None, message_count=None, name=None, requires_session=None, status=None, system_data=None, type=None, updated_at=None):
if accessed_at and not isinstance(accessed_at, str):
raise TypeError("Expected argument 'accessed_at' to be a str")
pulumi.set(__self__, "accessed_at", accessed_at)
if auto_delete_on_idle and not isinstance(auto_delete_on_idle, str):
raise TypeError("Expected argument 'auto_delete_on_idle' to be a str")
pulumi.set(__self__, "auto_delete_on_idle", auto_delete_on_idle)
if client_affine_properties and not isinstance(client_affine_properties, dict):
raise TypeError("Expected argument 'client_affine_properties' to be a dict")
pulumi.set(__self__, "client_affine_properties", client_affine_properties)
if count_details and not isinstance(count_details, dict):
raise TypeError("Expected argument 'count_details' to be a dict")
pulumi.set(__self__, "count_details", count_details)
if created_at and not isinstance(created_at, str):
raise TypeError("Expected argument 'created_at' to be a str")
pulumi.set(__self__, "created_at", created_at)
if dead_lettering_on_filter_evaluation_exceptions and not isinstance(dead_lettering_on_filter_evaluation_exceptions, bool):
raise TypeError("Expected argument 'dead_lettering_on_filter_evaluation_exceptions' to be a bool")
pulumi.set(__self__, "dead_lettering_on_filter_evaluation_exceptions", dead_lettering_on_filter_evaluation_exceptions)
if dead_lettering_on_message_expiration and not isinstance(dead_lettering_on_message_expiration, bool):
raise TypeError("Expected argument 'dead_lettering_on_message_expiration' to be a bool")
pulumi.set(__self__, "dead_lettering_on_message_expiration", dead_lettering_on_message_expiration)
if default_message_time_to_live and not isinstance(default_message_time_to_live, str):
raise TypeError("Expected argument 'default_message_time_to_live' to be a str")
pulumi.set(__self__, "default_message_time_to_live", default_message_time_to_live)
if duplicate_detection_history_time_window and not isinstance(duplicate_detection_history_time_window, str):
raise TypeError("Expected argument 'duplicate_detection_history_time_window' to be a str")
pulumi.set(__self__, "duplicate_detection_history_time_window", duplicate_detection_history_time_window)
if enable_batched_operations and not isinstance(enable_batched_operations, bool):
raise TypeError("Expected argument 'enable_batched_operations' to be a bool")
pulumi.set(__self__, "enable_batched_operations", enable_batched_operations)
if forward_dead_lettered_messages_to and not isinstance(forward_dead_lettered_messages_to, str):
raise TypeError("Expected argument 'forward_dead_lettered_messages_to' to be a str")
pulumi.set(__self__, "forward_dead_lettered_messages_to", forward_dead_lettered_messages_to)
if forward_to and not isinstance(forward_to, str):
raise TypeError("Expected argument 'forward_to' to be a str")
pulumi.set(__self__, "forward_to", forward_to)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if is_client_affine and not isinstance(is_client_affine, bool):
raise TypeError("Expected argument 'is_client_affine' to be a bool")
pulumi.set(__self__, "is_client_affine", is_client_affine)
if lock_duration and not isinstance(lock_duration, str):
raise TypeError("Expected argument 'lock_duration' to be a str")
pulumi.set(__self__, "lock_duration", lock_duration)
if max_delivery_count and not isinstance(max_delivery_count, int):
raise TypeError("Expected argument 'max_delivery_count' to be a int")
pulumi.set(__self__, "max_delivery_count", max_delivery_count)
if message_count and not isinstance(message_count, float):
raise TypeError("Expected argument 'message_count' to be a float")
pulumi.set(__self__, "message_count", message_count)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if requires_session and not isinstance(requires_session, bool):
raise TypeError("Expected argument 'requires_session' to be a bool")
pulumi.set(__self__, "requires_session", requires_session)
if status and not isinstance(status, str):
raise TypeError("Expected argument 'status' to be a str")
pulumi.set(__self__, "status", status)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if updated_at and not isinstance(updated_at, str):
raise TypeError("Expected argument 'updated_at' to be a str")
pulumi.set(__self__, "updated_at", updated_at)
@property
@pulumi.getter(name="accessedAt")
def accessed_at(self) -> str:
"""
Last time there was a receive request to this subscription.
"""
return pulumi.get(self, "accessed_at")
@property
@pulumi.getter(name="autoDeleteOnIdle")
def auto_delete_on_idle(self) -> Optional[str]:
"""
        ISO 8601 timeSpan idle interval after which the topic is automatically deleted. The minimum duration is 5 minutes.
"""
return pulumi.get(self, "auto_delete_on_idle")
@property
@pulumi.getter(name="clientAffineProperties")
def client_affine_properties(self) -> Optional['outputs.SBClientAffinePropertiesResponse']:
"""
Properties specific to client affine subscriptions.
"""
return pulumi.get(self, "client_affine_properties")
@property
@pulumi.getter(name="countDetails")
def count_details(self) -> 'outputs.MessageCountDetailsResponse':
"""
Message count details
"""
return pulumi.get(self, "count_details")
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> str:
"""
Exact time the message was created.
"""
return pulumi.get(self, "created_at")
@property
@pulumi.getter(name="deadLetteringOnFilterEvaluationExceptions")
def dead_lettering_on_filter_evaluation_exceptions(self) -> Optional[bool]:
"""
Value that indicates whether a subscription has dead letter support on filter evaluation exceptions.
"""
return pulumi.get(self, "dead_lettering_on_filter_evaluation_exceptions")
@property
@pulumi.getter(name="deadLetteringOnMessageExpiration")
def dead_lettering_on_message_expiration(self) -> Optional[bool]:
"""
Value that indicates whether a subscription has dead letter support when a message expires.
"""
return pulumi.get(self, "dead_lettering_on_message_expiration")
@property
@pulumi.getter(name="defaultMessageTimeToLive")
def default_message_time_to_live(self) -> Optional[str]:
"""
        ISO 8601 Default message timespan to live value. This is the duration after which the message expires, starting from when the message is sent to Service Bus. This is the default value used when TimeToLive is not set on a message itself.
"""
return pulumi.get(self, "default_message_time_to_live")
@property
@pulumi.getter(name="duplicateDetectionHistoryTimeWindow")
def duplicate_detection_history_time_window(self) -> Optional[str]:
"""
ISO 8601 timeSpan structure that defines the duration of the duplicate detection history. The default value is 10 minutes.
"""
return pulumi.get(self, "duplicate_detection_history_time_window")
@property
@pulumi.getter(name="enableBatchedOperations")
def enable_batched_operations(self) -> Optional[bool]:
"""
Value that indicates whether server-side batched operations are enabled.
"""
return pulumi.get(self, "enable_batched_operations")
@property
@pulumi.getter(name="forwardDeadLetteredMessagesTo")
def forward_dead_lettered_messages_to(self) -> Optional[str]:
"""
Queue/Topic name to forward the Dead Letter message
"""
return pulumi.get(self, "forward_dead_lettered_messages_to")
@property
@pulumi.getter(name="forwardTo")
def forward_to(self) -> Optional[str]:
"""
Queue/Topic name to forward the messages
"""
return pulumi.get(self, "forward_to")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="isClientAffine")
def is_client_affine(self) -> Optional[bool]:
"""
Value that indicates whether the subscription has an affinity to the client id.
"""
return pulumi.get(self, "is_client_affine")
@property
@pulumi.getter(name="lockDuration")
def lock_duration(self) -> Optional[str]:
"""
        ISO 8601 lock duration timespan for the subscription. The default value is 1 minute.
"""
return pulumi.get(self, "lock_duration")
@property
@pulumi.getter(name="maxDeliveryCount")
def max_delivery_count(self) -> Optional[int]:
"""
Number of maximum deliveries.
"""
return pulumi.get(self, "max_delivery_count")
@property
@pulumi.getter(name="messageCount")
def message_count(self) -> float:
"""
Number of messages.
"""
return pulumi.get(self, "message_count")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="requiresSession")
def requires_session(self) -> Optional[bool]:
"""
Value indicating if a subscription supports the concept of sessions.
"""
return pulumi.get(self, "requires_session")
@property
@pulumi.getter
def status(self) -> Optional[str]:
"""
Enumerates the possible values for the status of a messaging entity.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
The system meta data relating to this resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="updatedAt")
def updated_at(self) -> str:
"""
The exact time the message was updated.
"""
return pulumi.get(self, "updated_at")
class AwaitableGetSubscriptionResult(GetSubscriptionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSubscriptionResult(
accessed_at=self.accessed_at,
auto_delete_on_idle=self.auto_delete_on_idle,
client_affine_properties=self.client_affine_properties,
count_details=self.count_details,
created_at=self.created_at,
dead_lettering_on_filter_evaluation_exceptions=self.dead_lettering_on_filter_evaluation_exceptions,
dead_lettering_on_message_expiration=self.dead_lettering_on_message_expiration,
default_message_time_to_live=self.default_message_time_to_live,
duplicate_detection_history_time_window=self.duplicate_detection_history_time_window,
enable_batched_operations=self.enable_batched_operations,
forward_dead_lettered_messages_to=self.forward_dead_lettered_messages_to,
forward_to=self.forward_to,
id=self.id,
is_client_affine=self.is_client_affine,
lock_duration=self.lock_duration,
max_delivery_count=self.max_delivery_count,
message_count=self.message_count,
name=self.name,
requires_session=self.requires_session,
status=self.status,
system_data=self.system_data,
type=self.type,
updated_at=self.updated_at)
def get_subscription(namespace_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
subscription_name: Optional[str] = None,
topic_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSubscriptionResult:
"""
Description of subscription resource.
:param str namespace_name: The namespace name
:param str resource_group_name: Name of the Resource group within the Azure subscription.
:param str subscription_name: The subscription name.
:param str topic_name: The topic name.
"""
__args__ = dict()
__args__['namespaceName'] = namespace_name
__args__['resourceGroupName'] = resource_group_name
__args__['subscriptionName'] = subscription_name
__args__['topicName'] = topic_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:servicebus/v20210601preview:getSubscription', __args__, opts=opts, typ=GetSubscriptionResult).value
return AwaitableGetSubscriptionResult(
accessed_at=__ret__.accessed_at,
auto_delete_on_idle=__ret__.auto_delete_on_idle,
client_affine_properties=__ret__.client_affine_properties,
count_details=__ret__.count_details,
created_at=__ret__.created_at,
dead_lettering_on_filter_evaluation_exceptions=__ret__.dead_lettering_on_filter_evaluation_exceptions,
dead_lettering_on_message_expiration=__ret__.dead_lettering_on_message_expiration,
default_message_time_to_live=__ret__.default_message_time_to_live,
duplicate_detection_history_time_window=__ret__.duplicate_detection_history_time_window,
enable_batched_operations=__ret__.enable_batched_operations,
forward_dead_lettered_messages_to=__ret__.forward_dead_lettered_messages_to,
forward_to=__ret__.forward_to,
id=__ret__.id,
is_client_affine=__ret__.is_client_affine,
lock_duration=__ret__.lock_duration,
max_delivery_count=__ret__.max_delivery_count,
message_count=__ret__.message_count,
name=__ret__.name,
requires_session=__ret__.requires_session,
status=__ret__.status,
system_data=__ret__.system_data,
type=__ret__.type,
updated_at=__ret__.updated_at)
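

# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the generated SDK).
# The namespace, resource group, topic and subscription names are assumptions.
#
#   import pulumi
#
#   sub = get_subscription(namespace_name="my-namespace",
#                          resource_group_name="my-resource-group",
#                          topic_name="my-topic",
#                          subscription_name="my-subscription")
#   pulumi.export("subscriptionStatus", sub.status)
#   pulumi.export("messageCount", sub.message_count)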
| 1.59375 | 2 |
py_cfeve/module/CFAF240400E0-030TN-A1.py | crystalfontz/CFA-EVE-Python-Library | 1 | 2077 | <reponame>crystalfontz/CFA-EVE-Python-Library<gh_stars>1-10
#===========================================================================
#
# Crystalfontz Raspberry-Pi Python example library for FTDI / BridgeTek
# EVE graphic accelerators.
#
#---------------------------------------------------------------------------
#
# This file is part of the port/adaptation of existing C based EVE libraries
# to Python for Crystalfontz EVE based displays.
#
# 2021-10-20 <NAME> / Crystalfontz America Inc.
# https://www.crystalfontz.com/products/eve-accelerated-tft-displays.php
#---------------------------------------------------------------------------
#
# This is free and unencumbered software released into the public domain.
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# For more information, please refer to <http://unlicense.org/>
#
#============================================================================
#EVE Device Type
EVE_DEVICE = 811
# EVE Clock Speed
EVE_CLOCK_SPEED = 60000000
# Touch
TOUCH_RESISTIVE = False
TOUCH_CAPACITIVE = False
TOUCH_GOODIX_CAPACITIVE = False
# Define RGB output pins order, determined by PCB layout
LCD_SWIZZLE = 2
# Define active edge of PCLK. Observed by scope:
# 0: Data is put out coincident with falling edge of the clock.
# Rising edge of the clock is in the middle of the data.
# 1: Data is put out coincident with rising edge of the clock.
# Falling edge of the clock is in the middle of the data.
LCD_PCLKPOL = 0
# LCD drive strength: 0=5mA, 1=10mA
LCD_DRIVE_10MA = 0
# Spread Spectrum on RGB signals. Probably not a good idea at higher
# PCLK frequencies.
LCD_PCLK_CSPREAD = 0
#This is not a 24-bit display, so dither
LCD_DITHER = 0
# Pixel clock divisor
LCD_PCLK = 5
#----------------------------------------------------------------------------
# Frame_Rate = 60Hz / 16.7mS
#----------------------------------------------------------------------------
# Horizontal timing
# Target 60Hz frame rate, using the largest possible line time in order to
# maximize the time that the EVE has to process each line.
HPX = 240 # Horizontal Pixel Width
HSW = 10 # Horizontal Sync Width
HBP = 20 # Horizontal Back Porch
HFP = 10 # Horizontal Front Porch
HPP = 209 # Horizontal Pixel Padding
# FTDI needs at least 1 here
# Define the constants needed by the EVE based on the timing
# Active width of LCD display
LCD_WIDTH = HPX
# Start of horizontal sync pulse
LCD_HSYNC0 = HFP
# End of horizontal sync pulse
LCD_HSYNC1 = HFP+HSW
# Start of active line
LCD_HOFFSET = HFP+HSW+HBP
# Total number of clocks per line
LCD_HCYCLE = HPX+HFP+HSW+HBP+HPP
#----------------------------------------------------------------------------
# Vertical timing
VLH = 400 # Vertical Line Height
VS = 2 # Vertical Sync (in lines)
VBP = 2 # Vertical Back Porch
VFP = 4 # Vertical Front Porch
VLP = 1 # Vertical Line Padding
# FTDI needs at least 1 here
# Define the constants needed by the EVE based on the timing
# Active height of LCD display
LCD_HEIGHT = VLH
# Start of vertical sync pulse
LCD_VSYNC0 = VFP
# End of vertical sync pulse
LCD_VSYNC1 = VFP+VS
# Start of active screen
LCD_VOFFSET = VFP+VS+VBP
# Total number of lines per screen
LCD_VCYCLE = VLH+VFP+VS+VBP+VLP | 1.96875 | 2 |
quapy/model_selection.py | OneToolsCollection/HLT-ISTI-QuaPy | 0 | 2078 | import itertools
import signal
from copy import deepcopy
from typing import Union, Callable
import numpy as np
import quapy as qp
from quapy.data.base import LabelledCollection
from quapy.evaluation import artificial_prevalence_prediction, natural_prevalence_prediction, gen_prevalence_prediction
from quapy.method.aggregative import BaseQuantifier
import inspect
from quapy.util import _check_sample_size
class GridSearchQ(BaseQuantifier):
"""Grid Search optimization targeting a quantification-oriented metric.
Optimizes the hyperparameters of a quantification method, based on an evaluation method and on an evaluation
protocol for quantification.
:param model: the quantifier to optimize
:type model: BaseQuantifier
:param param_grid: a dictionary with keys the parameter names and values the list of values to explore
    :param sample_size: the size of the samples to extract from the validation set (ignored if protocol='gen')
:param protocol: either 'app' for the artificial prevalence protocol, 'npp' for the natural prevalence
protocol, or 'gen' for using a custom sampling generator function
:param n_prevpoints: if specified, indicates the number of equally distant points to extract from the interval
[0,1] in order to define the prevalences of the samples; e.g., if n_prevpoints=5, then the prevalences for
each class will be explored in [0.00, 0.25, 0.50, 0.75, 1.00]. If not specified, then eval_budget is requested.
Ignored if protocol!='app'.
:param n_repetitions: the number of repetitions for each combination of prevalences. This parameter is ignored
for the protocol='app' if eval_budget is set and is lower than the number of combinations that would be
generated using the value assigned to n_prevpoints (for the current number of classes and n_repetitions).
Ignored for protocol='npp' and protocol='gen' (use eval_budget for setting a maximum number of samples in
those cases).
:param eval_budget: if specified, sets a ceil on the number of evaluations to perform for each hyper-parameter
combination. For example, if protocol='app', there are 3 classes, n_repetitions=1 and eval_budget=20, then
n_prevpoints will be set to 5, since this will generate 15 different prevalences, i.e., [0, 0, 1],
[0, 0.25, 0.75], [0, 0.5, 0.5] ... [1, 0, 0], and since setting it to 6 would generate more than
20. When protocol='gen', indicates the maximum number of samples to generate, but less samples will be
generated if the generator yields less samples.
:param error: an error function (callable) or a string indicating the name of an error function (valid ones
are those in qp.error.QUANTIFICATION_ERROR
:param refit: whether or not to refit the model on the whole labelled collection (training+validation) with
the best chosen hyperparameter combination. Ignored if protocol='gen'
:param val_split: either a LabelledCollection on which to test the performance of the different settings, or
a float in [0,1] indicating the proportion of labelled data to extract from the training set, or a callable
returning a generator function each time it is invoked (only for protocol='gen').
:param n_jobs: number of parallel jobs
:param random_seed: set the seed of the random generator to replicate experiments. Ignored if protocol='gen'.
:param timeout: establishes a timer (in seconds) for each of the hyperparameters configurations being tested.
Whenever a run takes longer than this timer, that configuration will be ignored. If all configurations end up
being ignored, a TimeoutError exception is raised. If -1 (default) then no time bound is set.
:param verbose: set to True to get information through the stdout
"""
def __init__(self,
model: BaseQuantifier,
param_grid: dict,
sample_size: Union[int, None] = None,
protocol='app',
n_prevpoints: int = None,
n_repetitions: int = 1,
eval_budget: int = None,
error: Union[Callable, str] = qp.error.mae,
refit=True,
val_split=0.4,
n_jobs=1,
random_seed=42,
timeout=-1,
verbose=False):
self.model = model
self.param_grid = param_grid
self.sample_size = sample_size
self.protocol = protocol.lower()
self.n_prevpoints = n_prevpoints
self.n_repetitions = n_repetitions
self.eval_budget = eval_budget
self.refit = refit
self.val_split = val_split
self.n_jobs = n_jobs
self.random_seed = random_seed
self.timeout = timeout
self.verbose = verbose
self.__check_error(error)
assert self.protocol in {'app', 'npp', 'gen'}, \
'unknown protocol: valid ones are "app" or "npp" for the "artificial" or the "natural" prevalence ' \
            'protocols. Use protocol="gen" when passing a generator function through val_split that yields a ' \
'sample (instances) and their prevalence (ndarray) at each iteration.'
assert self.eval_budget is None or isinstance(self.eval_budget, int)
if self.protocol in ['npp', 'gen']:
if self.protocol=='npp' and (self.eval_budget is None or self.eval_budget <= 0):
raise ValueError(f'when protocol="npp" the parameter eval_budget should be '
f'indicated (and should be >0).')
if self.n_repetitions != 1:
print('[warning] n_repetitions has been set and will be ignored for the selected protocol')
def _sout(self, msg):
if self.verbose:
print(f'[{self.__class__.__name__}]: {msg}')
def __check_training_validation(self, training, validation):
if isinstance(validation, LabelledCollection):
return training, validation
elif isinstance(validation, float):
assert 0. < validation < 1., 'validation proportion should be in (0,1)'
training, validation = training.split_stratified(train_prop=1 - validation, random_state=self.random_seed)
return training, validation
elif self.protocol=='gen' and inspect.isgenerator(validation()):
return training, validation
else:
raise ValueError(f'"validation" must either be a LabelledCollection or a float in (0,1) indicating the'
f'proportion of training documents to extract (type found: {type(validation)}). '
f'Optionally, "validation" can be a callable function returning a generator that yields '
f'the sample instances along with their true prevalence at each iteration by '
f'setting protocol="gen".')
def __check_error(self, error):
if error in qp.error.QUANTIFICATION_ERROR:
self.error = error
elif isinstance(error, str):
self.error = qp.error.from_name(error)
elif hasattr(error, '__call__'):
self.error = error
else:
raise ValueError(f'unexpected error type; must either be a callable function or a str representing\n'
f'the name of an error function in {qp.error.QUANTIFICATION_ERROR_NAMES}')
def __generate_predictions(self, model, val_split):
commons = {
'n_repetitions': self.n_repetitions,
'n_jobs': self.n_jobs,
'random_seed': self.random_seed,
'verbose': False
}
if self.protocol == 'app':
return artificial_prevalence_prediction(
model, val_split, self.sample_size,
n_prevpoints=self.n_prevpoints,
eval_budget=self.eval_budget,
**commons
)
elif self.protocol == 'npp':
return natural_prevalence_prediction(
model, val_split, self.sample_size,
**commons)
elif self.protocol == 'gen':
return gen_prevalence_prediction(model, gen_fn=val_split, eval_budget=self.eval_budget)
else:
raise ValueError('unknown protocol')
def fit(self, training: LabelledCollection, val_split: Union[LabelledCollection, float, Callable] = None):
""" Learning routine. Fits methods with all combinations of hyperparameters and selects the one minimizing
the error metric.
:param training: the training set on which to optimize the hyperparameters
:param val_split: either a LabelledCollection on which to test the performance of the different settings, or
a float in [0,1] indicating the proportion of labelled data to extract from the training set
:return: self
"""
if val_split is None:
val_split = self.val_split
training, val_split = self.__check_training_validation(training, val_split)
if self.protocol != 'gen':
self.sample_size = _check_sample_size(self.sample_size)
params_keys = list(self.param_grid.keys())
params_values = list(self.param_grid.values())
model = self.model
if self.timeout > 0:
def handler(signum, frame):
self._sout('timeout reached')
raise TimeoutError()
signal.signal(signal.SIGALRM, handler)
self.param_scores_ = {}
self.best_score_ = None
some_timeouts = False
for values in itertools.product(*params_values):
params = dict({k: values[i] for i, k in enumerate(params_keys)})
if self.timeout > 0:
signal.alarm(self.timeout)
try:
# overrides default parameters with the parameters being explored at this iteration
model.set_params(**params)
model.fit(training)
true_prevalences, estim_prevalences = self.__generate_predictions(model, val_split)
score = self.error(true_prevalences, estim_prevalences)
self._sout(f'checking hyperparams={params} got {self.error.__name__} score {score:.5f}')
if self.best_score_ is None or score < self.best_score_:
self.best_score_ = score
self.best_params_ = params
self.best_model_ = deepcopy(model)
self.param_scores_[str(params)] = score
if self.timeout > 0:
signal.alarm(0)
except TimeoutError:
print(f'timeout reached for config {params}')
some_timeouts = True
if self.best_score_ is None and some_timeouts:
raise TimeoutError('all jobs took more than the timeout time to end')
self._sout(f'optimization finished: best params {self.best_params_} (score={self.best_score_:.5f})')
if self.refit:
self._sout(f'refitting on the whole development set')
self.best_model_.fit(training + val_split)
return self
def quantify(self, instances):
"""Estimate class prevalence values using the best model found after calling the :meth:`fit` method.
        :param instances: sample containing the instances
        :return: a ndarray of shape `(n_classes)` with class prevalence estimates according to the best model found
by the model selection process.
"""
assert hasattr(self, 'best_model_'), 'quantify called before fit'
return self.best_model().quantify(instances)
@property
def classes_(self):
"""
Classes on which the quantifier has been trained on.
:return: a ndarray of shape `(n_classes)` with the class identifiers
"""
return self.best_model().classes_
def set_params(self, **parameters):
"""Sets the hyper-parameters to explore.
:param parameters: a dictionary with keys the parameter names and values the list of values to explore
"""
self.param_grid = parameters
def get_params(self, deep=True):
"""Returns the dictionary of hyper-parameters to explore (`param_grid`)
:param deep: Unused
:return: the dictionary `param_grid`
"""
return self.param_grid
def best_model(self):
"""
Returns the best model found after calling the :meth:`fit` method, i.e., the one trained on the combination
of hyper-parameters that minimized the error function.
:return: a trained quantifier
"""
if hasattr(self, 'best_model_'):
return self.best_model_
raise ValueError('best_model called before fit')
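

# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original module).
# The review dataset and the PACC quantifier are assumptions: any
# LabelledCollection and any BaseQuantifier exposing fit/quantify/set_params
# could be plugged in instead.
if __name__ == '__main__':
    from sklearn.linear_model import LogisticRegression
    from quapy.method.aggregative import PACC

    qp.environ['SAMPLE_SIZE'] = 100
    dataset = qp.datasets.fetch_reviews('kindle', tfidf=True, min_df=5)

    grid = GridSearchQ(
        model=PACC(LogisticRegression()),
        param_grid={'C': [0.1, 1.0, 10.0], 'class_weight': ['balanced', None]},
        protocol='app',
        eval_budget=100,  # caps the number of validation samples per configuration
        error='mae',
        refit=True,
        verbose=True)
    grid.fit(dataset.training)

    print(f'best hyper-parameters: {grid.best_params_}')
    print(f'estimated test prevalence: {grid.quantify(dataset.test.instances)}')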
| 2.203125 | 2 |
flasky.py | ZxShane/slam_hospital | 0 | 2079 | # -*- coding: utf-8 -*-
import os
from flask_migrate import Migrate
from app import create_app, db
from app.models import User, Role, PoseToLocation
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
migrate = Migrate(app, db)
# For creating new migrations, these models must be imported here so Flask-Migrate can discover them
@app.shell_context_processor
def make_shell_context():
return dict(db=db, User=User, Role=Role, PoseToLocation=PoseToLocation)
# Unit tests
@app.cli.command()
def test():
""" run the unit tests """
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
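

# (Illustration, not part of the original file) these entry points are driven
# through the Flask CLI, for example:
#   FLASK_APP=flasky.py flask shell   # shell with db, User, Role, PoseToLocation preloaded
#   FLASK_APP=flasky.py flask test    # runs the unit tests discovered under tests/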
| 2.46875 | 2 |
python/day09/smoke_basin.py | aesdeef/advent-of-code-2021 | 2 | 2080 | INPUT_FILE = "../../input/09.txt"
Point = tuple[int, int]
Heightmap = dict[Point, int]
Basin = set[Point]
def parse_input() -> Heightmap:
"""
Parses the input and returns a Heightmap
"""
with open(INPUT_FILE) as f:
heights = [[int(x) for x in line.strip()] for line in f]
heightmap: Heightmap = dict()
for (y, row) in enumerate(heights):
for (x, height) in enumerate(row):
heightmap[(x, y)] = height
return heightmap
def get_surrounding_points(heightmap: Heightmap, point: Point) -> set[Point]:
"""
Returns a set of surrounding points within the heightmap
"""
x, y = point
return {
(x - 1, y),
(x, y - 1),
(x, y + 1),
(x + 1, y),
} & heightmap.keys()
def get_surrounding_heights(heightmap: Heightmap, point: Point) -> set[int]:
"""
Returns the heights of points surrounding the given point
"""
surrounding_points = get_surrounding_points(heightmap, point)
return {heightmap[point] for point in surrounding_points}
def get_low_points(heightmap: Heightmap) -> set[Point]:
"""
Finds the low points on the heightmap
"""
low_points: set[Point] = set()
for point in heightmap:
surrounding_heights = get_surrounding_heights(heightmap, point)
if all(heightmap[point] < height for height in surrounding_heights):
low_points.add(point)
return low_points
def solve_part1(heightmap: Heightmap, low_points: set[Point]) -> int:
"""
Calculates the sum of the risk levels of all low points
"""
return sum(1 + heightmap[point] for point in low_points)
def get_basins(heightmap: Heightmap, low_points: set[Point]) -> list[Basin]:
"""
Finds all basins on the heightmap
"""
basins: list[Basin] = []
for low_point in low_points:
basin: Basin = set()
points_to_consider = {low_point}
while points_to_consider:
point = points_to_consider.pop()
if heightmap[point] == 9:
continue
surrounding_points = get_surrounding_points(heightmap, point)
points_to_consider.update(surrounding_points - basin)
basin.add(point)
basins.append(basin)
return basins
def solve_part2(heightmap: Heightmap, low_points: set[Point]) -> int:
"""
Calculates the product of the sizes of the three largest basins
"""
basins = get_basins(heightmap, low_points)
basin_sizes = sorted((len(basin) for basin in basins), reverse=True)
return basin_sizes[0] * basin_sizes[1] * basin_sizes[2]
if __name__ == "__main__":
heightmap = parse_input()
low_points = get_low_points(heightmap)
part1 = solve_part1(heightmap, low_points)
part2 = solve_part2(heightmap, low_points)
print(part1)
print(part2)
| 4.0625 | 4 |
playground.py | NHGmaniac/voctoconfig | 0 | 2081 | <filename>playground.py
#!/usr/bin/env python3
import signal
import logging
import sys
from gi.repository import GObject
GObject.threads_init()
import time
from lib.args import Args
from lib.loghandler import LogHandler
import lib.connection as Connection
def testCallback(args):
log = logging.getLogger("Test")
log.info(str(args))
class Voctoconfig(object):
def __init__(self):
self.log = logging.getLogger("Voctoconfig")
self.log.debug("Creating GObject Mainloop")
self.mainloop = GObject.MainLoop()
def run(self):
self.log.info("Running MainLoop")
try:
self.mainloop.run()
except KeyboardInterrupt:
self.log.info("Terminated via KeyboardInterrupt")
def quit(self):
self.log.info("Quitting MainLoop")
self.mainloop.quit()
def main():
docolor = (Args.color == 'always') or (Args.color == 'auto' and
sys.stderr.isatty())
loghandler = LogHandler(docolor, Args.timestamp)
logging.root.addHandler(loghandler)
if Args.verbose >= 2:
level = logging.DEBUG
elif Args.verbose == 1:
level = logging.INFO
else:
level = logging.WARNING
logging.root.setLevel(level)
logging.debug('setting SIGINT handler')
signal.signal(signal.SIGINT, signal.SIG_DFL)
Connection.establish(Args.host)
Connection.enterNonblockingMode()
Connection.on("message", testCallback)
mainloop = GObject.MainLoop()
mainloop.run()
while True:
logging.debug("mimimi...")
Connection.send("message", "test2")
time.sleep(10)
if __name__ == '__main__':
main() | 2.46875 | 2 |
tianshou/utils/logger/tensorboard.py | Aceticia/tianshou | 4,714 | 2082 | <reponame>Aceticia/tianshou<filename>tianshou/utils/logger/tensorboard.py
import warnings
from typing import Any, Callable, Optional, Tuple
from tensorboard.backend.event_processing import event_accumulator
from torch.utils.tensorboard import SummaryWriter
from tianshou.utils.logger.base import LOG_DATA_TYPE, BaseLogger
class TensorboardLogger(BaseLogger):
"""A logger that relies on tensorboard SummaryWriter by default to visualize \
and log statistics.
:param SummaryWriter writer: the writer to log data.
:param int train_interval: the log interval in log_train_data(). Default to 1000.
:param int test_interval: the log interval in log_test_data(). Default to 1.
:param int update_interval: the log interval in log_update_data(). Default to 1000.
:param int save_interval: the save interval in save_data(). Default to 1 (save at
the end of each epoch).
"""
def __init__(
self,
writer: SummaryWriter,
train_interval: int = 1000,
test_interval: int = 1,
update_interval: int = 1000,
save_interval: int = 1,
) -> None:
super().__init__(train_interval, test_interval, update_interval)
self.save_interval = save_interval
self.last_save_step = -1
self.writer = writer
def write(self, step_type: str, step: int, data: LOG_DATA_TYPE) -> None:
for k, v in data.items():
self.writer.add_scalar(k, v, global_step=step)
def save_data(
self,
epoch: int,
env_step: int,
gradient_step: int,
save_checkpoint_fn: Optional[Callable[[int, int, int], None]] = None,
) -> None:
if save_checkpoint_fn and epoch - self.last_save_step >= self.save_interval:
self.last_save_step = epoch
save_checkpoint_fn(epoch, env_step, gradient_step)
self.write("save/epoch", epoch, {"save/epoch": epoch})
self.write("save/env_step", env_step, {"save/env_step": env_step})
self.write(
"save/gradient_step", gradient_step,
{"save/gradient_step": gradient_step}
)
def restore_data(self) -> Tuple[int, int, int]:
ea = event_accumulator.EventAccumulator(self.writer.log_dir)
ea.Reload()
try: # epoch / gradient_step
epoch = ea.scalars.Items("save/epoch")[-1].step
self.last_save_step = self.last_log_test_step = epoch
gradient_step = ea.scalars.Items("save/gradient_step")[-1].step
self.last_log_update_step = gradient_step
except KeyError:
epoch, gradient_step = 0, 0
try: # offline trainer doesn't have env_step
env_step = ea.scalars.Items("save/env_step")[-1].step
self.last_log_train_step = env_step
except KeyError:
env_step = 0
return epoch, env_step, gradient_step
class BasicLogger(TensorboardLogger):
"""BasicLogger has changed its name to TensorboardLogger in #427.
This class is for compatibility.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
warnings.warn(
"Deprecated soon: BasicLogger has renamed to TensorboardLogger in #427."
)
super().__init__(*args, **kwargs)
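

# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only; the log directory and trainer wiring
# below are assumptions, not part of this module):
#
#   writer = SummaryWriter("log/dqn_example")
#   logger = TensorboardLogger(writer, train_interval=1000, update_interval=1000)
#   # resume bookkeeping from an interrupted run, if checkpoints were saved:
#   start_epoch, env_step, gradient_step = logger.restore_data()
#   # then pass `logger=logger` to a tianshou trainer such as offpolicy_trainer.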
| 2.15625 | 2 |
PythonAPI/pythonwrappers/jetfuel/gui/menu.py | InsightGit/JetfuelGameEngine | 4 | 2083 | # Jetfuel Game Engine- A SDL-based 2D game-engine
# Copyright (C) 2018 InfernoStudios
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ctypes import c_uint
from ctypes import c_int
from ctypes import c_void_p
from ctypes import c_bool
from ctypes import c_wchar_p
from jetfuel.draw.rectangleinterface import rectangle_interface
from jetfuel.draw.image import image
class menu(rectangle_interface):
def __init__(self, jetfuelsoloader, maxheight=None, columngap=None,
buttongap=None):
self._jetfuel = jetfuelsoloader.jetfuelso;
if(maxheight is not None and columngap is not None and
buttongap is not None):
self._jetfuel.Menu_new_from_heights_and_gaps.argtypes = [c_uint,
c_uint,
c_uint];
self._jetfuel.Menu_new_from_heights_and_gaps.restype = c_void_p;
self.drawableref = self._jetfuel.Menu_new_from_heights_and_gaps(
maxheight,
columngap,
buttongap);
else:
self._jetfuel.Menu_new.restype = c_void_p;
self.drawableref = self._jetfuel.Menu_new();
print("Constructed empty drawableref!");
def get_max_height(self):
self._jetfuel.Menu_get_max_height.argtypes = [c_void_p];
self._jetfuel.Menu_get_max_height.restype = c_uint;
return self._jetfuel.Menu_get_max_height(self.drawableref);
def set_max_height(self, maxheight):
self._jetfuel.Menu_set_max_height.argtypes = [c_void_p, c_uint];
self._jetfuel.Menu_set_max_height(self.drawableref, maxheight);
def get_column_gap(self):
self._jetfuel.Menu_get_column_gap.argtypes = [c_void_p];
self._jetfuel.Menu_get_column_gap.restype = c_uint;
return self._jetfuel.Menu_get_column_gap(self.drawableref);
def set_column_gap(self, columngap):
self._jetfuel.Menu_set_column_gap.argtypes = [c_void_p, c_uint];
        self._jetfuel.Menu_set_column_gap(self.drawableref, columngap);
def get_button_gap(self):
self._jetfuel.Menu_get_button_gap.argtypes = [c_void_p];
self._jetfuel.Menu_get_button_gap.restype = c_uint;
        return self._jetfuel.Menu_get_button_gap(self.drawableref);
def set_button_gap(self, buttongap):
        # Assumes the shared library exports Menu_set_button_gap; the original
        # call to Menu_set_max_height here looks like a copy-paste slip.
        self._jetfuel.Menu_set_button_gap.argtypes = [c_void_p, c_uint];
        self._jetfuel.Menu_set_button_gap(self.drawableref, buttongap);
def get_container_box_image(self, jetfuelsoloader):
self._jetfuel.Menu_get_container_box_image.argtypes = [c_void_p];
self._jetfuel.Menu_get_container_box_image.restype = c_void_p;
containerboximage = image(jetfuelsoloader);
self._jetfuel.Image_delete.argtypes = [c_void_p];
self._jetfuel.Image_delete(containerboximage.imageref);
containerboximage.imageref = self._jetfuel.Menu_get_container_box_image(
self.drawableref);
return containerboximage;
def set_container_box_image(self, image, borderwidth, borderheight):
self._jetfuel.Menu_set_container_box_image.argtypes = [c_void_p,
c_void_p, c_uint,
c_uint];
        self._jetfuel.Menu_set_container_box_image(self.drawableref,
                                                   image.imageref, borderwidth,
                                                   borderheight);
def get_container_box_border_width(self):
self._jetfuel.Menu_get_container_box_border_width.argtypes = [c_void_p];
self._jetfuel.Menu_get_container_box_border_width.restype = c_uint;
return self._jetfuel.Menu_get_container_box_border_width(
self.drawableref);
def get_container_box_border_height(self):
self._jetfuel.Menu_get_container_box_border_height.argtypes = [c_void_p];
self._jetfuel.Menu_get_container_box_border_height.restype = c_uint;
return self._jetfuel.Menu_get_container_box_border_height(
self.drawableref);
def add_button(self, buttoncharsreplacement, uisactiontowatchfor,
messagetosenduponclick, messagebus):
self._jetfuel.Menu_add_button.argtypes = [c_void_p, c_void_p,
c_wchar_p, c_wchar_p,
c_void_p];
self._jetfuel.Menu_add_button.restype = c_bool;
return self._jetfuel.Menu_add_button(self.drawableref,
buttoncharsreplacement.buttoncharsref,
uisactiontowatchfor,
messagetosenduponclick,
messagebus.messagebusref);
def get_position_x(self):
self._jetfuel.Menu_get_position_x.argtypes = [c_void_p];
self._jetfuel.Menu_get_position_x.restype = c_int;
        return self._jetfuel.Menu_get_position_x(self.drawableref);
def get_position_y(self):
self._jetfuel.Menu_get_position_y.argtypes = [c_void_p];
self._jetfuel.Menu_get_position_y.restype = c_int;
        return self._jetfuel.Menu_get_position_y(self.drawableref);
def set_position(self, x, y):
self._jetfuel.Menu_set_position.argtypes = [c_void_p, c_int, c_int];
self._jetfuel.Menu_set_position(self.drawableref, x, y);
def get_rect_to_draw_width(self):
self._jetfuel.Menu_get_rect_to_draw_width.argtypes = [c_void_p];
self._jetfuel.Menu_get_rect_to_draw_width.restype = c_int;
        return self._jetfuel.Menu_get_rect_to_draw_width(self.drawableref);
def get_rect_to_draw_height(self):
self._jetfuel.Menu_get_rect_to_draw_height.argtypes = [c_void_p];
self._jetfuel.Menu_get_rect_to_draw_height.restype = c_int;
        return self._jetfuel.Menu_get_rect_to_draw_height(self.drawableref);
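

# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only; how the jetfuelsoloader, button
# characters and message bus objects are constructed is not shown here, and the
# action/message strings are assumptions):
#
#   main_menu = menu(jetfuelsoloader, maxheight=600, columngap=15, buttongap=10)
#   main_menu.set_position(50, 50)
#   added = main_menu.add_button(buttonchars, "mousebuttonstate",
#                                "startgame", messagebus)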
| 2.671875 | 3 |
latent_programmer/decomposition_transformer_attention/train.py | ParikhKadam/google-research | 2 | 2084 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Train seq-to-seq model on random supervised training tasks."""
# pytype: disable=wrong-arg-count
# pytype: disable=attribute-error
import collections
import functools
import json
import os
import random
import sys
import time
from absl import app
from absl import flags
from absl import logging
from flax import jax_utils
from flax import linen as nn
from flax import optim
from flax.metrics import tensorboard
from flax.training import checkpoints
from flax.training import common_utils
import jax
import jax.numpy as jnp
import numpy as np
import tensorflow.compat.v2 as tf
from latent_programmer import decode
from latent_programmer import models as base_models
from latent_programmer.decomposition_transformer_attention import decomposition_models as models
from latent_programmer.decomposition_transformer_attention import input_pipeline
from latent_programmer.tasks.robust_fill import dsl
from latent_programmer.tasks.robust_fill import tokens as dsl_tokens
sys.path.append('../../')
gfile = tf.io.gfile
FLAGS = flags.FLAGS
flags.DEFINE_integer('seed', 0, 'Fixed random seed for training.')
flags.DEFINE_float('lr', 1e-3, 'Learning rate.')
flags.DEFINE_float('weight_decay', 1e-1,
'Decay factor for AdamW-style weight decay.')
flags.DEFINE_integer('embedding_dim', 256, 'Embedding dimension.')
flags.DEFINE_integer('hidden_dim', 512, 'Hidden dimension.')
flags.DEFINE_integer('num_heads', 4, 'Number of Transformer heads.')
flags.DEFINE_integer('num_layers', 3, 'Number of layers.')
flags.DEFINE_boolean('slow_decode', True, 'Use slow decoding for prediction?')
flags.DEFINE_string('dataset_filepattern', None,
'Filepattern for TFRecord dataset.')
flags.DEFINE_integer('per_device_batch_size', 16,
'Number of program tasks in a batch.')
flags.DEFINE_integer('num_strings_per_task', 4,
'Number of input/output strings per task.')
flags.DEFINE_integer('max_program_length', 100,
'Maximum number of tokens in program.')
flags.DEFINE_integer('max_characters', 120,
'Maximum number of characters in input/output strings.')
flags.DEFINE_string('save_dir', None, 'Directory to save results to.')
flags.DEFINE_integer('num_train_steps', 2000000, 'Number of training steps.')
flags.DEFINE_integer('num_eval_steps', 10, 'Number of evaluation steps.')
flags.DEFINE_integer('log_freq', 1000, 'Number of steps between training logs.')
flags.DEFINE_integer('eval_freq', 2000, 'Number of steps between eval.')
flags.DEFINE_integer('predict_freq', 50000,
'Number of steps between prediction (beam search).')
flags.DEFINE_integer('checkpoint_freq', 50000,
'Number of steps between checkpoint saves.')
flags.DEFINE_integer('finetune_start_step', -1,
'Step the initial checkpoint should start at for '
'finetuning, or -1 if not finetuning.')
flags.DEFINE_bool('restore_checkpoints', True,
'Whether to restore from existing model checkpoints.')
flags.DEFINE_string('attention_mask_type', 'bos_full_attention',
'The kind of attention mask to use. Options are: baseline, '
'bos_to_bos, bos_full_attention')
flags.DEFINE_bool('use_relative_attention', True,
                  'Whether to use relative positional embeddings.')
flags.DEFINE_bool('bos_special_attention', False,
'Whether to use special relative attention computation for '
'BOS tokens.')
_internal = False
if not _internal:
flags.DEFINE_string('xm_parameters', None,
                      'String specifying hyperparameter search.')
def create_learning_rate_scheduler(
base_learning_rate=0.5,
factors='constant * linear_warmup * rsqrt_normalized_decay',
warmup_steps=16000,
decay_factor=0.5,
steps_per_decay=50000,
steps_per_cycle=100000):
"""Creates learning rate schedule.
Interprets factors in the factors string which can consist of:
* constant: interpreted as the constant value,
* linear_warmup: interpreted as linear warmup until warmup_steps,
* rsqrt_decay: divide by square root of max(step, warmup_steps)
* decay_every: Every k steps decay the learning rate by decay_factor.
* cosine_decay: Cyclic cosine decay, uses steps_per_cycle parameter.
Args:
base_learning_rate: float, the starting constant for the lr schedule.
factors: a string with factors separated by '*' that defines the schedule.
warmup_steps: how many steps to warm up for in the warmup schedule.
decay_factor: The amount to decay the learning rate by.
steps_per_decay: How often to decay the learning rate.
steps_per_cycle: Steps per cycle when using cosine decay.
Returns:
A function learning_rate(step): float -> {'learning_rate': float}, the
step-dependent lr.
"""
factors = [n.strip() for n in factors.split('*')]
def step_fn(step):
"""Step to learning rate function."""
ret = 1.0
for name in factors:
if name == 'constant':
ret *= base_learning_rate
elif name == 'linear_warmup':
ret *= jnp.minimum(1.0, step / warmup_steps)
elif name == 'rsqrt_decay':
ret /= jnp.sqrt(jnp.maximum(1.0, step - warmup_steps))
elif name == 'rsqrt_normalized_decay':
ret *= jnp.sqrt(warmup_steps)
ret /= jnp.sqrt(jnp.maximum(step, warmup_steps))
elif name == 'decay_every':
ret *= (decay_factor**(step // steps_per_decay))
elif name == 'cosine_decay':
progress = jnp.maximum(0.0,
(step - warmup_steps) / float(steps_per_cycle))
ret *= jnp.maximum(0.0,
0.5 * (1.0 + jnp.cos(jnp.pi * (progress % 1.0))))
else:
raise ValueError('Unknown factor %s.' % name)
return jnp.asarray(ret, dtype=jnp.float32)
return step_fn
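
# Hedged illustration (added; not part of the original script): with the default
# factor string 'constant * linear_warmup * rsqrt_normalized_decay', the rate
# warms up linearly over `warmup_steps` and then decays as 1/sqrt(step), e.g.:
#
#   lr_fn = create_learning_rate_scheduler(base_learning_rate=1e-3,
#                                          warmup_steps=16000)
#   lr_fn(8000)    # ~5e-4: halfway through the linear warmup
#   lr_fn(16000)   # ~1e-3: warmup done, normalized decay factor equals 1
#   lr_fn(64000)   # ~5e-4: 1e-3 * sqrt(16000 / 64000)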
def compute_weighted_cross_entropy(logits, targets, weights=None):
"""Compute weighted cross entropy and entropy for log probs and targets.
Args:
logits: `[batch, length, num_classes]` float array.
targets: categorical targets `[batch, length]` int array.
weights: None or array of shape [batch, length, 1]
Returns:
Tuple of scalar loss and batch normalizing factor.
"""
if logits.ndim != targets.ndim + 1:
raise ValueError('Incorrect shapes. Got shape %s logits and %s targets' %
(str(logits.shape), str(targets.shape)))
onehot_targets = common_utils.onehot(targets, logits.shape[-1])
loss = -jnp.sum(onehot_targets * nn.log_softmax(logits), axis=-1)
normalizing_factor = jnp.prod(jnp.asarray(targets.shape))
if weights is not None:
loss = loss * weights
normalizing_factor = weights.sum()
return loss.sum(), normalizing_factor
def compute_weighted_accuracy(logits, targets, weights=None):
"""Compute weighted accuracy for log probs and targets.
Args:
logits: `[batch, length, num_classes]` float array.
targets: categorical targets `[batch, length]` int array.
weights: None or array of shape [batch, length, 1]
Returns:
Tuple of scalar accuracy and batch normalizing factor.
"""
if logits.ndim != targets.ndim + 1:
raise ValueError('Incorrect shapes. Got shape %s logits and %s targets' %
(str(logits.shape), str(targets.shape)))
acc = jnp.equal(jnp.argmax(logits, axis=-1), targets)
normalizing_factor = jnp.prod(jnp.asarray(targets.shape))
if weights is not None:
acc = acc * weights
normalizing_factor = weights.sum()
return acc.sum(), normalizing_factor
def compute_metrics(logits, targets, weights):
"""Compute summary metrics."""
loss, weight_sum = compute_weighted_cross_entropy(logits, targets, weights)
acc, _ = compute_weighted_accuracy(logits, targets, weights)
metrics = {
'loss': loss,
'accuracy': acc,
'denominator': weight_sum,
}
metrics = jax.lax.psum(metrics, 'batch')
return metrics
# Train / eval / decode step functions.
# -----------------------------------------------------------------------------
def train_step(optimizer,
inputs,
outputs,
programs,
learning_rate_fn,
config,
dropout_rng):
"""Train on batch of program tasks."""
# We handle PRNG splitting inside the top pmap, rather
# than handling it outside in the training loop - doing the
# latter can add some stalls to the devices.
dropout_rng, new_dropout_rng = jax.random.split(dropout_rng)
weights = jnp.where(programs > 0, 1, 0).astype(jnp.float32)
def loss_fn(params):
"""Loss function used for training."""
logits = models.DecomposeAttentionTransformer(config).apply(
{'params': params},
inputs,
outputs,
programs,
rngs={'dropout': dropout_rng})
loss, weight_sum = compute_weighted_cross_entropy(logits, programs, weights)
mean_loss = loss / weight_sum
return mean_loss, logits
step = optimizer.state.step
lr = learning_rate_fn(step)
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
(_, logits), grad = grad_fn(optimizer.target)
grad = jax.lax.pmean(grad, 'batch')
new_optimizer = optimizer.apply_gradient(grad, learning_rate=lr)
# Get metrics.
metrics = compute_metrics(logits, programs, weights)
metrics['learning_rate'] = lr
return new_optimizer, metrics, new_dropout_rng
def eval_step(params, inputs, outputs, programs, eos_token, config):
"""Collect metrics for evaluation during training."""
weights = jnp.where(
jnp.logical_and(programs > 0,
jnp.logical_and(programs != config.base_config.bos_token,
programs != eos_token)),
1, 0).astype(jnp.float32)
logits = models.DecomposeAttentionTransformer(config).apply(
{'params': params}, inputs, outputs, programs)
return compute_metrics(logits, programs, weights)
def initialize_cache(inputs, outputs, programs, max_decode_len, config):
"""Initialize a cache for a given input shape and max decode length."""
target_shape = (programs.shape[0], max_decode_len)
dtype = config.base_config.dtype
initial_variables = models.DecomposeAttentionTransformer(config).init(
jax.random.PRNGKey(0),
jnp.ones(inputs.shape, dtype),
jnp.ones(outputs.shape, dtype),
jnp.ones(target_shape, dtype))
return initial_variables['cache']
def predict_step(params,
inputs,
outputs,
cache,
beam_size,
eos_token,
max_decode_len,
config,
slow_decode=True):
"""Predict translation with fast decoding beam search on a batch."""
# Prepare transformer fast-decoder call for beam search: for beam search, we
# need to set up our decoder model to handle a batch size equal to
# batch_size * beam_size, where each batch item's data is expanded in-place
# rather than tiled.
flat_encoded = decode.flat_batch_beam_expand(
models.DecomposeAttentionTransformer(config).apply(
{'params': params},
inputs,
outputs,
method=models.DecomposeAttentionTransformer.encode),
beam_size)
encoded_padding_mask = jnp.where(outputs > 0, 1, 0).astype(jnp.float32)
flat_encoded_padding_mask = decode.flat_batch_beam_expand(
encoded_padding_mask, beam_size)
if slow_decode:
def tokens_ids_to_logits(flat_ids):
"""Token slice to logits from decoder model."""
# --> [batch * beam, 1, vocab]
flat_logits = models.DecomposeAttentionTransformer(config=config).apply(
{'params': params},
flat_ids,
flat_encoded,
flat_encoded_padding_mask,
method=models.DecomposeAttentionTransformer.decode)
return flat_logits
else:
def tokens_ids_to_logits(flat_ids, flat_cache):
"""Token slice to logits from decoder model."""
# --> [batch * beam, 1, vocab]
flat_logits, new_vars = models.DecomposeAttentionTransformer(
config=config).apply(
{'params': params, 'cache': flat_cache},
flat_ids,
flat_encoded,
flat_encoded_padding_mask,
mutable=['cache'],
method=models.DecomposeAttentionTransformer.decode)
new_flat_cache = new_vars['cache']
# Remove singleton sequence-length dimension:
# [batch * beam, 1, vocab] --> [batch * beam, vocab]
flat_logits = flat_logits.squeeze(axis=1)
return flat_logits, new_flat_cache
# Using the above-defined single-step decoder function, run a
# beam search over possible sequences given input encoding.
beam_seqs, _ = decode.beam_search(
inputs,
cache,
tokens_ids_to_logits,
beam_size=beam_size,
alpha=0.6,
bos_token=config.base_config.bos_token,
eos_token=eos_token,
max_decode_len=max_decode_len,
slow_decode=slow_decode)
# Beam search returns [n_batch, n_beam, n_length] with beam dimension
# sorted in increasing order of log-probability.
return beam_seqs
# Util functions for prediction
# -----------------------------------------------------------------------------
def pad_examples(x, desired_batch_size):
"""Expand batch to desired size by repeating last slice."""
batch_pad = desired_batch_size - x.shape[0]
tile_dims = [1] * len(x.shape)
tile_dims[0] = batch_pad
return np.concatenate([x, np.tile(x[-1], tile_dims)], axis=0)
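# Illustrative example: pad_examples(x, 8) on an array of shape [5, ...] repeats the
# last slice x[-1] three times so the result has shape [8, ...]; this keeps the final
# odd-sized prediction batch divisible across devices instead of dropping it.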
def tohost(x):
"""Collect batches from all devices to host and flatten batch dimensions."""
n_device, n_batch, *remaining_dims = x.shape
return x.reshape((n_device * n_batch,) + tuple(remaining_dims))
# tohost collapses the leading device and per-device batch dimensions, e.g.
# [8, 4, ...] -> [32, ...], so per-device outputs can be iterated as one batch.
def per_host_sum_pmap(in_tree):
"""Execute psum on in_tree's leaves over one device per host."""
host2devices = collections.defaultdict(list)
for d in jax.devices():
host2devices[d.host_id].append(d)
devices = [host2devices[k][0] for k in host2devices]
host_psum = jax.pmap(lambda x: jax.lax.psum(x, 'i'), 'i', devices=devices)
def pre_pmap(xs):
return jax.tree_map(lambda x: jnp.broadcast_to(x, (1,) + x.shape), xs)
def post_pmap(xs):
return jax.tree_map(lambda x: x[0], xs)
return post_pmap(host_psum(pre_pmap(in_tree)))
def eval_predicted(predicted, inputs, outputs, parse_beam_fn):
"""Evaluate predicted program beams."""
best_p, best_score = None, -1
# predicted shape [beam_size, length]
for beam in predicted[::-1]:
try:
p = parse_beam_fn(beam)
p_outs = [p(inp) for inp in inputs]
score = np.sum([p_out == out for p_out, out in zip(p_outs, outputs)])
if score > best_score:
best_p, best_score = p, score
except: # pylint: disable=bare-except
pass
if best_score >= len(inputs): # Found solution.
break
return best_p, best_score
def shorten(key):
splits = key.split('_')
return ''.join(s[0] for s in splits)
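# Illustrative example: shorten('per_device_batch_size') returns 'pdbs'; with
# hparam_str_dict = {'seed': 0, 'lr': 0.001} the hparam string built in main() below
# becomes 'l=0.001,s=0'.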
def main(_):
tf.enable_v2_behavior()
tf.random.set_seed(FLAGS.seed)
np.random.seed(FLAGS.seed)
random.seed(FLAGS.seed)
# BOS special attention only makes sense if we are using relative attention
# and it's not the baseline.
if FLAGS.bos_special_attention and (not FLAGS.use_relative_attention or
FLAGS.attention_mask_type == 'baseline'):
raise ValueError(
"bos_special_attention doesn't work when use_relative_attention={} and "
'attention_mask_type={}'.format(FLAGS.use_relative_attention,
FLAGS.attention_mask_type))
if not gfile.isdir(FLAGS.save_dir):
gfile.makedirs(FLAGS.save_dir)
hparam_str_dict = dict(seed=FLAGS.seed, lr=FLAGS.lr)
  # Get hyperparameters.
if FLAGS.xm_parameters:
for key, value in json.loads(FLAGS.xm_parameters).items():
if key not in hparam_str_dict:
hparam_str_dict[key] = value
hparam_str = ','.join(['%s=%s' % (shorten(k), str(hparam_str_dict[k]))
for k in sorted(hparam_str_dict.keys())])
# Number of local devices for this host.
n_devices = jax.local_device_count()
if jax.host_id() == 0:
summary_writer = tensorboard.SummaryWriter(
os.path.join(FLAGS.save_dir, 'tb', hparam_str))
batch_size = FLAGS.per_device_batch_size * n_devices
io_shape = (FLAGS.per_device_batch_size,
FLAGS.num_strings_per_task,
FLAGS.max_characters)
program_shape = (FLAGS.per_device_batch_size, FLAGS.max_program_length)
# Setup DSL
# ---------------------------------------------------------------------------
# Build token tables.
id_char_table = {i+1: char for (i, char) in enumerate(dsl.CHARACTER)}
char_id_table = {char: id for id, char in id_char_table.items()}
id_token_table, token_id_table = dsl_tokens.build_token_tables()
io_vocab_size = len(char_id_table) + 1 # For padding.
program_vocab_size = len(token_id_table) + 1
bos_token = token_id_table[dsl.BOS]
eos_token = token_id_table[dsl.EOS]
# Parse io and program token sequences (for eval).
def decode_io(inputs, outputs):
"""Decode io examples tokens."""
def decode_str(s):
"""Decode string tokens."""
return ''.join([id_char_table[c_id] for c_id in s if c_id > 0])
inps, outs = [], []
for inp, out in zip(inputs, outputs):
inps.append(decode_str(inp))
outs.append(decode_str(out))
return inps, outs
def decode_program(program):
"""Decode program tokens."""
program = program[:np.argmax(program == eos_token) + 1].astype(np.int32)
program = program[program != bos_token]
try:
return dsl.decode_program(program.tolist(), id_token_table)
except: # pylint: disable=bare-except
return None # Program does not compile.
# Load Dataset
# ---------------------------------------------------------------------------
logging.info('Initializing dataset.')
if not FLAGS.dataset_filepattern:
raise ValueError('Must specify filepattern to dataset.')
# Training dataset.
logging.info('Loading dataset from %s', FLAGS.dataset_filepattern)
padded_shapes = (io_shape[1:], io_shape[1:], program_shape[1:])
logging.info('padded_shapes: %s', padded_shapes)
dataset = input_pipeline.create_dataset_from_tf_record(
FLAGS.dataset_filepattern, token_id_table, char_id_table)
dataset = dataset.padded_batch(
batch_size,
padded_shapes=padded_shapes,
drop_remainder=True)
# Split evaluation and training.
eval_ds = dataset.take(FLAGS.num_eval_steps)
  # Decrease the batch size of the predict dataset to handle beam search.
predict_ds = eval_ds.unbatch().padded_batch(
int(np.ceil(batch_size / 10)),
padded_shapes=padded_shapes)
train_ds = dataset.skip(FLAGS.num_eval_steps).repeat()
train_iter = train_ds.as_numpy_iterator()
# Build Model and Optimizer
# ---------------------------------------------------------------------------
use_dropout = False
base_config = base_models.TransformerConfig(
vocab_size=io_vocab_size,
output_vocab_size=program_vocab_size,
shift=True,
emb_dim=FLAGS.embedding_dim,
num_heads=FLAGS.num_heads,
num_layers=FLAGS.num_layers,
qkv_dim=FLAGS.embedding_dim,
mlp_dim=FLAGS.hidden_dim,
max_len=max(FLAGS.max_characters, FLAGS.max_program_length),
use_relative_attention=FLAGS.use_relative_attention,
deterministic=not use_dropout,
decode=False,
bos_token=bos_token)
train_config = models.DecomposeAttentionTransformerConfig(
base_config=base_config,
attention_mask_type=FLAGS.attention_mask_type,
bos_special_attention=FLAGS.bos_special_attention)
eval_config = models.DecomposeAttentionTransformerConfig(
base_config=base_config.replace(deterministic=not use_dropout),
attention_mask_type=FLAGS.attention_mask_type,
bos_special_attention=FLAGS.bos_special_attention)
predict_config = models.DecomposeAttentionTransformerConfig(
base_config=base_config.replace(
shift=False, deterministic=not use_dropout,
decode=not FLAGS.slow_decode),
attention_mask_type=FLAGS.attention_mask_type,
bos_special_attention=FLAGS.bos_special_attention)
rng = jax.random.PRNGKey(FLAGS.seed)
rng = jax.random.fold_in(rng, jax.host_id())
rng, init_rng = jax.random.split(rng)
m = models.DecomposeAttentionTransformer(eval_config)
initial_variables = jax.jit(m.init)(
{'params': init_rng, 'dropout': init_rng},
jnp.ones(io_shape, jnp.float32),
jnp.ones(io_shape, jnp.float32),
jnp.ones(program_shape, jnp.float32))
optimizer_def = optim.Adam(
FLAGS.lr,
beta1=0.9,
beta2=0.98,
eps=1e-9,
weight_decay=FLAGS.weight_decay)
optimizer = optimizer_def.create(initial_variables['params'])
del initial_variables # Don't keep a copy of the initial model.
start_step = 0
if FLAGS.restore_checkpoints:
# Restore unreplicated optimizer + model state from last checkpoint.
optimizer = checkpoints.restore_checkpoint(
os.path.join(FLAGS.save_dir, 'checkpoints', hparam_str), optimizer)
# Grab last step.
start_step = int(optimizer.state.step)
logging.info('Found model checkpointed at step %d.', start_step)
if FLAGS.finetune_start_step > 0:
logging.info('Checking that start_step (%s) == finetune_start_step (%s)',
start_step, FLAGS.finetune_start_step)
assert start_step == FLAGS.finetune_start_step
# Replicate optimizer.
optimizer = jax_utils.replicate(optimizer)
# TODO(jxihong): Implement fast decoding.
assert FLAGS.slow_decode, 'Fast decoding is not implemented yet.'
if FLAGS.finetune_start_step <= 0:
learning_rate_fn = create_learning_rate_scheduler(
base_learning_rate=FLAGS.lr)
else:
# Constant LR for finetuning.
learning_rate_fn = create_learning_rate_scheduler(
base_learning_rate=FLAGS.lr,
factors='constant')
p_train_step = jax.pmap(
functools.partial(
train_step,
learning_rate_fn=learning_rate_fn,
config=train_config),
axis_name='batch')
p_eval_step = jax.pmap(
functools.partial(eval_step,
eos_token=eos_token,
config=eval_config),
axis_name='batch')
p_init_cache = jax.pmap(
functools.partial(
initialize_cache,
max_decode_len=FLAGS.max_program_length,
config=predict_config),
axis_name='batch')
p_pred_step = jax.pmap(
functools.partial(
predict_step,
eos_token=eos_token,
max_decode_len=FLAGS.max_program_length,
config=predict_config,
slow_decode=FLAGS.slow_decode),
axis_name='batch',
static_broadcasted_argnums=(4,))
# Main Train Loop
# ---------------------------------------------------------------------------
dropout_rng = jax.random.split(rng, jax.local_device_count())
del rng
metrics_all = []
tick = time.time()
for step in range(start_step, FLAGS.num_train_steps):
inputs, outputs, programs = common_utils.shard(next(train_iter))
optimizer, metrics, dropout_rng = p_train_step(
optimizer, inputs, outputs, programs, dropout_rng=dropout_rng)
metrics_all.append(metrics)
is_last_step = step == FLAGS.num_train_steps - 1
# Save a Checkpoint
if (step % FLAGS.checkpoint_freq == 0 and step > 0) or is_last_step:
if jax.host_id() == 0:
# Save unreplicated optimizer + model state.
checkpoints.save_checkpoint(
os.path.join(FLAGS.save_dir, 'checkpoints', hparam_str),
jax_utils.unreplicate(optimizer),
step)
# Periodic metric handling.
# Training Metrics
if (step and step % FLAGS.log_freq == 0) or is_last_step:
logging.info('Gathering training metrics.')
metrics_all = common_utils.get_metrics(metrics_all)
lr = metrics_all.pop('learning_rate').mean()
metrics_sums = jax.tree_map(jnp.sum, metrics_all)
denominator = metrics_sums.pop('denominator')
summary = jax.tree_map(
lambda x: x / denominator, # pylint: disable=cell-var-from-loop
metrics_sums)
summary['learning_rate'] = lr
# Calculate (clipped) perplexity after averaging log-perplexities:
summary['perplexity'] = jnp.clip(jnp.exp(summary['loss']), a_max=1.0e4)
if jax.host_id() == 0:
logging.info('Train in step: %d, loss: %.4f', step, summary['loss'])
tock = time.time()
steps_per_sec = FLAGS.log_freq / (tock - tick)
tick = tock
summary_writer.scalar('train/steps per second', steps_per_sec, step)
for key, val in summary.items():
summary_writer.scalar('train/' + key, val, step)
summary_writer.flush()
# Reset metric accumulation for next evaluation cycle.
metrics_all = []
# Evaluation Metrics
if (step and step % FLAGS.eval_freq == 0) or is_last_step:
logging.info('Gathering evaluation metrics.')
t_evaluation_start = time.time()
eval_metrics = []
for batches in eval_ds.as_numpy_iterator():
inputs, outputs, programs = common_utils.shard(batches)
metrics = p_eval_step(optimizer.target, inputs, outputs, programs)
eval_metrics.append(metrics)
eval_metrics = common_utils.get_metrics(eval_metrics)
eval_metrics_sums = jax.tree_map(jnp.sum, eval_metrics)
eval_denominator = eval_metrics_sums.pop('denominator')
eval_summary = jax.tree_map(
lambda x: x / eval_denominator, # pylint: disable=cell-var-from-loop
eval_metrics_sums)
if jax.host_id() == 0:
logging.info('Evaluation time: %.4f s step %d, loss: %.4f.',
time.time()-t_evaluation_start, step, eval_summary['loss'])
for key, val in eval_summary.items():
summary_writer.scalar('eval/' + key, val, step)
summary_writer.flush()
# Beam search metrics.
if (step and step % FLAGS.predict_freq == 0) or is_last_step:
logging.info('Gathering beam search metrics.')
for beam_size in [1, 5, 10, 20, 50]:
t_inference_start = time.time()
pred_acc = 0
pred_denominator = 0
ios, targets, predictions, top_of_beams = [], [], [], []
for batches in predict_ds.as_numpy_iterator():
pred_batch = batches
# Handle final odd-sized batch by padding instead of dropping it.
cur_pred_batch_size = pred_batch[0].shape[0]
if cur_pred_batch_size % n_devices:
padded_size = int(
np.ceil(cur_pred_batch_size / n_devices) * n_devices)
# pylint: disable=cell-var-from-loop
pred_batch = jax.tree_map(
lambda x: pad_examples(x, padded_size), pred_batch)
inputs, outputs, programs = common_utils.shard(pred_batch)
cache = (p_init_cache(inputs, outputs, programs)
if not FLAGS.slow_decode else None)
predicted = p_pred_step(optimizer.target, inputs, outputs, cache,
beam_size)
predicted = tohost(predicted)
inputs, outputs, programs = map(tohost, (inputs, outputs, programs))
pred_denominator += programs.shape[0]
for i, beams in enumerate(predicted):
inps, outs = decode_io(inputs[i], outputs[i])
p, p_score = eval_predicted(
beams, inps, outs, parse_beam_fn=decode_program)
if p_score >= len(inps):
pred_acc += 1
ios.append(' ; '.join(map(str, zip(inps, outs))))
targets.append(decode_program(programs[i]).to_string())
try:
predictions.append(p.to_string())
except: # pylint: disable=bare-except
predictions.append('Did not compile')
logging.info('ios: %s', ios[-1])
logging.info('target: %s', targets[-1])
beams_log = []
for beam in beams:
try:
beams_log.append(decode_program(beam).to_string())
except: # pylint: disable=bare-except
beams_log.append('Did not compile')
logging.info('predicted beam: %s', '\n'.join(beams_log))
top_of_beam = []
for index, beam in enumerate(beams[:-5:-1]):
try:
decoded_program = decode_program(beam).to_string()
except: # pylint: disable=bare-except
decoded_program = 'Did not compile'
top_of_beam.append('index: {}, decoded: {}, tokens: {}'.format(
index, decoded_program, beam))
top_of_beams.append('\n\n'.join(top_of_beam))
all_pred_acc, all_pred_denominator = per_host_sum_pmap(
jax.tree_map(np.array, (pred_acc, pred_denominator)))
# Record beam search results as text summaries.
message = []
for n in np.random.choice(np.arange(len(predictions)), 8):
text = (f'ios: {ios[n]}\n\ntarget: {targets[n]}\n\n'
f'predicted: {predictions[n]}\n\n'
f'top of beam:\n\n{top_of_beams[n]}\n\n')
message.append(text)
# Write to tensorboard.
if jax.host_id() == 0:
slow_or_fast = 'slow' if FLAGS.slow_decode else 'fast'
logging.info(
'Prediction time, %s (beam %d): %.4f s, step %d, score %.4f',
slow_or_fast, beam_size, time.time() - t_inference_start, step,
all_pred_acc / all_pred_denominator)
summary_writer.scalar(
'predict-{}/score-{}'.format(slow_or_fast, beam_size),
all_pred_acc / all_pred_denominator, step)
summary_writer.text('samples-{}'.format(beam_size),
'\n------\n'.join(message), step)
summary_writer.flush()
if __name__ == '__main__':
app.run(main)
| 1.523438 | 2 |
plot/profile_interpolation/plot_profile.py | ziyixi/SeisScripts | 0 | 2085 | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import click
import numba
def prepare_data(data_pd, parameter):
lon_set = set(data_pd["lon"])
lat_set = set(data_pd["lat"])
dep_set = set(data_pd["dep"])
lon_list = sorted(lon_set)
lat_list = sorted(lat_set)
dep_list = sorted(dep_set)
lon_mesh, lat_mesh, dep_mesh = np.meshgrid(
lon_list, lat_list, dep_list, indexing="ij")
dx, dy, dz = np.shape(lon_mesh)
value_mesh = np.zeros_like(lon_mesh)
x_mesh = np.zeros_like(lon_mesh)
y_mesh = np.zeros_like(lon_mesh)
z_mesh = np.zeros_like(lon_mesh)
r_mesh = np.zeros_like(lon_mesh)
for i in range(dx):
for j in range(dy):
for k in range(dz):
x_mesh[i, j, k], y_mesh[i, j, k], z_mesh[i, j, k], r_mesh[i, j, k] = lld2xyzr(
lat_mesh[i, j, k], lon_mesh[i, j, k], dep_mesh[i, j, k])
for index, row in data_pd.iterrows():
i = int(round((row.lon-lon_list[0])/(lon_list[1]-lon_list[0]), 0))
j = int(round((row.lat-lat_list[0])/(lat_list[1]-lat_list[0]), 0))
k = int(round((row.dep-dep_list[0])/(dep_list[1]-dep_list[0]), 0))
value_mesh[i, j, k] = row[parameter]
return x_mesh, y_mesh, z_mesh, value_mesh
def get_value(data_pd, lat, lon, dep, parameter):
return data_pd.loc[(data_pd.lat == lat) & (data_pd.lon == lon) & (data_pd.dep == dep)][parameter].values[0]
@numba.njit()
def lld2xyzr(lat, lon, dep):
R_EARTH_KM = 6371.0
r = (R_EARTH_KM-dep)/R_EARTH_KM
theta = 90-lat
phi = lon
z = r*cosd(theta)
h = r*sind(theta)
x = h*cosd(phi)
y = h*sind(phi)
return (x, y, z, r)
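# Worked example (illustrative): lld2xyzr(lat=0.0, lon=0.0, dep=0.0) gives r = 1
# (the surface of the normalized Earth), theta = 90 and phi = 0, so the returned
# Cartesian tuple is approximately (1.0, 0.0, 0.0, 1.0).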
@numba.njit()
def cosd(x):
return np.cos(np.deg2rad(x))
@numba.njit()
def sind(x):
return np.sin(np.deg2rad(x))
# def get_value_func(x_mesh, y_mesh, z_mesh, value_mesh):
# value_func = RegularGridInterpolator(
# (x_mesh, y_mesh, z_mesh), value_mesh, method="nearest")
# return value_func
@numba.njit()
def interp_value(lat, lon, dep, x_mesh, y_mesh, z_mesh, value_mesh):
x, y, z, _ = lld2xyzr(lat, lon, dep)
distance2 = (x_mesh-x)**2+(y_mesh-y)**2+(z_mesh-z)**2
mindistance2 = np.min(distance2)
coors = np.where(distance2 == mindistance2)
value = value_mesh[coors[0][0], coors[1][0], coors[2][0]]
return value
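# Note (illustrative): interp_value is a brute-force nearest-neighbour lookup -- the
# query point is converted to Cartesian coordinates, the mesh node with the smallest
# squared distance is located, and that node's value is returned; no interpolation
# between neighbouring nodes is performed.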
def generate_vertical_profile_grids(lon_list, lat_list, dep_list, hnpts, vnpts):
lons = np.linspace(lon_list[0], lon_list[1], hnpts)
lats = np.linspace(lat_list[0], lat_list[1], hnpts)
deps = np.linspace(dep_list[0], dep_list[1], vnpts)
return lons, lats, deps
@click.command()
@click.option('--lon1', required=True, type=float, help="lon1")
@click.option('--lon2', required=True, type=float, help="lon2")
@click.option('--lat1', required=True, type=float, help="lat1")
@click.option('--lat2', required=True, type=float, help="lat2")
@click.option('--dep1', required=True, type=float, help="dep1")
@click.option('--dep2', required=True, type=float, help="dep2")
@click.option('--data', required=True, type=str, help="the pickle file")
@click.option('--parameter', required=True, type=str, help="physicial parameter to plot")
@click.option('--hnpts', required=True, type=int, help="horizontal npts")
@click.option('--vnpts', required=True, type=int, help="vertical npts")
def main(lon1, lon2, lat1, lat2, dep1, dep2, data, parameter, hnpts, vnpts):
lon_list = [lon1, lon2]
lat_list = [lat1, lat2]
dep_list = [dep1, dep2]
data_pd_raw = pd.read_pickle(data)
# data_pd is too big
minlon = min(lon1, lon2)
maxlon = max(lon1, lon2)
minlat = min(lat1, lat2)
maxlat = max(lat1, lat2)
mindep = min(dep1, dep2)
maxdep = max(dep1, dep2)
    data_pd = data_pd_raw.loc[
        (data_pd_raw.lat <= maxlat) & (data_pd_raw.lat >= minlat) &
        (data_pd_raw.lon < maxlon) & (data_pd_raw.lon > minlon) &
        (data_pd_raw.dep >= mindep) & (data_pd_raw.dep <= maxdep)]
x_mesh, y_mesh, z_mesh, value_mesh = prepare_data(data_pd, parameter)
lons_plot, lats_plot, deps_plot = generate_vertical_profile_grids(
lon_list, lat_list, dep_list, hnpts, vnpts)
values = np.zeros((hnpts, vnpts))
for ih in range(hnpts):
for iv in range(vnpts):
values[ih, iv] = interp_value(
lats_plot[ih], lons_plot[ih], deps_plot[iv], x_mesh, y_mesh, z_mesh, value_mesh)
# print(lats_plot[ih], lons_plot[ih], deps_plot[iv], values[ih, iv])
# plotting part
plt.figure()
mesh_plot_lat, mesh_plot_dep = np.meshgrid(
lats_plot, deps_plot, indexing="ij")
# get vmin and vmax
vmin_round = round(np.min(values), 2)
if(vmin_round < np.min(values)):
vmin = vmin_round
else:
vmin = vmin_round-0.01
vmax_round = round(np.max(values), 2)
if(vmax_round > np.max(values)):
vmax = vmax_round
else:
vmax = vmax_round+0.01
print(vmin, vmax, np.max(values), np.min(values), vmin_round, vmax_round)
plt.contourf(mesh_plot_lat, mesh_plot_dep,
values, 101, cmap=plt.cm.seismic_r)
v = np.arange(vmin, vmax, 0.01)
plt.colorbar(ticks=v, label="perturbation")
plt.gca().invert_yaxis()
plt.xlabel(
f"latitude(°) between (lon: {lon1}°, lat: {lat1}°) and (lon: {lon2}°, lat: {lat2}°)")
plt.ylabel("depth(km)")
plt.show()
if __name__ == "__main__":
main()
| 2.703125 | 3 |
tests/test_heroku.py | edpaget/flask-appconfig | 61 | 2086 | from flask import Flask
from flask_appconfig import HerokuConfig
def create_sample_app():
app = Flask('testapp')
HerokuConfig(app)
return app
def test_herokupostgres(monkeypatch):
monkeypatch.setenv('HEROKU_POSTGRESQL_ORANGE_URL', 'heroku-db-uri')
app = create_sample_app()
assert app.config['SQLALCHEMY_DATABASE_URI'] == 'heroku-db-uri'
| 2.453125 | 2 |
flask/util/logger.py | Dev-Jahn/cms | 0 | 2087 | import logging
"""
Formatter
"""
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d:%H:%M:%S')
"""
Set Flask logger
"""
logger = logging.getLogger('FLASK_LOG')
logger.setLevel(logging.DEBUG)
stream_log = logging.StreamHandler()
stream_log.setFormatter(formatter)
logger.addHandler(stream_log)
# if disabled
# logger.disabled = True
| 2.65625 | 3 |
utils/backups/backup_psql.py | Krovatkin/NewsBlur | 0 | 2088 | #!/usr/bin/python3
import os
import sys
import socket
CURRENT_DIR = os.path.dirname(__file__)
NEWSBLUR_DIR = ''.join([CURRENT_DIR, '/../../'])
sys.path.insert(0, NEWSBLUR_DIR)
os.environ['DJANGO_SETTINGS_MODULE'] = 'newsblur_web.settings'
import threading
class ProgressPercentage(object):
def __init__(self, filename):
self._filename = filename
self._size = float(os.path.getsize(filename))
self._seen_so_far = 0
self._lock = threading.Lock()
def __call__(self, bytes_amount):
# To simplify, assume this is hooked up to a single filename
with self._lock:
self._seen_so_far += bytes_amount
percentage = (self._seen_so_far / self._size) * 100
sys.stdout.write(
"\r%s %s / %s (%.2f%%)" % (
self._filename, self._seen_so_far, self._size,
percentage))
sys.stdout.flush()
import time
import boto3
from django.conf import settings
BACKUP_DIR = '/srv/newsblur/backup/'
s3 = boto3.client('s3', aws_access_key_id=settings.S3_ACCESS_KEY, aws_secret_access_key=settings.S3_SECRET)
hostname = socket.gethostname().replace('-','_')
s3_object_name = f'backup_{hostname}/backup_{hostname}_{time.strftime("%Y-%m-%d-%H-%M")}.sql'
path = os.listdir(BACKUP_DIR)[0]
full_path = os.path.join(BACKUP_DIR, path)
print('Uploading %s to %s on S3 bucket %s' % (full_path, s3_object_name, settings.S3_BACKUP_BUCKET))
s3.upload_file(full_path, settings.S3_BACKUP_BUCKET, s3_object_name, Callback=ProgressPercentage(full_path))
os.remove(full_path)
| 1.875 | 2 |
onap_tests/scenario/solution.py | Orange-OpenSource/xtesting-onap-tests | 0 | 2089 | <reponame>Orange-OpenSource/xtesting-onap-tests<filename>onap_tests/scenario/solution.py
#!/usr/bin/python
#
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# pylint: disable=missing-docstring
# pylint: disable=duplicate-code
import logging
import time
import onap_tests.components.aai as aai
import onap_tests.components.so as so
import onap_tests.components.sdnc as sdnc
import onap_tests.components.nbi as nbi
import onap_tests.utils.stack_checker as sc
import onap_tests.utils.utils as onap_utils
PROXY = onap_utils.get_config("general.proxy")
class Solution(object):
"""
VNF: Class to automate the instantiation of a VNF
    It is assumed that the Design phase has already been done
The yaml template is available and stored in the template directory
TODO: automate the design phase
"""
__logger = logging.getLogger(__name__)
def __init__(self, **kwargs):
"""Initialize Solution object."""
super(Solution, self).__init__()
self.vnf_config = {}
self.components = {}
if "case" not in kwargs:
# by convention is VNF is not precised we set mrf
kwargs["case"] = "mrf"
self.vnf_config["vnf"] = kwargs["case"]
if "nbi" in kwargs:
self.vnf_config["nbi"] = kwargs["nbi"]
# can be useful to destroy resources, sdnc module name shall be given
if "sdnc_vnf_name" in kwargs:
self.vnf_config["sdnc_vnf_name"] = kwargs["sdnc_vnf_name"]
            # Random part = last 6 chars of the vnf name
self.vnf_config["random_string"] = kwargs["sdnc_vnf_name"][-6:]
else:
self.vnf_config["random_string"] = (
onap_utils.random_string_generator())
self.vnf_config["sdnc_vnf_name"] = (
onap_utils.get_config("onap.service.name") + "_" +
kwargs["case"] + "_" + self.vnf_config["random_string"])
vnf_list = list(onap_utils.get_template_param(
self.vnf_config["vnf"],
"topology_template.node_templates"))
vf_module_list = list(onap_utils.get_template_param(
self.vnf_config["vnf"],
"topology_template.groups"))
# Class attributes for instance, vnf and module VF
self.service_infos = {}
self.vnf_infos = {'list': vnf_list}
self.module_infos = {'list': vf_module_list}
# retrieve infos from the configuration files
self.set_service_instance_var()
self.set_vnf_var()
self.set_module_var()
self.set_onap_components()
def set_service_instance_var(self):
"""
set service instance variables from the config file
"""
self.vnf_config["vnf_name"] = onap_utils.get_template_param(
self.vnf_config["vnf"], "metadata.name")
self.vnf_config["invariant_uuid"] = onap_utils.get_template_param(
self.vnf_config["vnf"], "metadata.invariantUUID")
self.vnf_config["uuid"] = onap_utils.get_template_param(
self.vnf_config["vnf"], "metadata.UUID")
def set_vnf_var(self):
"""
set vnf variables from the config file
"""
for i, elt in enumerate(self.vnf_infos['list']):
vnf_config = {}
self.__logger.info("get VNF %s info", elt)
vnf_config["vnf_customization_name"] = elt
vnf_config["vnf_model_name"] = onap_utils.get_template_param(
self.vnf_config["vnf"], "topology_template.node_templates." +
vnf_config["vnf_customization_name"] + ".metadata.name")
vnf_config["vnf_invariant_id"] = onap_utils.get_template_param(
self.vnf_config["vnf"], "topology_template.node_templates." +
vnf_config["vnf_customization_name"] +
".metadata.invariantUUID")
vnf_config["vnf_version_id"] = onap_utils.get_template_param(
self.vnf_config["vnf"], "topology_template.node_templates." +
vnf_config["vnf_customization_name"] + ".metadata.UUID")
vnf_config["vnf_customization_id"] = (
onap_utils.get_template_param(
self.vnf_config["vnf"],
"topology_template.node_templates." +
vnf_config["vnf_customization_name"] +
".metadata.customizationUUID"))
vnf_config["vnf_type"] = list(onap_utils.get_template_param(
self.vnf_config["vnf"], "topology_template.groups"))[i]
vnf_config["vnf_generic_name"] = (
self.vnf_config["vnf_name"] + "-service-instance-" +
self.vnf_config["random_string"])
vnf_config["vnf_generic_type"] = (
self.vnf_config["vnf_name"] + "/" +
vnf_config["vnf_customization_name"])
self.vnf_config[elt] = vnf_config
def set_module_var(self):
"""
set module variables from the config file
"""
for elt in self.vnf_infos['list']:
vf_config = {}
            # we cannot be sure that the modules are in the same order
            # as the vnfs
vf_index = onap_utils.get_vf_module_index(
self.module_infos['list'],
elt)
vnf_type = list(onap_utils.get_template_param(
self.vnf_config["vnf"],
"topology_template.groups"))[vf_index]
self.__logger.info("Complete Module info for VNF %s", elt)
vf_config["sdnc_vnf_type"] = onap_utils.get_template_param(
self.vnf_config["vnf"], "topology_template.groups." +
vnf_type +
".metadata.vfModuleModelName")
vnf_param = (self.vnf_config["vnf"] + "." +
str(elt) + ".vnf_parameters")
vf_config["vnf_parameters"] = onap_utils.get_config(vnf_param)
vf_config["module_invariant_id"] = onap_utils.get_template_param(
self.vnf_config["vnf"], "topology_template.groups." +
vnf_type + ".metadata.vfModuleModelInvariantUUID")
vf_config["module_name_version_id"] = (
onap_utils.get_template_param(
self.vnf_config["vnf"], "topology_template.groups." +
vnf_type + ".metadata.vfModuleModelUUID"))
vf_config["module_customization_id"] = (
onap_utils.get_template_param(
self.vnf_config["vnf"], "topology_template.groups." +
vnf_type + ".metadata.vfModuleModelCustomizationUUID"))
vf_config["module_version_id"] = onap_utils.get_template_param(
self.vnf_config["vnf"], "topology_template.groups." +
vnf_type + ".metadata.vfModuleModelUUID")
self.vnf_config[elt].update(vf_config)
def set_onap_components(self):
"""
Set ONAP component objects
"""
self.components["aai"] = aai.Aai(PROXY, self.__logger)
self.components["so"] = so.So(PROXY, self.__logger)
self.components["sdnc"] = sdnc.Sdnc(PROXY, self.__logger)
self.components["nbi"] = nbi.Nbi(PROXY, self.__logger)
def instantiate(self):
"""
Instantiate a VNF with ONAP
* Create the service instance (SO)
* Create the VNF instance (SO)
* preload the VNF in the SDNC
* Create the VF module instance (SO)
"""
instance_info = {"instance_id": ""}
vnf_info = {"vnf_id": ""}
module_info = {}
module_ref = {"instanceId": ""}
module_ok = False
check_vnf = False
self.__logger.info("Start the instantiation of the VNF")
instance_info = self.create_service_instance()
service_ok = self.components["aai"].check_service_instance(
self.vnf_config["vnf_name"],
instance_info["instance_id"])
if service_ok:
# create VNF instance(s)
for elt in self.vnf_infos['list']:
vnf_info = self.create_vnf_instance(elt)
self.__logger.info("Check vnf %s ....", elt)
vnf_ok = True
self.__logger.info("Check vnf %s ....", elt)
if not self.components["aai"].check_vnf_instance(
vnf_info["vnf_id"]):
vnf_ok = False
break
else:
# preload VNF(s) in SDNC
self.preload(elt)
time.sleep(10)
if vnf_ok:
# create VF module(s)
for elt in self.vnf_infos['list']:
module_info = self.create_module_instance(elt)
module_ok = True
module_ref = module_info['module_instance']
if not self.components["aai"].check_module_instance(
vnf_info["vnf_id"],
module_ref["requestReferences"]["instanceId"]):
module_ok = False
break
else:
# check VNF using OpenStack directly
check_vnf = self.check_vnf(
self.module_infos[elt]["module_instance_name"])
if check_vnf:
self.__logger.info("Stack successfully checked")
return {"status": module_ok,
"instance_id": instance_info,
"vnf_info": vnf_info,
"module_info": module_info,
"check_heat": check_vnf}
def clean(self):
"""
Clean VNF from ONAP
Args:
instance_id: The ID of the VNF service instance
vnf_id: The ID of the VNF instance
module_id: The ID of the VF module instance
"""
instance_id = self.service_infos['instance_id']
for elt in self.vnf_infos['list']:
vnf_id = self.vnf_infos[elt]["vnf_id"]
module_id = (self.module_infos[elt]["module_instance"]
["requestReferences"]["instanceId"])
self.clean_module(elt)
if not self.components["aai"].check_module_cleaned(vnf_id,
module_id):
return False
else:
self.clean_vnf(elt)
if not self.components["aai"].check_vnf_cleaned(vnf_id):
return False
else:
self.clean_instance(instance_id)
if self.components["aai"].check_service_instance_cleaned(
self.vnf_config["vnf_name"], instance_id):
self.__logger.debug("Instance still in AAI DB")
else:
return False
time.sleep(10)
self.clean_preload(elt)
return True
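    # Note (illustrative): clean() tears resources down in reverse order of creation --
    # VF module, then VNF, then the service instance, and finally the SDNC preload --
    # checking AAI after each step and returning False if a cleanup check fails.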
def create_service_instance(self):
"""
Create service instance
2 options to create the instance
* with SO
* with NBI
"""
instance_id = None
model_info = self.components["so"].get_service_model_info(
self.vnf_config['invariant_uuid'], self.vnf_config['uuid'])
if self.vnf_config["nbi"]:
self.__logger.info("1) Create Service instance from NBI")
self.__logger.info("***********************************")
request_info = self.components["nbi"].get_request_info()
service_payload = (
self.components["nbi"].get_nbi_service_order_payload())
nbi_info = self.components["nbi"].create_service_order_nbi(
service_payload)
time.sleep(5)
instance_id = (
self.components["nbi"].get_service_instance_id_from_order(
nbi_info["id"]))
else:
self.__logger.info("1) Create Service instance in SO")
self.__logger.info("********************************")
request_info = self.components["so"].get_request_info(
self.vnf_config["vnf"] + "-service-instance-" +
self.vnf_config['random_string'])
service_payload = self.components["so"].get_service_payload(
self.vnf_config["vnf"],
request_info,
model_info)
instance_id = self.components["so"].create_instance(
service_payload)
service_instance_info = {"instance_id": instance_id,
"request_info": request_info,
"service_payload": service_payload}
self.__logger.info("Service instance created: %s",
service_instance_info)
self.service_infos = service_instance_info
return service_instance_info
def create_vnf_instance(self, elt):
"""
Create VNF instance
Args:
* elt: the VNF
"""
vnf_id = None
self.__logger.info("2) Create VNF instance in SO")
self.__logger.info("****************************")
model_info = self.components["so"].get_vnf_model_info(
self.vnf_config[elt]['vnf_invariant_id'],
self.vnf_config[elt]['vnf_version_id'],
self.vnf_config[elt]['vnf_model_name'],
self.vnf_config[elt]['vnf_customization_id'],
self.vnf_config[elt]['vnf_customization_name'])
vnf_related_instance = self.components["so"].get_vnf_related_instance(
self.service_infos["instance_id"],
self.vnf_config['invariant_uuid'],
self.vnf_config['uuid'])
vnf_instance_name = (self.vnf_config["vnf"] + "-vnf-instance-" +
str(elt).replace(" ", "_") + ("_") +
self.vnf_config['random_string'])
request_info = self.components["so"].get_request_info(
vnf_instance_name)
vnf_payload = self.components["so"].get_vnf_payload(
self.vnf_config["vnf"],
request_info,
model_info,
vnf_related_instance)
# self.__logger.debug("VNF payload: %s", vnf_payload)
vnf_id = self.components["so"].create_vnf(
self.service_infos["instance_id"],
vnf_payload)
vnf_info = {"vnf_id": vnf_id,
"vnf_instance_name": vnf_instance_name,
"vnf_payload": vnf_payload,
"vnf_related_instance": vnf_related_instance}
self.__logger.info(">>>> SO vnf instance created %s", vnf_info)
self.vnf_infos[elt] = vnf_info
return vnf_info
def preload(self, elt):
"""
Preload VNF in SDNC
Args:
* elt: the VNF
"""
vnf_preload_infos = {}
self.__logger.info("3) Preload VNF %s in SDNC", elt)
self.__logger.info("*******************************")
vnf_name = (self.vnf_config["vnf"] +
"-vfmodule-instance-" +
str(elt).replace(" ", "_") + "_" +
self.vnf_config['random_string'])
vnf_topology_identifier = {
"generic-vnf-name": vnf_name,
"generic-vnf-type": (
self.vnf_config[elt]['vnf_generic_type']),
"service-type": self.service_infos["instance_id"],
"vnf-name": vnf_name,
"vnf-type": self.vnf_config[elt]['sdnc_vnf_type']}
sdnc_payload = self.components["sdnc"].get_preload_payload(
self.vnf_config[elt]['vnf_parameters'],
vnf_topology_identifier)
self.__logger.info("SDNC preload payload %s", sdnc_payload)
sdnc_preload = self.components["sdnc"].preload(sdnc_payload)
self.__logger.debug("SDNC preload answer: %s", sdnc_preload)
vnf_preload_infos[elt] = ({"sdnc_payload": sdnc_payload,
"sdnc_preload": sdnc_preload})
return vnf_preload_infos[elt]
def create_module_instance(self, elt):
"""
Create module instance
Args:
* instance_info: dict including the instance_id, the request_info and
the service payload
* vnf_info: dict including the vnf_id, vnf_related_instance and the
vnf payload
"""
module_info = {}
self.__logger.info("4) Create MODULE %s instance in SO", elt)
self.__logger.info("***************************************")
module_model_info = self.components["so"].get_module_model_info(
self.vnf_config[elt]['module_invariant_id'],
self.vnf_config[elt]['module_name_version_id'],
self.vnf_config[elt]['sdnc_vnf_type'],
self.vnf_config[elt]['module_customization_id'],
self.vnf_config[elt]['module_version_id'])
module_related_instance = (
self.components["so"].get_module_related_instance(
self.vnf_infos[elt]["vnf_id"],
self.vnf_config[elt]['vnf_invariant_id'],
self.vnf_config[elt]['vnf_version_id'],
self.vnf_config[elt]['vnf_model_name'],
self.vnf_config[elt]['vnf_customization_id'],
self.vnf_config[elt]['vnf_customization_name']))
module_instance_name = (self.vnf_config["vnf"] +
"-vfmodule-instance-" +
str(elt).replace(" ", "_") + "_" +
self.vnf_config['random_string'])
request_info = self.components["so"].get_request_info(
module_instance_name)
module_payload = self.components["so"].get_module_payload(
self.vnf_config["vnf"],
request_info,
module_model_info,
self.vnf_infos[elt]["vnf_related_instance"],
module_related_instance)
self.__logger.debug("Module payload %s", module_payload)
module_instance = self.components["so"].create_module(
self.service_infos["instance_id"],
self.vnf_infos[elt]["vnf_id"],
module_payload)
self.__logger.info(">>>> Module instance created: %s", module_instance)
module_info = (
{'module_instance': module_instance,
'module_instance_name': module_instance_name,
'module_payload': module_payload,
'module_model_info': module_model_info,
'module_related_instance': module_related_instance})
self.__logger.info("SO module vf(s) created: %s", module_info)
self.module_infos[elt] = module_info
return module_info
def check_vnf(self, stack_name):
"""
Check VNF stack has been properly started
"""
check_vnf = False
try:
my_stack_checker = sc.StackChecker()
if my_stack_checker.check_stack_is_complete(stack_name):
check_vnf = True
except Exception: # pylint: disable=broad-except
self.__logger.error("Impossible to find the stack %s in OpenStack",
stack_name)
return check_vnf
def clean_instance(self, instance_id):
"""
Clean VNF instance
Args:
* instance_id: The service instance of the VNF
"""
self.__logger.info(" Clean Service Instance ")
service_payload = self.components["so"].get_service_payload(
self.vnf_config["vnf"],
self.components["so"].get_request_info(
self.vnf_config['sdnc_vnf_name']),
self.components["so"].get_service_model_info(
self.vnf_config['invariant_uuid'],
self.vnf_config['uuid']))
self.components["so"].delete_instance(instance_id, service_payload)
def clean_vnf(self, elt):
"""
Clean VNF
Args:
* instance_id: The service instance of the VNF
* vnf_id:The VNF id of the VNF
"""
self.__logger.info(" Clean vnf Instance %s ", elt)
self.components["so"].delete_vnf(
self.service_infos["instance_id"],
self.vnf_infos[elt]["vnf_id"],
self.vnf_infos[elt]["vnf_payload"])
def clean_module(self, elt):
"""
Clean VNF Module
Args:
* instance_id: The service instance id of the VNF
* vnf_id:The VNF id of the VNF
* module_id: the VF module id of the VNF
"""
self.__logger.info(" Clean Module VF Instance %s ", elt)
instance_id = self.service_infos["instance_id"]
vnf_id = self.vnf_infos[elt]["vnf_id"]
module_id = (self.module_infos[elt]["module_instance"]
["requestReferences"]["instanceId"])
module_payload = self.module_infos[elt]["module_payload"]
self.components["so"].delete_module(
module_payload,
instance_id,
vnf_id,
module_id)
def clean_preload(self, elt):
"""
Clean VNF SDNC preload
"""
self.__logger.info(" Clean Preload of %s ", elt)
        # if the expected preload cleanup fails we return False
clean_preload = self.components["sdnc"].delete_preload(
self.module_infos[elt]["module_instance_name"],
self.vnf_config[elt]["sdnc_vnf_type"])
return clean_preload
def clean_all_preload(self):
"""
Clean VNF SDNC preload with the preload id
"""
self.__logger.info(" Clean Preload ")
for elt in self.vnf_infos['list']:
clean_preload = self.components["sdnc"].delete_preload(
self.module_infos[elt]["module_instance_name"],
self.vnf_config[elt]['sdnc_vnf_type'])
return clean_preload
def get_info(self):
"""
Get VNFs Info
"""
self.__logger.info("Class to manage VNFs")
self.__logger.info("VNF config: %s", self.vnf_config)
| 1.96875 | 2 |
tutorials/Controls4Docs/ControlEventsGraph.py | dominic-dev/pyformsd | 0 | 2090 | <gh_stars>0
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "<NAME>"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
from __init__ import *
import random, time
from PyQt4 import QtCore
class SimpleExample(BaseWidget):
def __init__(self):
super(SimpleExample,self).__init__('Simple example')
#Definition of the forms fields
self._control0 = ControlEventsGraph('Check me')
self._control1 = ControlEventsGraph('Check me')
self._control2 = ControlEventsGraph('Check me')
self._control3 = ControlEventsGraph('Check me')
self._txt = ControlText('Time')
self._btn = ControlButton('Click')
self._btn1 = ControlButton('Click 1')
self._save = ControlButton('Save button')
self._load = ControlButton('Load button')
self.formset = [
('_btn','_btn1'),
('_control0','_control1'),
('_control2','_control3'),
'_txt',
('_save','_load')]
self._btn.value = self.__btn
self._btn1.value = self.__btn1
self._save.value = self.save_window
self._load.value = self.load_window
self._start = time.time()
self.INTERVAL = 500
self.N_TRACKS = 8
def __btn(self):
for i in range(40):
s = random.randint( 0, 10000 )
o = random.randint( 0, 1000 )
self._control0.add_event( s, s+o, track=random.randint(0,self.N_TRACKS) )
#self._control0.add_event( random.randint(0, 10000), s+o, track=random.randint(0,self.N_TRACKS), color="#00FFDD")
self._control0.value = 5000
def __addEvent0(self):
b = self._control0.value
e = b+self.INTERVAL
self._control0.add_event( b, e, track=random.randint(0,self.N_TRACKS) )
self._control0.value = e
self._txt.value = str(time.time() - self._start)
def __addEvent1(self):
b = self._control1.value
e = b+self.INTERVAL
self._control1.add_event( b, e, track=random.randint(0,self.N_TRACKS) )
self._control1.value = e
def __addEvent2(self):
b = self._control2.value
e = b+self.INTERVAL
self._control2.add_event( b, e, track=random.randint(0,self.N_TRACKS) )
self._control2.value = e
def __addEvent3(self):
b = self._control3.value
e = b+self.INTERVAL
self._control3.add_event( b, e, track=random.randint(0,self.N_TRACKS) )
self._control3.value = e
def __btn1(self):
self._start = time.time()
timer = QtCore.QTimer(self.form)
timer.timeout.connect(self.__addEvent0)
timer.start(self.INTERVAL)
timer = QtCore.QTimer(self.form)
timer.timeout.connect(self.__addEvent1)
timer.start(self.INTERVAL)
timer = QtCore.QTimer(self.form)
timer.timeout.connect(self.__addEvent2)
timer.start(self.INTERVAL)
timer = QtCore.QTimer(self.form)
timer.timeout.connect(self.__addEvent3)
timer.start(self.INTERVAL)
##################################################################################################################
##################################################################################################################
##################################################################################################################
#Execute the application
if __name__ == "__main__": pyforms.start_app( SimpleExample )
| 2.4375 | 2 |
annotation_gui_gcp/orthophoto_view.py | lioncorpo/sfm.lion-judge-corporation | 1 | 2091 | from typing import Tuple
import numpy as np
import rasterio.warp
from opensfm import features
from .orthophoto_manager import OrthoPhotoManager
from .view import View
class OrthoPhotoView(View):
def __init__(
self,
main_ui,
path: str,
init_lat: float,
init_lon: float,
is_geo_reference: bool = False,
):
"""[summary]
Args:
main_ui (GUI.Gui)
path (str): path containing geotiffs
"""
self.image_manager = OrthoPhotoManager(path, 100.0)
self.images_in_list = self.image_manager.image_keys
self.zoom_window_size_px = 500
self.is_geo_reference = is_geo_reference
self.size = 50 # TODO add widget for zoom level
super(OrthoPhotoView, self).__init__(main_ui, False)
self.refocus(init_lat, init_lon)
self.populate_image_list()
if self.images_in_list:
self.bring_new_image(self.images_in_list[0])
self.set_title()
def get_image(self, new_image):
crop, image_window, geot = self.image_manager.read_image_around_latlon(
new_image, self.center_lat, self.center_lon, self.size
)
self.image_window = image_window
self.geot = geot
return crop
def get_candidate_images(self):
return self.image_manager.get_candidate_images(
self.center_lat, self.center_lon, self.size
)
def pixel_to_latlon(self, x: float, y: float):
"""
From pixels (in the viewing window) to latlon
"""
if not self.is_geo_reference:
return None
# Pixel to whatever crs the image is in
# pyre-fixme[16]: `OrthoPhotoView` has no attribute `geot`.
x, y = self.geot.xy(y, x)
# And then to WSG84 (lat/lon)
lons, lats = rasterio.warp.transform(self.geot.crs, "EPSG:4326", [x], [y])
return lats[0], lons[0]
def gcp_to_pixel_coordinates(self, x: float, y: float) -> Tuple[float, float]:
"""
Transforms from normalized coordinates (in the whole geotiff) to
pixels (in the viewing window)
"""
h, w = self.image_manager.get_image_size(self.current_image)
px = features.denormalized_image_coordinates(np.array([[x, y]]), w, h)[0]
# pyre-fixme[16]: `OrthoPhotoView` has no attribute `image_window`.
x = px[0] - self.image_window.col_off
y = px[1] - self.image_window.row_off
# pyre-fixme[7]: Expected `Tuple[float, float]` but got `List[typing.Any]`.
return [x, y]
def pixel_to_gcp_coordinates(self, x: float, y: float) -> Tuple[float, float]:
"""
Transforms from pixels (in the viewing window) to normalized coordinates
(in the whole geotiff)
"""
# pyre-fixme[16]: `OrthoPhotoView` has no attribute `image_window`.
x += self.image_window.col_off
y += self.image_window.row_off
h, w = self.image_manager.get_image_size(self.current_image)
coords = features.normalized_image_coordinates(np.array([[x, y]]), w, h)[0]
return coords.tolist()
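    # Note (illustrative): gcp_to_pixel_coordinates and pixel_to_gcp_coordinates are
    # inverses of each other -- normalized coordinates over the whole GeoTIFF are mapped
    # into the current viewing window and back by offsetting with the window's
    # col_off/row_off and (de)normalizing against the full image size.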
def refocus(self, lat, lon):
self.center_lat = lat
self.center_lon = lon
self.populate_image_list()
if self.images_in_list:
if self.current_image not in self.images_in_list:
self.bring_new_image(self.images_in_list[0])
else:
self.bring_new_image(self.current_image)
self.set_title()
def bring_new_image(self, new_image):
super(OrthoPhotoView, self).bring_new_image(new_image, force=True)
xlim = self.ax.get_xlim()
ylim = self.ax.get_ylim()
artists = self.ax.plot(np.mean(xlim), np.mean(ylim), "rx")
self.plt_artists.extend(artists)
self.canvas.draw_idle()
def set_title(self):
lat, lon = self.center_lat, self.center_lon
if self.images_in_list:
t = "Images covering lat:{:.4f}, lon:{:.4f}".format(lat, lon)
shot = self.current_image
seq_ix = self.images_in_list.index(shot)
title = f"{t} [{seq_ix+1}/{len(self.images_in_list)}]: {shot}"
else:
title = f"No orthophotos around {lat}, {lon}"
self.current_image = None
self.ax.clear()
self.ax.axis("off")
self.canvas.draw_idle()
self.window.title(title)
| 2.40625 | 2 |
tempest/tests/lib/services/compute/test_security_group_default_rules_client.py | mail2nsrajesh/tempest | 254 | 2092 | <reponame>mail2nsrajesh/tempest<filename>tempest/tests/lib/services/compute/test_security_group_default_rules_client.py
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.services.compute import security_group_default_rules_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestSecurityGroupDefaultRulesClient(base.BaseServiceTest):
FAKE_RULE = {
"from_port": 80,
"id": 1,
"ip_protocol": "TCP",
"ip_range": {
"cidr": "10.10.10.0/24"
},
"to_port": 80
}
def setUp(self):
super(TestSecurityGroupDefaultRulesClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = (security_group_default_rules_client.
SecurityGroupDefaultRulesClient(fake_auth, 'compute',
'regionOne'))
def _test_list_security_group_default_rules(self, bytes_body=False):
self.check_service_client_function(
self.client.list_security_group_default_rules,
'tempest.lib.common.rest_client.RestClient.get',
{"security_group_default_rules": [self.FAKE_RULE]},
to_utf=bytes_body)
def test_list_security_group_default_rules_with_str_body(self):
self._test_list_security_group_default_rules()
def test_list_security_group_default_rules_with_bytes_body(self):
self._test_list_security_group_default_rules(bytes_body=True)
def _test_show_security_group_default_rule(self, bytes_body=False):
self.check_service_client_function(
self.client.show_security_group_default_rule,
'tempest.lib.common.rest_client.RestClient.get',
{"security_group_default_rule": self.FAKE_RULE},
to_utf=bytes_body,
security_group_default_rule_id=1)
def test_show_security_group_default_rule_with_str_body(self):
self._test_show_security_group_default_rule()
def test_show_security_group_default_rule_with_bytes_body(self):
self._test_show_security_group_default_rule(bytes_body=True)
def _test_create_security_default_group_rule(self, bytes_body=False):
request_body = {
"to_port": 80,
"from_port": 80,
"ip_protocol": "TCP",
"cidr": "10.10.10.0/24"
}
self.check_service_client_function(
self.client.create_security_default_group_rule,
'tempest.lib.common.rest_client.RestClient.post',
{"security_group_default_rule": self.FAKE_RULE},
to_utf=bytes_body, **request_body)
def test_create_security_default_group_rule_with_str_body(self):
self._test_create_security_default_group_rule()
def test_create_security_default_group_rule_with_bytes_body(self):
self._test_create_security_default_group_rule(bytes_body=True)
def test_delete_security_group_default_rule(self):
self.check_service_client_function(
self.client.delete_security_group_default_rule,
'tempest.lib.common.rest_client.RestClient.delete',
{}, status=204, security_group_default_rule_id=1)
| 1.953125 | 2 |
main.py | Light-Lens/PassGen | 3 | 2093 | <filename>main.py
# PassGen
# These imports will be used for this project.
from colorama import Fore, Style
from colorama import init
import datetime
import string
import random
import sys
import os
# Initialize the console title and colorama.
os.system('title PassGen')
init(autoreset = True)
# Create Log Functions.
class LOG:
def INFO_LOG(message):
CurrentTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print(f"{CurrentTime} - INFO: {message}")
def STATUS_LOG(message):
CurrentTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print(f"{CurrentTime} - STATUS: {message}")
def ERROR_LOG(message):
CurrentTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print(Fore.RED + Style.BRIGHT + f"{CurrentTime} - ERROR: {message}")
def WARN_LOG(message):
CurrentTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print(Fore.YELLOW + Style.BRIGHT + f"{CurrentTime} - WARNING: {message}")
# This will Generate a Strong Password for the User!
def Generate(PassLen):
JoinChars = [] # Create an Empty List.
    # Add the ASCII letters, digits and punctuation character sets to the JoinChars list.
JoinChars.extend(list(string.ascii_letters))
JoinChars.extend(list(string.digits))
JoinChars.extend(list(string.punctuation))
random.shuffle(JoinChars) # Shuffle the List.
    # Get the random password.
return "".join(JoinChars[0:PassLen])
# Code Logic here.
LOG.WARN_LOG("Initialized PassGen!")
LOG.STATUS_LOG("Generating a Random Password for You.")
Password = Generate(random.randint(5, 17))
LOG.INFO_LOG(f"Your Password is: {Password}")
with open("Password.log", "a") as File: File.write(f"{Password}\n")
if (len(sys.argv) == 1) or (len(sys.argv) > 1 and sys.argv[1].lower() != "-o"):
os.system("start Password.log")
sys.exit() # Exiting the program successfully.
| 2.96875 | 3 |
memos/memos/models/Memo.py | iotexpert/docmgr | 0 | 2094 | """
The model file for a Memo
"""
import re
import os
import shutil
import json
from datetime import datetime
from flask import current_app
from memos import db
from memos.models.User import User
from memos.models.MemoState import MemoState
from memos.models.MemoFile import MemoFile
from memos.models.MemoSignature import MemoSignature
from memos.models.MemoReference import MemoReference
from memos.models.MemoHistory import MemoHistory
from memos.models.MemoActivity import MemoActivity
from memos.revletter import b10_to_rev, rev_to_b10
class Memo(db.Model):
"""This class is the single interface to a "memo" and all of the "memos"
"""
id = db.Column(db.Integer, primary_key=True)
number = db.Column(db.Integer) # Memo Number
version = db.Column(db.String) # A,B,..Z,AA,AB,...AZ,BA
confidential = db.Column(db.Boolean, default=False) # if true only author, signer, distribution can read
distribution = db.Column(db.String(128), default='') # user names on the distribution
keywords = db.Column(db.String(128), default='') # any keyword
title = db.Column(db.String(128), nullable=False, default='') # The title of the memo
num_files = db.Column(db.Integer, default=0) # The number of files attached to the memo
action_date = db.Column(db.DateTime, nullable=False, default=datetime.utcnow) # The last time anything happened
create_date = db.Column(db.DateTime) # when the memo was created
submit_date = db.Column(db.DateTime) # when the memo was most recently submitted (from created)
active_date = db.Column(db.DateTime) # when the memo was moved to active state (from submitted)
obsolete_date = db.Column(db.DateTime) # when the memo was moved to obsolete state (from active)
user_id = db.Column(db.String(120), db.ForeignKey('user.username'),nullable=False) # The key of the user who owns the memo
_signers = db.Column(db.String(128),default='') # the hidden list of signer usernames
_references = db.Column(db.String(128),default='') # The hidden list of references
memo_state = db.Column(db.Enum(MemoState)) # Draft, Signoff, Active, Obsolete
def __init__(self, **kwargs):
super().__init__(**kwargs)
# do custom initialization here
def __repr__(self):
return f"{self.user.username}-{self.number}{self.version}"
def __str__(self):
return f"{self.user.username}-{self.number}{self.version}"
########################################
# Permission Functions
########################################
@staticmethod
def can_create(owner=None, delegate=None):
"""Will return true if the delegate can create a memo for the owner"""
if owner is None:
return False
if delegate is None:
delegate = owner
return owner.is_delegate(delegate=delegate)
def can_revise(self, delegate=None):
"""Is the delgate allowed to update "this" memo?"""
if delegate is None:
return False
if not self.user.is_delegate(delegate):
return False
        if self.memo_state == MemoState.Active or self.memo_state == MemoState.Obsolete:
            return True
        return False
def can_sign(self, signer=None, delegate=None):
"""Can this memo be signed by delegate for the signers"""
if signer is None or delegate is None:
return False
if self.memo_state != MemoState.Signoff:
return False
if not signer.is_delegate(delegate=delegate):
return False
# The list of signers and if they have signed are kept in the MemoSignature table
status = MemoSignature.is_signer(self.id,signer)
return status['is_signer'] and not status['status']
def can_unsign(self, signer=None, delegate=None):
"""Can this memo be unsigned by delegate for the signer """
if signer is None or delegate is None:
return False
if self.memo_state != MemoState.Signoff:
return False
if not signer.is_delegate(delegate=delegate):
return False
status = MemoSignature.is_signer(self.id,signer)
return status['is_signer'] and status['status']
def can_obsolete(self, delegate=None):
""" Can this memo be obsoleted by the delegate? Only active memos can be obsoleted """
if delegate is None:
return False
if not self.user.is_delegate(delegate):
return False
if self.memo_state == MemoState.Active:
return True
return False
def can_cancel(self, delegate=None):
""" can this memo be cancled by the delegate. Only drafts memos can be canceled"""
if delegate is None:
return False
if self.memo_state != MemoState.Draft:
return False
if not self.user.is_delegate(delegate=delegate):
return False
return True
def can_reject(self, signer=None, delegate=None):
""" can this memo be rejected by the delegate. Only memos in signoff can be rejected"""
if signer is None or delegate is None:
return False
if self.memo_state != MemoState.Signoff:
return False
if not signer.is_delegate(delegate):
return False
status = MemoSignature.is_signer(memo_id=self.id,signer=signer)
# if you are a signer you can reject.. even if you have already signed
return status['is_signer']
def has_access(self, user=None):
"""This function will return True of the "username" has access to self"""
# if it is not confidential than anyone can access
if self.confidential == False:
return True
# at this point we know it is confidential so ... they must provide a username
if user is None:
return False
        # you always have access to your own memos
if self.user.username == user.username:
return True
if user.admin:
return True
if user.readAll:
return True
# if the username is in the distribution list then provide access TODO: ARH do something better
        if user.username in re.split(r'\s|\,|\t|\;|\:', self.distribution):
return True
return False
########################################
# ??? Functions
########################################
def get_fullpath(self):
""" This function gives the os path to a file """
path = os.path.join(current_app.root_path,"static","memos",f"{self.user_id}",f"{self.number}",f"{self.version}")
return path
def get_relpath(self):
""" Return the relative path of this memo """
path = os.path.join("/static","memos",f"{self.user_id}",f"{self.number}",f"{self.version}")
return path
def get_files(self):
""" Return a list of the files attached to this memo"""
memo_list = MemoFile.query.filter_by(memo_id=self.id).all()
return memo_list
def saveJson(self):
""" Create the JSON file which is a copy of all of the meta data """
js = {}
js['title']=self.title
js['number']=self.number
js['version']=self.version
js['confidential']=self.confidential
js['distribution']=self.distribution
js['keywords']=self.keywords
js['userid']=self.user_id
js['memo_state']=f"{self.memo_state}"
js['keywords']= self.keywords
js['signers']=self.signers['signers']
js['references']= self.references['ref_string']
js['files']=[]
for file in self.get_files():
js['files'].append(file.filename)
path = os.path.join(self.get_fullpath())
#current_app.logger.info(f"Making Directory {path}")
os.makedirs(path,exist_ok=True)
#current_app.logger.info(f"Making Succeeded {path}")
path = os.path.join(path,f"meta-{self.user_id}-{self.number}-{self.version}.json")
f = open(path,"w")
json.dump(js,f)
f.close()
@property
def signers(self):
# get the signers from the signing table and turn it back to a string and a list
siglist = MemoSignature.get_signers(self)
for sig in siglist:
sig.signer = User.find(username=sig.signer_id)
sig.delegate = User.find(username=sig.delegate_id)
return {'signers':self._signers,'siglist':siglist}
@signers.setter
def signers(self,signer_names):
self._signers = signer_names
MemoSignature.delete_signers(self)
users = User.valid_usernames(signer_names)
for signer in users['valid_users']:
MemoSignature.add_signer(memo=self,signer=signer)
######################################################################
# References
######################################################################
@staticmethod
def parse_reference(reference):
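        """Split a reference of the form 'user-number[-version]' into its parts."""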
parts = re.split(r'-',reference)
if len(parts) == 2:
parts.append(None)
return parts
@staticmethod
def valid_references(references):
current_app.logger.info(f'references ={references}')
valid_memos = []
valid_refs = []
invalid = []
for memo_ref in re.split(r'\s|\,|\t|\;|\:',references):
if memo_ref == '':
continue
parts = Memo.parse_reference(memo_ref)
if len(parts) > 3 or len(parts) < 2:
invalid.append(memo_ref)
current_app.logger.info(f"INVALID length append {memo_ref} valid={valid_memos} invalid {invalid}")
continue
username = parts[0]
memo_number = parts[1]
memo_version = parts[2]
memo = Memo.find(username=username,memo_number=memo_number,memo_version=memo_version)
current_app.logger.info(f"Memo = {memo}")
if memo != None and (memo.memo_state == MemoState.Active or memo.memo_state == MemoState.Obsolete):
valid_memos.append(memo)
valid_refs.append(memo_ref)
else:
invalid.append(memo_ref)
rval = {'valid_refs':valid_refs, 'valid_memos' : valid_memos,'invalid':invalid}
return rval
@property
def references(self):
        # this function will return a list of reference objects + a string of the references
refs = MemoReference.get_refs(self)
rval = []
for ref in refs:
userid=ref[0]
memo = Memo.find(username=userid,memo_number=ref[1],memo_version=ref[2])
if ref[2] == None:
refstring=f"{userid}-{ref[1]}"
else:
refstring=f"{userid}-{ref[1]}-{ref[2]}"
rval.append((refstring,memo))
return {'reflist':rval,'ref_string':self._references}
@references.setter
def references(self,references):
self._references = references
refs = Memo.valid_references(references)
for i in range(len(refs['valid_refs'])):
parsed_ref = Memo.parse_reference(refs['valid_refs'][i])
user = User.find(username=parsed_ref[0])
MemoReference.add_ref(self.id,ref_user_id=user.username,ref_memo_number=parsed_ref[1],ref_memo_version=parsed_ref[2])
@property
def backrefs(self):
return MemoReference.get_back_refs(self)
######################################################################
#
######################################################################
def get_next_version(self):
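        """Return the next revision letter (A, B, ..., Z, AA, ...) for this memo number."""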
memo = Memo.query.join(User).filter(Memo.number == self.number)\
.order_by(Memo.version.desc()).first()
current_app.logger.info(f"get_next_version {memo.id} {memo.number} {memo.version}")
if memo:
return b10_to_rev(rev_to_b10(memo.version)+1)
return b10_to_rev(1) # also known as 'A'
def save(self):
db.session.add(self)
db.session.commit()
self.saveJson()
################################################################################
# functions used to process the state
    # these functions would classically be called private
################################################################################
def obsolete_previous(self,acting=None):
prev_list = Memo.query.join(User).filter(Memo.number == self.number,Memo.version != self.version).all()
for memo in prev_list:
if memo.memo_state == MemoState.Active:
memo.memo_state = MemoState.Obsolete
MemoHistory.activity(memo=memo,memo_activity=MemoActivity.Obsolete,user=acting)
memo.save()
# This function is called when:
# 1- a valid draft is created
# 2- a signature happens
# 3- an unsign happens
def process_state(self,acting=None):
if self.memo_state == MemoState.Draft:
if MemoSignature.status(self.id) == False:
self.memo_state = MemoState.Signoff
self.submit_date = datetime.utcnow()
MemoHistory.activity(memo=self,memo_activity=MemoActivity.Signoff,user=acting)
self.notify_signers(f"memo {self.user.username}-{self.number}-{self.version} has gone into signoff")
else:
self.memo_state = MemoState.Active
self.active_date = datetime.utcnow()
MemoHistory.activity(memo=self,memo_activity=MemoActivity.Activate,user=acting)
self.obsolete_previous(acting=acting)
self.notify_distribution(f"memo {self.user.username}-{self.number}-{self.version} has been published")
if self.memo_state == MemoState.Signoff:
if MemoSignature.status(self.id):
self.memo_state = MemoState.Active
self.active_date = datetime.utcnow()
self.notify_distribution(f"memo {self.user.username}-{self.number}-{self.version} has been published")
MemoHistory.activity(memo=self,memo_activity=MemoActivity.Activate,user=acting)
self.obsolete_previous(acting=acting)
else:
current_app.logger.info(f"Signatures Still Required")
self.action_date = datetime.utcnow()
self.save()
# TODO: ARH
def notify_distribution(self,message):
current_app.logger.info(F"Notify Distribution {self.distribution} {message}")
# TODO: ARH
def notify_signers(self,message):
current_app.logger.info(F"Notify signers {message}")
################################################################################
# State machine functions called by the viewcontroller
################################################################################
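    # Typical lifecycle: create_revise() makes a Draft; process_state() moves it into
    # Signoff, then to Active once all signatures are collected (obsoleting previous
    # versions); obsolete(), cancel() and reject() move a memo back or out of the flow.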
# Owner Function
@staticmethod
def create_revise(owner=None,delegate=None,memo_number=None):
""" This function will return None or a new Memo if the owner/delgate and revise this memo """
assert owner != None and delegate != None
if owner == None or delegate == None:
return None
if owner.is_delegate(delegate) != True:
return None
memo = Memo.query.join(User).filter(User.username==owner.username,Memo.number==memo_number).order_by(Memo.version.desc()).first()
# create a new memo (i.e. not a new version of an existing memo)
if memo_number == None or memo==None:
memo_number = Memo.get_next_number(owner)
new_memo = Memo(number = memo_number,\
version = 'A',\
confidential = False,\
distribution = '',\
keywords = '',\
title = '',\
num_files = 0,\
user_id = owner.username,\
memo_state = MemoState.Draft,\
action_date = datetime.utcnow(),\
create_date = datetime.utcnow(),\
signers = '' )
new_memo.save()
MemoHistory.activity(memo=new_memo,memo_activity=MemoActivity.Create,user=delegate)
current_app.logger.info(f"Creating new memo {new_memo}")
return new_memo
if memo.memo_state == MemoState.Draft:
current_app.logger.info(f"Found a draft memo {memo}")
return memo
# revise an existing memo
new_memo = Memo(number = memo_number,\
version = memo.get_next_version(),\
confidential = memo.confidential,\
distribution = memo.distribution,\
keywords = memo.keywords,\
title = memo.title,\
num_files = 0,\
user_id = memo.user_id,\
memo_state = MemoState.Draft,\
action_date = datetime.utcnow(),\
create_date = datetime.utcnow(),\
)
new_memo.save()
new_memo.references = memo.references['ref_string'] # cannot be done until there is an id assigned by the save
new_memo.signers = memo._signers # cannot be done until there is an id assigned by the save
new_memo.save()
MemoHistory.activity(memo=new_memo,memo_activity=MemoActivity.Create,user=delegate)
return new_memo
# signer function
def sign(self,signer=None,delegate=None):
current_app.logger.info(f"signer = {signer} delegate={delegate}")
if not self.can_sign(signer,delegate):
current_app.logger.info("NOT!!@ allowed to sign")
return False
current_app.logger.info("allowed to sign")
MemoSignature.sign(self.id,signer,delegate)
MemoHistory.activity(memo=self,user=delegate,memo_activity=MemoActivity.Sign)
self.process_state(acting=delegate)
return True
# signer function
def unsign(self,signer=None,delegate=None):
if not self.can_unsign(signer,delegate):
return False
MemoSignature.unsign(self.id,signer,delegate)
MemoHistory.activity(memo=self,user=delegate,memo_activity=MemoActivity.Unsign)
self.process_state(acting=delegate)
return True
# Owner Function
def obsolete(self,delegate=None):
current_app.logger.info(f"Obsolete: {self} Delegate={delegate}")
if not self.can_obsolete(delegate=delegate):
return False
self.memo_state = MemoState.Obsolete
self.action_date = datetime.utcnow()
self.obsolete_date = datetime.utcnow()
MemoHistory.activity(memo=self,user=delegate,memo_activity=MemoActivity.Obsolete)
self.save()
return True
# Owner Function
def cancel(self,delegate=None):
current_app.logger.info(f"Cancel: {self} Delegate={delegate}")
memostring = f"{self}"
if not self.can_cancel(delegate=delegate):
return False
MemoFile.delete(self)
# delete all of the files in that directory & the directory
shutil.rmtree(self.get_fullpath())
MemoReference.delete(self)
MemoSignature.delete_signers(self)
MemoHistory.activity(memo=self,user=delegate,memo_activity=MemoActivity.Cancel)
db.session.delete(self)
db.session.commit()
current_app.logger.info(f"Canceling")
return True
# signer function
def reject(self,signer=None,delegate=None):
current_app.logger.info(f"signer = {signer} delegate={delegate}")
if not self.can_reject(signer,delegate):
return False
self.memo_state = MemoState.Draft
self.action_date = datetime.utcnow()
self.submit_date = None
self.active_date = None
self.obsolete_date = None
MemoHistory.activity(memo=self,memo_activity=MemoActivity.Reject,user=delegate)
MemoSignature.unsign_all(self)
self.save()
self.notify_signers(f"Memo {self.user.username}-{self.number}-{self.version} has been rejected for {signer.username} by {delegate.username}")
return True
################################################################################
# End of State machine functions
################################################################################
@staticmethod
def find(memo_id=None,username=None,memo_number=None,memo_version=None):
if memo_id != None:
return Memo.query.filter_by(id=memo_id).first()
current_app.logger.debug(f"FIND: Looking for {username}/{memo_number}/{memo_version}")
memoQry = Memo.query.filter_by(user_id=username,number=memo_number)
if memo_version != None:
            memoQry = memoQry.filter_by(version=memo_version)
memo = memoQry.first()
current_app.logger.debug(f"Found Memo id={memo}")
return memo
@staticmethod
def get_memo_list(username=None,memo_number=None,memo_version=None,page=1,pagesize=None):
if memo_version:
memo_list = Memo.query.join(User).filter(User.username==username,\
Memo.number==memo_number,\
Memo.version==memo_version)\
.paginate(page = page,per_page=pagesize)
elif memo_number:
memo_list = Memo.query.join(User).filter(User.username==username,Memo.number==memo_number)\
.order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize)
elif username:
memo_list = Memo.query.join(User).filter(User.username==username,Memo.memo_state == MemoState.Active)\
.order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize)
else:
memo_list = Memo.query.join(User).filter(Memo.memo_state == MemoState.Active)\
.order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize)
return memo_list
@staticmethod
def search(title=None,keywords=None,page=1,pagesize=None):
current_app.logger.info(f"Search title={title}")
if title != None:
memo_list = Memo.query.filter(Memo.title.like(f"%{title}%")).order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize)
if keywords != None:
memo_list = Memo.query.filter(Memo.keywords.like(f"%{keywords}%")).order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize)
return memo_list
@staticmethod
def get_next_number(user=None):
assert user!=None
memo_list = Memo.query.join(User).filter(User.username==user.username)\
.order_by(Memo.number.desc()).first()
if memo_list == None:
return 1
return memo_list.number+1
@staticmethod
def get_inbox(user=None,page=1,pagesize=None):
assert user!=None,"User must not be none"
if user == None:
return None
msigs = MemoSignature.get_signatures(user,signed=False)
memolist = Memo.query.join(User).filter(Memo.memo_state==MemoState.Signoff,Memo.id.in_(msigs)).order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize)
current_app.logger.info(f"Inbox for {user.username} = Items={len(memolist.items)} {memolist}")
return memolist
@staticmethod
def get_drafts(user=None,page=1,pagesize=None):
assert user!=None,"User must not be none"
if user == None:
return None
memolist = Memo.query.join(User).filter(Memo.memo_state==MemoState.Draft,User.username==user.username).order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize)
return memolist | 2.6875 | 3 |
course_catalog/etl/conftest.py | mitodl/open-discussions | 12 | 2095 | <filename>course_catalog/etl/conftest.py
"""Common ETL test fixtures"""
import json
import pytest
@pytest.fixture(autouse=True)
def mitx_settings(settings):
"""Test settings for MITx import"""
settings.EDX_API_CLIENT_ID = "fake-client-id"
settings.EDX_API_CLIENT_SECRET = "fake-client-secret"
settings.EDX_API_ACCESS_TOKEN_URL = "http://localhost/fake/access/token/url"
settings.EDX_API_URL = "http://localhost/fake/api/url"
settings.MITX_BASE_URL = "http://localhost/fake/base/url"
settings.MITX_ALT_URL = "http://localhost/fake/alt/url"
return settings
@pytest.fixture(autouse=True)
def oll_settings(settings):
"""Test settings for MITx import"""
settings.OLL_API_CLIENT_ID = "fake-client-id"
settings.OLL_API_CLIENT_SECRET = "fake-client-secret"
settings.OLL_API_ACCESS_TOKEN_URL = "http://localhost/fake/access/token/url"
settings.OLL_API_URL = "http://localhost/fake/api/url"
settings.OLL_BASE_URL = "http://localhost/fake/base/url"
settings.OLL_ALT_URL = "http://localhost/fake/alt/url"
return settings
@pytest.fixture
def mitx_course_data():
"""Catalog data fixture"""
with open("./test_json/test_mitx_course.json", "r") as f:
yield json.loads(f.read())
@pytest.fixture
def non_mitx_course_data():
"""Catalog data fixture"""
with open("./test_json/test_non_mitx_course.json", "r") as f:
yield json.loads(f.read())
| 2.046875 | 2 |
juliaset/juliaset.py | PageotD/juliaset | 0 | 2096 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import random
class JuliaSet:
def __init__(self):
"""
        Constructor of the JuliaSet class. Sets the default attributes:
        size in pixels (both width and height, default 256), dpi (default 300),
        the norm/mirror flags, the escape radius and the iteration count.
"""
# Initialize image related parameters
self.size = 256
self.dpi = 300
self.norm = True
self.mirror = False
# Initialize process related parameters
self.escrad = 3
self.niter = 250
def param(self, **kwargs):
"""
Get parameters from input dictionary and set attributes.
:param kwargs: a dictionary in the form
`{'arg1':value, ..., 'argN': value}`
"""
# Check if kwargs in not empty
if kwargs is not None:
# Image related parameters
if 'size' in kwargs:
self.size = kwargs.pop('size', 256)
if 'dpi' in kwargs:
self.dpi = kwargs.pop('dpi', 300)
if 'norm' in kwargs:
self.norm = kwargs.pop('norm', True)
if 'mirror' in kwargs:
self.mirror = kwargs.pop('mirror', False)
# Process related parameters
if 'escrad' in kwargs:
self.escrad = kwargs.pop('escrad', 3)
if 'niter' in kwargs:
self.niter = kwargs.pop('niter', 250)
# If kwargs is not empty there is some invalid keywords
if kwargs:
print("{} are invalid keyword arguments!".format(kwargs.keys()))
    def run(self, show=False, fname='juliaset-output'):
"""
Run the Julia set generator
:param mirror: if True the julia is mirrored horizontally and
            vertically; each mirror is concatenated with the original
to produce a new image
:param norm: if true the Julia set is normalized by its
absolute maximum value.
        :param show: if show is `False` the output image will be
written as a PNG file named `fname`
:param fname: Name of the output PNG file to write on disk
"""
# Get a complex value among a list of best Julia sets
cpxNum = self.getComplexValue()
# Get the target area
# For more randomness, the target area is a random
# subset of a wide one defined with x[-1.5, 1.5] and
# y[-1.5, 1.5]
xrng, yrng = self.getTargetArea()
# Process
julia = self.processJulia(cpxNum, xrng, yrng)
# Normalization
if(self.norm):
julia /= np.amax(np.abs(julia))
# Mirroring
if(self.mirror):
# Horizontal mirroring and concatenate
juliamirror = np.flip(julia, axis=1)
julia = np.concatenate((julia, juliamirror), axis=1)
# Vertical mirroring and concatenate
juliamirror = np.flip(julia, axis=0)
julia = np.concatenate((julia, juliamirror), axis=0)
# Plot the output with a random colormap using matplotlib
self.plotJuliaSet(julia, show=show, fname=fname)
def getComplexValue(self):
"""
Random choice in a list of best complex values for Julia
sets (real, imag).
:return cpxNum: a semi-random complex value
"""
# Define the list of best complex values
cpxList = [
(-0.10, 0.650), (0.00, 0.80), (0.370, 0.100),
(0.355, 0.355), (-0.54, 0.54), (0.340, -0.05),
(0.37, 0.10), (0.355, 0.355)
]
# Randomly choose one
cpxTmp = random.choice(cpxList)
# Manipulate the base value slightly to make it a little more unique
cpxNum = self.twearkComplex(cpxTmp)
return cpxNum
def twearkComplex(self, cpxTmp):
"""
Manipulate the base value slightly to make it a little more unique.
:param cpxTmp: complex value to modify
        :return cpxNum: a slightly manipulated version of the input
"""
# Get the signs for the imaginary parts
isign = random.randrange(-1, 1, 2)
        # Get a value variation for real and imaginary parts
        # The possible variation range is fixed at +/- 2% to stay
        # in the neighborhood of the initial value
rsigma = random.uniform(0.98, 1.02)
isigma = random.uniform(0.98, 1.02)
# Apply modification and return the new complex value
realPart = cpxTmp[0] * rsigma
imagPart = cpxTmp[1] * isigma * isign
return complex(realPart, imagPart)
def getTargetArea(self):
"""
For more randomness, the target area is a random
subset of a wide one defined with x[-1.5, 1.5] and
y[-1.5, 1.5]
:return xrng, yrng: tuples containing (xmin, xmax)
and (ymin, ymax)
"""
# Randomly choose the center of the target area
# Possible values are in [-1.0, 1.0] to stay in an
# area where there are always pieces of fractals
xctr = random.uniform(-1.0,1.0)
yctr = random.uniform(-1.0,1.0)
# Extend around the center
xrng = (xctr-0.5, xctr+0.5)
yrng = (yctr-0.5, yctr+0.5)
return xrng, yrng
def processJulia(self, cpxNum, xrng, yrng):
"""
Calculate the Julia set for the given input parameters.
:param cpxNum: complex value acting as a seed for the Julia set
:param xrng: range of values (min, max) for the x-axis
:param yrng: range of values (min, max) for the y-axis
:param escrad: escape radius
:param niter: maximum number of iterations
"""
# Initialize numpy array of dimensions (size, size) with zeros
julia = np.ones((self.size, self.size), dtype=np.float32)
# Calculate the width (equal to height) of the image since the
# image is defined as a square
width = xrng[1] - xrng[0] # xmax - xmin = ymax - ymin
# Randomly choose the sign of the shade
#ssign = random.randrange(-1, 1, 2)
ssign = -1.
# Loop over x range
for ix in range(self.size):
# Get the pixel position in the complex plane
# For the real part
realPart = float(ix) / self.size * width + xrng[0]
# Loop over y range
for iy in range(self.size):
# Get the pixel position in the complex plane
# For the imaginary part
imagPart = float(iy) / self.size * width + yrng[0]
# Build the complex
cpxTmp = complex(realPart, imagPart)
# Initialize iteration counter
it = 0
# Loop over iterations
while(np.abs(cpxTmp) <= self.escrad**2 and it < self.niter):
# Quadratic polynomial
cpxTmp = cpxTmp**2 + cpxNum
# Increment iteration counter
it += 1
                # Calculate the shade (a cool trick found somewhere on the net)
shade = 1. - np.sqrt(it/self.niter)
# Fill the outpout array
julia[ix][iy] = ssign * shade
return julia
    def plotJuliaSet(self, julia, fname='juliaset-output', show=False):
"""
Plot the output Julia set and show it in matplotlib window or
write it on disk as a png file.
:param julia: the Julia set
        :param show: if show is `False` the output image will be
written as a PNG file named `fname`
:param fname: Name of the output PNG file to write on disk
"""
# List of beautiful colormap for Julia sets
cmapList = [
cm.Blues, cm.Greens, cm.Purples, cm.hot, cm.inferno,
cm.binary, cm.rainbow, cm.twilight_shifted, cm.plasma
]
# Randomly chose one colormap
cmapName = random.choice(cmapList)
# Plot the image with a gaussian interpolation
fig = plt.gcf()
fig.set_size_inches(3., 3.)
plt.imshow(julia, interpolation='gaussian', cmap=cmapName)
# Disable axis
plt.axis('off')
if(show):
plt.show()
else:
# Write on disk
fig.savefig(fname+".png", dpi=self.dpi, pad_inches=0.05, bbox_inches='tight')
def julia(**kwargs):
"""
    Create a JuliaSet instance, apply any keyword parameters, and return it.
"""
# Initialize Julia Set instance
juliaInstance = JuliaSet()
# If kwargs not empty update the attributes
if kwargs is not None:
juliaInstance.param(**kwargs)
return juliaInstance
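# Illustrative usage (parameter values are only examples):
#   julia(size=512, mirror=True, niter=300).run(fname='my-julia')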
if __name__ == "__main__":
# execute only if run as a script
genJuliaSet = JuliaSet()
genJuliaSet.param()
genJuliaSet.run()
| 3.15625 | 3 |
eye_detection.py | ShivanS93/VAtest_withOKN | 0 | 2097 | <reponame>ShivanS93/VAtest_withOKN
#!python3
# eye_detection.py - detect eyes using webcam
# tutorial: https://www.roytuts.com/real-time-eye-detection-in-webcam-using-python-3/
import cv2
import math
import numpy as np
def main():
faceCascade = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
eyeCascade = cv2.CascadeClassifier("haarcascade_eye.xml")
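    # Both Haar cascade XML files must sit in the working directory; copies
    # ship with OpenCV and can be located via cv2.data.haarcascades.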
# grab the reference to the webcam
# try:
vs = cv2.VideoCapture(0)
print(vs)
while True:
ret, frame = vs.read()
if frame is None:
break
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.equalizeHist(gray)
faces = faceCascade.detectMultiScale(frame)
for (x, y, w, h) in faces:
roi_gray = gray[y : y + h, x : x + w]
roi_color = frame[y : y + h, x : x + w]
eyes = eyeCascade.detectMultiScale(roi_gray)
for (ex, ey, ew, eh) in eyes:
cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 0, 255), 2)
cv2.imshow("Video", frame)
key = cv2.waitKey(1) & 0xFF
if key == ord("q") or key == 27:
break
cv2.destroyAllWindows()
if __name__ == "__main__":
main()
| 3.484375 | 3 |
scripts/make_gene_table.py | lmdu/bioinfo | 0 | 2098 | <gh_stars>0
#!/usr/bin/env python
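# Merge gene descriptions from macaca_genes.txt (tab-separated, header row,
# description optionally followed by a bracketed source) into each row of
# gene_info.txt, printing the augmented table to stdout.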
descripts = {}
with open('macaca_genes.txt') as fh:
fh.readline()
for line in fh:
cols = line.strip('\n').split('\t')
if cols[1]:
descripts[cols[0]] = cols[1].split('[')[0].strip()
else:
descripts[cols[0]] = cols[1]
with open('gene_info.txt') as fh:
for line in fh:
cols = line.strip().split('\t')
cols.append(descripts[cols[1]])
print "\t".join(cols)
| 3.015625 | 3 |
{{cookiecutter.repo_name}}/src/mix_with_scaper.py | nussl/cookiecutter | 0 | 2099 | <filename>{{cookiecutter.repo_name}}/src/mix_with_scaper.py
import gin
from scaper import Scaper, generate_from_jams
import copy
import logging
import p_tqdm
import nussl
import os
import numpy as np
def _reset_event_spec(sc):
sc.reset_fg_event_spec()
sc.reset_bg_event_spec()
def check_mixture(path_to_mix):
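    """Reject near-silent mixtures: return False when the mix RMS is below 0.01."""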
mix_signal = nussl.AudioSignal(path_to_mix)
if mix_signal.rms() < .01:
return False
return True
def make_one_mixture(sc, path_to_file, num_sources,
event_parameters, allow_repeated_label):
"""
Creates a single mixture, incoherent. Instantiates according to
the event parameters for each source.
"""
check = False
while not check:
for j in range(num_sources):
sc.add_event(**event_parameters)
sc.generate(
path_to_file,
path_to_file.replace('.wav', '.jams'),
no_audio=False,
allow_repeated_label=allow_repeated_label,
save_isolated_events=True,
)
_reset_event_spec(sc)
check = check_mixture(path_to_file)
def instantiate_and_get_event_spec(sc, master_label, event_parameters):
_reset_event_spec(sc)
_event_parameters = copy.deepcopy(event_parameters)
_event_parameters['label'] = ('const', master_label)
sc.add_event(**_event_parameters)
event = sc._instantiate_event(sc.fg_spec[-1])
_reset_event_spec(sc)
return sc, event
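# "Coherent" mixing: one event is instantiated for the first label, and every other
# label reuses its source file (path swapped by label), source time, pitch shift and
# time stretch, so the isolated stems stay time-aligned.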
def make_one_mixture_coherent(sc, path_to_file, labels, event_parameters,
allow_repeated_label):
check = False
while not check:
sc, event = instantiate_and_get_event_spec(
sc, labels[0], event_parameters)
for label in labels:
try:
sc.add_event(
label=('const', label),
source_file=('const', event.source_file.replace(labels[0], label)),
source_time=('const', event.source_time),
event_time=('const', 0),
event_duration=('const', sc.duration),
snr=event_parameters['snr'],
pitch_shift=('const', event.pitch_shift),
time_stretch=('const', event.time_stretch)
)
except:
logging.exception(
f"Got an error for {label} @ {_source_file}. Moving on...")
sc.generate(
path_to_file,
path_to_file.replace('.wav', '.jams'),
no_audio=False,
allow_repeated_label=allow_repeated_label,
save_isolated_events=True,
)
sc.fg_spec = []
check = check_mixture(path_to_file)
@gin.configurable
def make_scaper_datasets(scopes=['train', 'val']):
for scope in scopes:
with gin.config_scope(scope):
mix_with_scaper()
@gin.configurable
def mix_with_scaper(num_mixtures, foreground_path, background_path,
scene_duration, sample_rate, target_folder,
event_parameters, num_sources=None, labels=None,
coherent=False, allow_repeated_label=False,
ref_db=-40, bitdepth=16, seed=0, num_workers=1):
nussl.utils.seed(seed)
os.makedirs(target_folder, exist_ok=True)
scaper_seed = np.random.randint(100)
logging.info('Starting mixing.')
if num_sources is None and labels is None:
raise ValueError("One of labels or num_sources must be set!")
if coherent and labels is None:
raise ValueError("Coherent mixing requires explicit labels!")
generators = []
if background_path is None:
background_path = foreground_path
for i in range(num_mixtures):
sc = Scaper(
scene_duration,
fg_path=foreground_path,
bg_path=background_path,
random_state=scaper_seed,
)
sc.ref_db = ref_db
sc.sr = sample_rate
sc.bitdepth = bitdepth
generators.append(sc)
scaper_seed += 1
mix_func = make_one_mixture_coherent if coherent else make_one_mixture
def arg_tuple(i):
_args = (
generators[i],
os.path.join(target_folder, f'{i:08d}.wav'),
labels if coherent else num_sources,
event_parameters,
allow_repeated_label
)
return _args
args = [arg_tuple(i) for i in range(num_mixtures)]
# do one by itself for testing
mix_func(*args[0])
args = list(zip(*args[1:]))
args = [list(a) for a in args]
# now do the rest in parallel
p_tqdm.p_map(mix_func, *args, num_cpus=num_workers)
| 2.0625 | 2 |