max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---|
exporters/contrib/writers/odo_writer.py | scrapinghub/exporters | 41 | 2300 | import six
import json
import gzip
from exporters.default_retries import retry_long
from exporters.writers.base_writer import BaseWriter
class ODOWriter(BaseWriter):
"""
Writes items to an odo destination. https://odo.readthedocs.org/en/latest/
Needed parameters:
- schema (object)
schema object.
- odo_uri (str)
ODO valid destination uri.
"""
requirements = {
'schema': {'type': object, 'required': True},
'odo_uri': {'type': six.string_types, 'required': True}
}
def __init__(self, options):
super(ODOWriter, self).__init__(options)
from flatson import Flatson
schema = self.read_option('schema', None)
self.odo_uri = self.read_option('odo_uri', None)
self.flatson = Flatson(schema)
self.logger.info('ODOWriter has been initiated. Writing to: {}'.format(self.odo_uri))
@retry_long
def write(self, dump_path, group_key=''):
from odo import odo, resource, discover
import pandas as pd
with gzip.open(dump_path) as f:
lines = [json.loads(line.replace('\n', '')) for line in f.readlines()]
flattened_lines = (self.flatson.flatten(line) for line in lines)
pf = pd.DataFrame(flattened_lines, columns=self.flatson.fieldnames)
dshape = discover(pf)
odo(pf, resource(self.odo_uri), dshape=dshape)
| 2.421875 | 2 |
x7/geom/needs_test.py | gribbg/x7-geom | 0 | 2301 | <reponame>gribbg/x7-geom
"""
Simple file to validate that maketests is working. Call maketests via:
>>> from x7.shell import *; maketests('x7.sample.needs_tests')
"""
def needs_a_test(a, b):
return a+b
| 1.3125 | 1 |
python/SHA3_hashlib_based_concept.py | feketebv/SCA_proof_SHA3-512 | 1 | 2302 | '''
Written by: <NAME> <EMAIL> <EMAIL>
Last updated: 29.01.2021
'''
# the concept is to generate a side-channel resistant initialisation of the hashing function based on
# one secret key and several openly known initialisation vectors (IVs), in a manner where the same input
# is not hashed more than two times, which is hopefully not sufficient for side-channel
# measurement based attacks: the number of consecutive measurements needed for a successful attack on
# the CHI function in a practically noiseless computer simulation (see "chi_cpa.py") is around
# 100 measurements
# this concept is achieved by taking a counter of a certain bitlength, and twice as many IVs as bits in
# the counter: "IV0s" and "IV1s", and computing a series of hashes starting with the secret key, then with the
# corresponding IV from set 0 or set 1 based on whether the counter's corresponding bit - starting at the MSB -
# is 0 or 1; this way every hash output is used exactly 2 times, provided the intermediate values are STORED
# and the series of initial hashes is NOT fully recomputed: only those whose corresponding
# counter bits have changed are redone, together with all the levels below them down to the LSB of the counter
# the working solution is going to be based on the algorithms presented here, although
# the algorithm in this file does the full padding, so the results won't equal those of
# a scheme where the rate is fully filled with IVs and the data comes only afterwards...
import hashlib
# KEY DATA STRUCTURES' INTERPRETATION
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
IV0s = [658678, 6785697, 254376, 67856, 1432543, 786, 124345, 5443654]
IV1s = [2565, 256658, 985, 218996, 255, 685652, 28552, 3256565]
# LSB ... MSB
hash_copies = [None for i in range(len(IV0s))]
# LSB ... MSB
# counter
# MSB ... LSB
# COMPUTING HASHES FOR EVERY COUNTER VALUE INDIVIDUALLY
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
for counter in range(11):
hash = hashlib.sha3_512()
# looping from MSB to LSB in counter too
for i in range(len(IV0s)-1, -1, -1):
if (counter>>i) & 1 == 1:
IV = bytes(IV1s[i])
else:
IV = bytes(IV0s[i])
hash.update(IV)
print(hash.hexdigest())
print()
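# Illustration (added for clarity, not part of the original file): which IVs the
# scheme described above feeds to the hash for a given counter value, from the
# counter's MSB down to its LSB. For counter = 5 (binary 00000101) this selects
# IV0s[7] ... IV0s[3], then IV1s[2], IV0s[1], IV1s[0].
example_counter = 5
example_chain = [(IV1s[i] if (example_counter >> i) & 1 else IV0s[i]) for i in range(len(IV0s)-1, -1, -1)]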
# COMPUTING HASHES BASED ON THE NATURE OF BINARY INCREMENTATION:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# only the affected values need to be recomputed: those whose corresponding
# bits have changed, down to the LSB
# initialize
hash = hashlib.sha3_512()
# looping from MSB to LSB
for i in range(len(IV0s)-1, -1, -1):
# addressing "MSB" of IVs at first, "LSB" at last!
IV = bytes(IV0s[i])
hash.update(IV)
# index 0 of hash_copies changes the most frequently ie. according to counter's LSB
hash_copies[i] = hash.copy()
# compute
last_counter = 0
for counter in range(11):
IV_mask = last_counter ^ counter
last_counter = counter
# determine the highest non-zero bit of IV_mask, LSB is 1, 0 means there was no change
nz = 0
while IV_mask > 0:
IV_mask >>= 1
nz += 1
# initialize hash to the last value whose corresponding counter bit didn't switch
# have to copy object otherwise the originally pointed version gets updated!
hash = hash_copies[nz].copy() # LSB is index 0
# compute only the remaining hashes
while nz != 0: # nz=0 is the initial condition, nothing needs to be done
nz -= 1
if (counter>>nz) & 1 == 1:
IV = bytes(IV1s[nz])
else:
IV = bytes(IV0s[nz])
hash.update(IV)
# needs to be copied again because of object orientation
hash_copies[nz] = hash.copy()
# showing the hash copies' entire table after each computation
#for hashes in hash_copies:
# print(hashes.hexdigest())
print(hash_copies[0].hexdigest())
| 2.921875 | 3 |
graphstar/utils.py | pengboomouch/graphstar | 0 | 2303 | """
graphstar.utils
~~~~~~~~~~~~~~~
<NAME>
A simple bidirectional graph with A* and breadth-first pathfinding.
Utils are either used by the search algorithm, or when needed :)
Pretty self explanatory (I hope)
For more information see the examples and tests folder
"""
def smooth_path(p):
# If the path is only two nodes long, then
# we can’t smooth it, so return
if len(p) == 2:
return p
# Compile an output path
output = [p[0]]
# Keep track of where we are in the input path
# We start at 2, because we assume two adjacent
# nodes will pass the ray cast
i = 2
# Loop until we find the last item in the input
while i < len(p)-1:
# Do the ray cast
if not ray_clear(output[len(output)-1], p[i]):
# The ray cast failed, add the last node that
# passed to the output list
output.append(p[i-1])
# Consider the next node
i += 1
# We’ve reached the end of the input path, add the
# end node to the output and return it
output.append(p[len(p)-1])
return output
def clean_route_list(route_stack: list, goal_node_id: int):
"""
Creates an ordered route list from start to finish
with all node ids needed to traverse to the goal.
:param route_stack: All routes found until goal
:param goal_node: int ID of the goal node
:return: list An ordered list from start to goal
"""
r = []
next_node = goal_node_id
reversed_stack = reversed(route_stack)
for c in reversed_stack:
if c.to_node.id == next_node:
r.append(c.to_node.id)
r.append(c.from_node.id)
next_node = c.from_node.id
# de-duplicate while preserving traversal order, then reverse so the
# list reads from start to goal as documented
return list(dict.fromkeys(r))[::-1]
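# Minimal usage sketch (added for illustration; not from the original repo).
# graphstar's real Node and Route classes live elsewhere in the package, so
# simple namedtuples stand in for them here.
from collections import namedtuple
_Node = namedtuple('Node', ['id'])
_Route = namedtuple('Route', ['from_node', 'to_node'])
_stack = [_Route(_Node(1), _Node(2)), _Route(_Node(2), _Node(3)), _Route(_Node(2), _Node(4))]
clean_route_list(_stack, goal_node_id=4)  # -> [1, 2, 4]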
| 3.609375 | 4 |
design_patterns/pubsub/simple_events/__init__.py | JASTYN/pythonmaster | 3 | 2304 | <filename>design_patterns/pubsub/simple_events/__init__.py
class Event:
def __init__(self):
self.handlers = set()
def subscribe(self, func):
self.handlers.add(func)
def unsubscribe(self, func):
self.handlers.remove(func)
def emit(self, *args):
for func in self.handlers:
func(*args)
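# Usage sketch (added for illustration; not part of the original module):
_on_saved = Event()
_on_saved.subscribe(lambda name: print('saved', name))
_on_saved.emit('report.txt')  # every subscribed handler receives the args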
| 2.6875 | 3 |
jinchi/demo/foobar.py | jiz148/py-test | 0 | 2305 | import os
def check_env(env_var_name):
"""
Check and return the type of an environment variable.
supported types:
None
Integer
String
@param env_var_name: environment variable name
@return: string of the type name.
"""
try:
val = os.getenv(env_var_name)
if val is None:
return 'None'
except Exception as ex:
return "None"
try:
int_val = int(val)
return 'Integer'
except ValueError:
return 'String'
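# Usage sketch (added for illustration; the variable names are hypothetical):
os.environ['JINCHI_DEMO'] = '42'
print(check_env('JINCHI_DEMO'))     # -> 'Integer'
print(check_env('JINCHI_MISSING'))  # -> 'None'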
| 3.484375 | 3 |
sound/serializers.py | Anirudhchoudhary/ApnaGanna__backend | 0 | 2306 | from .models import Sound , Album
from rest_framework import serializers
class SoundSerializer(serializers.ModelSerializer):
class Meta:
model = Sound
fields = ["name" , "song_image" , "pk" , "like" , "played" , "tag" , "singer" , "upload_date"]
class SoundDetailSerializer(serializers.ModelSerializer):
class Meta:
model = Sound
fields = "__all__"
class AlbumSerializer(serializers.ModelSerializer):
sound = serializers.SerializerMethodField()
class Meta:
model = Album
fields = ["name" , "datepublish" , "category" , "sound"]
depth = 1
def get_sound(self , obj):
print("WORKING")
return SoundSerializer(instance=obj.sound , many=True).data
| 2.546875 | 3 |
tracking/utils.py | WGBH/django-tracking | 0 | 2307 | from datetime import datetime
from django.conf import settings
import pytz
def check_tracker(obj, simple=True):
if simple:
if obj.status > 0:
return True
return False
# we have a gatekeeper
now = datetime.now(pytz.utc)
if obj.tracker_publish_status < 0:
return False
if obj.tracker_publish_status > 0:
return True
# Checking live_as_of ...
# is live_as_of set?
if not obj.tracker_live_as_of: # No live_as_of --- bail
return False
# has it happened yet?
if now < obj.tracker_live_as_of: # live_as_of --- not yet!
return False
# is there an expiration date?
if obj.tracker_expires and now > obj.tracker_expires: # EXPIRED!
return False
# it's OK then
return True
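# Illustration (added; not part of the original app): any object exposing the
# status/tracker_* attributes read above can be checked. A SimpleNamespace
# stands in for a real tracked model instance here.
from types import SimpleNamespace
_demo = SimpleNamespace(status=1, tracker_publish_status=0, tracker_live_as_of=None, tracker_expires=None)
check_tracker(_demo, simple=True)   # -> True, since status > 0
check_tracker(_demo, simple=False)  # -> False, since no live_as_of is set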
DEFAULT_TRACKER_POSITIONS = [
('tracker-head-top', 'Head - near top'),
('tracker-head-bottom', 'Head - near bottom'),
('tracker-body-top', 'Body - near top'),
('tracker-body-bottom', 'Body - near bottom')
]
def get_tracker_position_options():
"""
This creates the dropdown in the Admin for where to put each tracker.
It defaults to the obvious 4 locations (top/bottom of the head/body);
however the user can create more by adding a list of 3-tuples in the settings
file under ADDITIONAL_TRACKER_POSITIONS.
(2-letter-code, description, block name), e.g.
('HN', 'Header Navigation', 'header-navigation-trackers')
would allow for the user to have tracking code in a navbar (no, I don't know
why they'd want this) if they put
{% block header-navigation-trackers %}{% generate_trackers 'HN' %}{% endblock %}
in their template.
"""
tracker_position_list = DEFAULT_TRACKER_POSITIONS
additional_tracker_positions = getattr(settings, "ADDITIONAL_TRACKER_POSITIONS", [])
full_list = list()
for x in (tracker_position_list + additional_tracker_positions):
full_list.append((x[0], x[1]))
return full_list | 2.546875 | 3 |
devtools/api/health.py | ankeshkhemani/devtools | 0 | 2308 | import datetime
from fastapi import APIRouter
router = APIRouter()
@router.get("", tags=["health"])
async def get_health():
return {
"results": [],
"status": "success",
"timestamp": datetime.datetime.now().timestamp()
}
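# Wiring sketch (added for illustration; mounting normally happens in the
# application entry point elsewhere in the project, not in this module):
from fastapi import FastAPI
_example_app = FastAPI()
_example_app.include_router(router, prefix="/health")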
| 2.546875 | 3 |
computation/Tests/Jetson/TF_model.py | y-x-c/Heliot | 4 | 2309 | import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
from distutils.version import StrictVersion
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
import json
import time
import cv2
PATH_TO_FROZEN_GRAPH = '../data/mobilenet_v2_1.4_224/mobilenet_v2_1.4_224_frozen.pb'
info='Time taken to load Model into memory:'
start_time=time.time()
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
end_time=time.time()
time_taken=end_time-start_time
print(info,time_taken)
# Load the labels
#Load categories
categories = []
with open('../data/' + 'categories.txt', 'r') as f:
for line in f:
cat = line.split('\n')[0]
if cat != 'classes':
categories.append(cat)
f.close()
print('Number of categories:', len(categories))
# Load image size
with open('../data/' + 'inputsize.txt', 'r') as f:
reqsize = int(f.readline().split('\n')[0])
#print(reqsize)
#image_filename = '../data/' + 'image1.jpg'
def Load_and_process_img(image_filename):
img = cv2.imread(image_filename)#.astype(numpy.float32)
img = cv2.resize(img, (reqsize, reqsize))
img=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
img = img.astype(float)
#img values are scaled from -1 to 1
img /= 255.0
img -= 0.5
img *= 2.0
return img
sess=tf.Session(graph=detection_graph)
def run_inference_b1(key_name,image, graph,no_of_run):
#model output layer name
ops = graph.get_operations()
all_tensor_names = {output.name for op in ops for output in op.outputs}
#print(all_tensor_names)
tensor_dict = {}
for key in [key_name]:
tensor_name = key + ':0'
if tensor_name in all_tensor_names:
tensor_dict[key] = graph.get_tensor_by_name(tensor_name)
image=image.reshape(1,image.shape[0],image.shape[1],image.shape[2])
image_tensor = graph.get_tensor_by_name('input:0')
#Demo run, so that graph is loaded into TF memory
sess.run(tensor_dict,feed_dict={image_tensor: image})
# Run inference
info='Time taken to run inference: run_inference_b1:'+str(no_of_run)+' Times: '
start_time=time.time()
for i in range(no_of_run):
output_dict = sess.run(tensor_dict,
feed_dict={image_tensor: image})
end_time=time.time()
time_taken=end_time-start_time
print(info,time_taken)
#print(output_dict)
top_inds = output_dict[key_name][0].argsort()[::-1][:5]
result=[]
for i in range(5):
result.append([top_inds[i], categories[top_inds[i]], output_dict[key_name][0][top_inds[i]]])
return result, time_taken
image_filename = '../data/' + 'Tiger.jpg'
img = Load_and_process_img(image_filename)
key_name='MobilenetV2/Predictions/Reshape_1'
result,time_taken=run_inference_b1(key_name,img,detection_graph,1000)
print('Time Taken to run Inference is:',time_taken)
print(result)
| 2.296875 | 2 |
frappe/patches/v13_0/update_date_filters_in_user_settings.py | chentaoz/frappe | 3 | 2310 | from __future__ import unicode_literals
import frappe, json
from frappe.model.utils.user_settings import update_user_settings, sync_user_settings
def execute():
users = frappe.db.sql("select distinct(user) from `__UserSettings`", as_dict=True)
for user in users:
user_settings = frappe.db.sql('''
select
* from `__UserSettings`
where
user="{user}"
'''.format(user = user.user), as_dict=True)
for setting in user_settings:
data = frappe.parse_json(setting.get('data'))
if data:
for key in data:
update_user_setting_filters(data, key, setting)
sync_user_settings()
def update_user_setting_filters(data, key, user_setting):
timespan_map = {
'1 week': 'week',
'1 month': 'month',
'3 months': 'quarter',
'6 months': '6 months',
'1 year': 'year',
}
period_map = {
'Previous': 'last',
'Next': 'next'
}
if data.get(key):
update = False
if isinstance(data.get(key), dict):
filters = data.get(key).get('filters')
if filters and isinstance(filters, list):
for f in filters:
if f[2] == 'Next' or f[2] == 'Previous':
update = True
f[3] = period_map[f[2]] + ' ' + timespan_map[f[3]]
f[2] = 'Timespan'
if update:
data[key]['filters'] = filters
update_user_settings(user_setting['doctype'], json.dumps(data), for_update=True)
| 2.109375 | 2 |
miniproject/train.py | peguerosdc/ml4phy-quantum-oscillators | 0 | 2311 | import BoltzmannMachine as bm
import QHO as qho
import numpy as np
import datetime
# Visualization imports
from IPython.display import clear_output
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams['figure.dpi']=300
def sigmoid(x):
return .5 * (1 + np.tanh(x / 2.))
# Set the quantum gas with N particles, a limit of 10 for the
# quantum numbers and default temperature and frequency
N = 10*10
gas = qho.QHOGas(N=N)
n_max = 10
training_size = 100000
# the amount of hidden units was set by trial and error
hidden_units = 70
# the recipe suggests to set the batchsize to 10, though it can range
# from 10 to 100
batchsize = 10
# the recipe suggests a learning rate that makes the weight updates about
# 1e-3 times the weights (to within an order of magnitude)
eta = 0.005
# the amount of steps was set by trial and error
nsteps = 300000
# define the validation set to be used in training_visualization
validation_set = gas.generate(amount=20)
def training_visualization(machine, current_step, total_steps, eta, a, b, w, da, db, dw):
# Every now and then (every 50k steps), let us know that the training
# is still running
if current_step%50000 == 0:
print("{:08d} / {:08d}".format(current_step, total_steps), end=" \r")
# After 'checkpoint_steps', show the suggested plots
checkpoint_steps = 10000
if current_step%checkpoint_steps == 0 or current_step == total_steps-1:
print(f"Showing at step {current_step}.")
# Produce a sample starting from the validation set after 100 steps
v_prime = machine.generate(validation_set, 100, a=a, b=b, w=w)
# print useful plots for training
plot_training(validation_set, v_prime, eta, a, b, w, da, db, dw)
def plot_training(v, v_prime, eta, a, b, w, da, db, dw):
clear_output(wait=True)
# Show how the weights light up for the state v
hMean = sigmoid(np.dot(v, w) + b)
image = Image.fromarray(hMean * 256).show()
# Create the grid for all the other plots we want
plt.rcParams.update({'font.size': 2})
# plot histogram of initial vs generated
n = np.arange(0,10)
generated_quantum_numbers = np.rint(v_prime*10)
plt.hist( generated_quantum_numbers.flatten(), bins=np.arange(0,10), density=True, label="Sampled" )
plt.plot( n, gas.p_n(n), label="Theor." )
plt.xlabel('n')
plt.ylabel('P(n)')
plt.legend()
# plot histogram of visible, hidden, weights
fig = plt.figure(constrained_layout=True)
gs = fig.add_gridspec(ncols=3, nrows=2)
def plotit(axis, values, title):
axis.hist(values)
axis.set_title(f"{title}: mm = {np.mean(np.fabs(values))}")
plotit(fig.add_subplot(gs[0,0]), a, 'a')
plotit(fig.add_subplot(gs[0,1]), w.flatten(), 'w')
plotit(fig.add_subplot(gs[0,2]), b, 'b')
# plot histogram of d_visible, d_hidden, d_weights
plotit(fig.add_subplot(gs[1,0]), eta*da, 'da')
plotit(fig.add_subplot(gs[1,1]), eta*dw.flatten(), 'dw')
plotit(fig.add_subplot(gs[1,2]), eta*db, 'db')
# show free energies of the average of samples
x = lambda vv : b + np.dot(vv, w)
free_training = -np.dot(v, a) - np.sum( np.log(1 + np.exp(x(v))), axis=1)
free_validation = -np.dot(v_prime, a) - np.sum( np.log(1 + np.exp(x(v_prime))), axis=1)
print(f"\nF_training={np.average(free_training)} vs F_validation={np.average(free_validation)}\n")
# Show.
# CAUTION! This will freeze the execution
plt.show()
# Init the boltzmann machine and train it while visualizing the suggested plots
training_set = gas.generate(amount=training_size, n_max=n_max)
m = bm.BoltzmannMachine(num_hidden=hidden_units)
a,b,w = m.train(training_set, batchsize=batchsize, eta=eta, nsteps=nsteps, do_while_training=None)
# Store in a file
run_id = int(datetime.datetime.now().timestamp())
np.savetxt(f"a_{run_id}.csv", a, delimiter=',')
np.savetxt(f"b_{run_id}.csv", b, delimiter=',')
np.savetxt(f"w_{run_id}.csv", w, delimiter=',')
| 2.84375 | 3 |
Tests/Marketplace/prepare_public_index_for_private_testing.py | diCagri/content | 799 | 2312 | import time
import os
import sys
import shutil
import json
import argparse
from zipfile import ZipFile
from contextlib import contextmanager
from datetime import datetime
from Tests.private_build.upload_packs_private import download_and_extract_index, update_index_with_priced_packs, \
extract_packs_artifacts
from Tests.Marketplace.marketplace_services import init_storage_client
from Tests.scripts.utils.log_util import install_logging
from Tests.scripts.utils import logging_wrapper as logging
MAX_SECONDS_TO_WAIT_FOR_LOCK = 600
LOCK_FILE_PATH = 'lock.txt'
@contextmanager
def lock_and_unlock_dummy_index(public_storage_bucket, dummy_index_lock_path):
try:
acquire_dummy_index_lock(public_storage_bucket, dummy_index_lock_path)
yield
except Exception:
logging.exception("Error in dummy index lock context manager.")
finally:
release_dummy_index_lock(public_storage_bucket, dummy_index_lock_path)
def change_pack_price_to_zero(path_to_pack_metadata):
with open(path_to_pack_metadata, 'r') as pack_metadata_file:
pack_metadata = json.load(pack_metadata_file)
pack_metadata['price'] = 0
with open(path_to_pack_metadata, 'w') as pack_metadata_file:
json.dump(pack_metadata, pack_metadata_file, indent=4)
def change_packs_price_to_zero(public_index_folder_path):
paths_to_packs_in_merged_index = [pack_dir.path for pack_dir in os.scandir(public_index_folder_path) if
pack_dir.is_dir()]
for path_to_pack in paths_to_packs_in_merged_index:
path_to_pack_metadata = os.path.join(path_to_pack, 'metadata.json')
change_pack_price_to_zero(path_to_pack_metadata)
def merge_private_index_into_public_index(public_index_folder_path, private_index_folder_path):
packs_in_private_index = [pack_dir.name for pack_dir in os.scandir(private_index_folder_path) if pack_dir.is_dir()]
for pack_name in packs_in_private_index:
path_to_pack_in_private_index = os.path.join(private_index_folder_path, pack_name)
path_to_pack_in_public_index = os.path.join(public_index_folder_path, pack_name)
shutil.copy(path_to_pack_in_private_index, path_to_pack_in_public_index)
def upload_modified_index(public_index_folder_path, extract_destination_path, public_ci_dummy_index_blob, build_number,
private_packs):
"""Upload updated index zip to cloud storage.
Args:
public_index_folder_path (str): public index folder full path.
extract_destination_path (str): extract folder full path.
public_ci_dummy_index_blob (Blob): google cloud storage object that represents the dummy index.zip blob.
build_number (str): circleCI build number, used as an index revision.
private_packs (list): List of private packs and their price.
"""
with open(os.path.join(public_index_folder_path, "index.json"), "w+") as index_file:
for private_pack in private_packs:
private_pack['price'] = 0
index = {
'revision': build_number,
'modified': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
'packs': private_packs
}
json.dump(index, index_file, indent=4)
index_zip_name = os.path.basename(public_index_folder_path)
index_zip_path = shutil.make_archive(base_name=public_index_folder_path, format="zip",
root_dir=extract_destination_path, base_dir=index_zip_name)
try:
public_ci_dummy_index_blob.reload()
public_ci_dummy_index_blob.cache_control = "no-cache,max-age=0" # disabling caching for index blob
public_ci_dummy_index_blob.upload_from_filename(index_zip_path)
logging.success("Finished uploading index.zip to storage.")
except Exception:
logging.exception("Failed in uploading index. Mismatch in index file generation.")
sys.exit(1)
finally:
shutil.rmtree(public_index_folder_path)
def option_handler():
"""Validates and parses script arguments.
Returns:
Namespace: Parsed arguments object.
"""
parser = argparse.ArgumentParser(description="Store packs in cloud storage.")
# disable-secrets-detection-start
parser.add_argument('-b', '--public_bucket_name', help="CI public bucket name", required=True)
parser.add_argument('-pb', '--private_bucket_name', help="CI private bucket name", required=True)
parser.add_argument('-s', '--service_account',
help=("Path to gcloud service account, is for circleCI usage. "
"For local development use your personal account and "
"authenticate using Google Cloud SDK by running: "
"`gcloud auth application-default login` and leave this parameter blank. "
"For more information go to: "
"https://googleapis.dev/python/google-api-core/latest/auth.html"),
required=False)
parser.add_argument('-n', '--ci_build_number',
help="CircleCi build number (will be used as hash revision at index file)", required=True)
parser.add_argument('-e', '--extract_public_index_path', help="Full path of folder to extract the public index",
required=True)
parser.add_argument('-sb', '--storage_base_path', help="Storage base path of the directory to upload to.",
required=False)
parser.add_argument('-p', '--pack_name', help="Modified pack to upload to gcs.")
parser.add_argument('-a', '--artifacts_path', help="The full path of packs artifacts", required=True)
parser.add_argument('-ea', '--extract_artifacts_path', help="Full path of folder to extract wanted packs",
required=True)
parser.add_argument('-di', '--dummy_index_dir_path', help="Full path to the dummy index in the private CI bucket",
required=True)
# disable-secrets-detection-end
return parser.parse_args()
def is_dummy_index_locked(public_storage_bucket, dummy_index_lock_path):
dummy_index_lock_blob = public_storage_bucket.blob(dummy_index_lock_path)
return dummy_index_lock_blob.exists()
def lock_dummy_index(public_storage_bucket, dummy_index_lock_path):
dummy_index_lock_blob = public_storage_bucket.blob(dummy_index_lock_path)
with open(LOCK_FILE_PATH, 'w') as lock_file:
lock_file.write('locked')
with open(LOCK_FILE_PATH, 'rb') as lock_file:
dummy_index_lock_blob.upload_from_file(lock_file)
def acquire_dummy_index_lock(public_storage_bucket, dummy_index_lock_path):
total_seconds_waited = 0
while is_dummy_index_locked(public_storage_bucket, dummy_index_lock_path):
if total_seconds_waited >= MAX_SECONDS_TO_WAIT_FOR_LOCK:
logging.critical("Error: Failed too long to acquire lock, exceeded max wait time.")
sys.exit(1)
if total_seconds_waited % 60 == 0:
# Printing a message every minute to keep the machine from dying due to no output
logging.info("Waiting to acquire lock.")
total_seconds_waited += 10
time.sleep(10)
lock_dummy_index(public_storage_bucket, dummy_index_lock_path)
def release_dummy_index_lock(public_storage_bucket, dummy_index_lock_path):
dummy_index_lock_blob = public_storage_bucket.blob(dummy_index_lock_path)
dummy_index_lock_blob.delete()
os.remove(LOCK_FILE_PATH)
def add_private_packs_from_dummy_index(private_packs, dummy_index_blob):
downloaded_dummy_index_path = 'current_dummy_index.zip'
extracted_dummy_index_path = 'dummy_index'
dummy_index_json_path = os.path.join(extracted_dummy_index_path, 'index', 'index.json')
dummy_index_blob.download_to_filename(downloaded_dummy_index_path)
os.mkdir(extracted_dummy_index_path)
if os.path.exists(downloaded_dummy_index_path):
with ZipFile(downloaded_dummy_index_path, 'r') as index_zip:
index_zip.extractall(extracted_dummy_index_path)
with open(dummy_index_json_path) as index_file:
index_json = json.load(index_file)
packs_from_dummy_index = index_json.get('packs', [])
for pack in private_packs:
is_pack_in_dummy_index = any(
[pack['id'] == dummy_index_pack['id'] for dummy_index_pack in packs_from_dummy_index])
if not is_pack_in_dummy_index:
packs_from_dummy_index.append(pack)
os.remove(downloaded_dummy_index_path)
shutil.rmtree(extracted_dummy_index_path)
return packs_from_dummy_index
def main():
install_logging('prepare_public_index_for_private_testing.log', logger=logging)
upload_config = option_handler()
service_account = upload_config.service_account
build_number = upload_config.ci_build_number
public_bucket_name = upload_config.public_bucket_name
private_bucket_name = upload_config.private_bucket_name
storage_base_path = upload_config.storage_base_path
extract_public_index_path = upload_config.extract_public_index_path
changed_pack = upload_config.pack_name
extract_destination_path = upload_config.extract_artifacts_path
packs_artifacts_path = upload_config.artifacts_path
dummy_index_dir_path = upload_config.dummy_index_dir_path
dummy_index_path = os.path.join(dummy_index_dir_path, 'index.zip')
dummy_index_lock_path = os.path.join(dummy_index_dir_path, 'lock.txt')
storage_client = init_storage_client(service_account)
public_storage_bucket = storage_client.bucket(public_bucket_name)
private_storage_bucket = storage_client.bucket(private_bucket_name)
dummy_index_blob = public_storage_bucket.blob(dummy_index_path)
with lock_and_unlock_dummy_index(public_storage_bucket, dummy_index_lock_path):
extract_packs_artifacts(packs_artifacts_path, extract_destination_path)
public_index_folder_path, public_index_blob, _ = download_and_extract_index(public_storage_bucket,
extract_public_index_path, storage_base_path)
# In order for the packs to be downloaded successfully, their price has to be 0
change_packs_price_to_zero(public_index_folder_path)
private_packs, private_index_path, private_index_blob = update_index_with_priced_packs(private_storage_bucket,
extract_destination_path,
public_index_folder_path,
changed_pack, True,
storage_base_path)
private_packs = add_private_packs_from_dummy_index(private_packs, dummy_index_blob)
upload_modified_index(public_index_folder_path, extract_public_index_path, dummy_index_blob, build_number,
private_packs)
if __name__ == '__main__':
main()
| 2.03125 | 2 |
ARMODServers/Apps/ARExperiences/apps.py | Phantomxm2021/ARMOD-Dashboard | 1 | 2313 | from django.apps import AppConfig
class ArexperiencesConfig(AppConfig):
name = 'Apps.ARExperiences'
| 1.148438 | 1 |
configs/_base_/datasets/flyingchairs_320x448.py | zhouzaida/mmflow | 1 | 2314 | <filename>configs/_base_/datasets/flyingchairs_320x448.py<gh_stars>1-10
dataset_type = 'FlyingChairs'
data_root = 'data/FlyingChairs_release'
img_norm_cfg = dict(mean=[0., 0., 0.], std=[255., 255., 255.], to_rgb=False)
global_transform = dict(
translates=(0.05, 0.05),
zoom=(1.0, 1.5),
shear=(0.86, 1.16),
rotate=(-10., 10.))
relative_transform = dict(
translates=(0.00375, 0.00375),
zoom=(0.985, 1.015),
shear=(1.0, 1.0),
rotate=(-1.0, 1.0))
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(
type='ColorJitter',
brightness=0.5,
contrast=0.5,
saturation=0.5,
hue=0.5),
dict(type='RandomGamma', gamma_range=(0.7, 1.5)),
dict(type='Normalize', **img_norm_cfg),
dict(type='GaussianNoise', sigma_range=(0, 0.04), clamp_range=(0., 1.)),
dict(type='RandomFlip', prob=0.5, direction='horizontal'),
dict(type='RandomFlip', prob=0.5, direction='vertical'),
dict(
type='RandomAffine',
global_transform=global_transform,
relative_transform=relative_transform),
dict(type='RandomCrop', crop_size=(320, 448)),
dict(type='DefaultFormatBundle'),
dict(
type='Collect',
keys=['imgs', 'flow_gt'],
meta_keys=[
'img_fields', 'ann_fields', 'filename1', 'filename2',
'ori_filename1', 'ori_filename2', 'filename_flow',
'ori_filename_flow', 'ori_shape', 'img_shape', 'img_norm_cfg'
]),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(type='InputResize', exponent=6),
dict(type='Normalize', **img_norm_cfg),
dict(type='TestFormatBundle'),
dict(
type='Collect',
keys=['imgs'],
meta_keys=[
'flow_gt', 'filename1', 'filename2', 'ori_filename1',
'ori_filename2', 'ori_shape', 'img_shape', 'img_norm_cfg',
'scale_factor', 'pad_shape'
])
]
flyingchairs_train = dict(
type=dataset_type,
pipeline=train_pipeline,
data_root=data_root,
split_file='data/FlyingChairs_release/FlyingChairs_train_val.txt')
data = dict(
train_dataloader=dict(
samples_per_gpu=1,
workers_per_gpu=2,
drop_last=True,
persistent_workers=True),
val_dataloader=dict(samples_per_gpu=1, workers_per_gpu=2, shuffle=False),
test_dataloader=dict(samples_per_gpu=1, workers_per_gpu=2, shuffle=False),
train=flyingchairs_train,
val=dict(
type=dataset_type,
pipeline=test_pipeline,
data_root=data_root,
test_mode=True,
split_file='data/FlyingChairs_release/FlyingChairs_train_val.txt'),
test=dict(
type=dataset_type,
pipeline=test_pipeline,
data_root=data_root,
test_mode=True,
split_file='data/FlyingChairs_release/FlyingChairs_train_val.txt'))
| 1.765625 | 2 |
plaidml2/edsl/__init__.py | ZhouXiaolin/plaidml | 4,535 | 2315 | <gh_stars>1000+
# Copyright 2019 Intel Corporation.
import logging
from collections import namedtuple
import numpy as np
import six
from plaidml2 import DType
from plaidml2.core import TensorShape, Buffer
from plaidml2.ffi import ForeignObject, ffi, ffi_call, lib
logger = logging.getLogger(__name__)
def __init():
"""Docstring for function plaidml2.edsl.__init"""
ffi_call(lib.plaidml_edsl_init)
ffi.init_once(__init, 'plaidml_edsl_init')
class LogicalShape(ForeignObject):
"""Docstring for class LogicalShape"""
__ffi_del__ = lib.plaidml_logical_shape_free
__ffi_repr__ = lib.plaidml_logical_shape_repr
def __init__(self, dtype=None, dims=[], ptr=None):
if ptr:
ffi_obj = ptr
elif dtype is not None:
raw_dims = ffi.new('int64_t[]', [0 if x is None else x for x in dims])
ffi_obj = ffi_call(lib.plaidml_logical_shape_alloc, dtype, len(dims), raw_dims)
else:
raise ValueError('One of dtype= or ptr= must be specified.')
super(LogicalShape, self).__init__(ffi_obj)
@property
def dtype(self):
return DType(ffi_call(lib.plaidml_logical_shape_get_dtype, self.as_ptr()))
@property
def ndims(self):
return ffi_call(lib.plaidml_logical_shape_get_ndims, self.as_ptr())
@property
def int_dims(self):
"""Returns the dimensions of a LogicalShape as a list.
Args:
self (pointer): The object pointer for a LogicalShape
Returns:
list (int): Integer dimensions of the LogicalShape.
"""
return [
ffi_call(lib.plaidml_logical_shape_get_dim_int, self.as_ptr(), i)
for i in range(self.ndims)
]
def into_TensorShape(self):
return TensorShape(
ptr=ffi_call(lib.plaidml_logical_shape_into_tensor_shape, self.as_ptr()))
Constraint = namedtuple('Constraint', ['lhs', 'rhs'])
def wrap_dim(x):
if isinstance(x, six.integer_types):
return TensorDim(expr=ffi_call(lib.plaidml_dim_expr_int, x))
return x
def dim_op(op, *args):
args = [wrap_dim(x) for x in args]
raw_args = [x.as_ptr() for x in args]
return ffi_call(lib.plaidml_dim_expr_op, op, len(args), raw_args)
class TensorDim(ForeignObject):
"""Docstring for class TensorDim"""
__ffi_del__ = lib.plaidml_dim_expr_free
__ffi_repr__ = lib.plaidml_dim_expr_repr
def __init__(self, expr=None):
if expr is None:
expr = ffi_call(lib.plaidml_dim_expr_none)
super(TensorDim, self).__init__(expr)
def _bind(self, expr):
self.take_ptr(expr)
def __neg__(self):
return TensorDim(dim_op(lib.PLAIDML_INT_OP_NEG, self))
def __add__(self, other):
return TensorDim(dim_op(lib.PLAIDML_INT_OP_ADD, self, other))
def __radd__(self, other):
return TensorDim(dim_op(lib.PLAIDML_INT_OP_ADD, other, self))
def __sub__(self, other):
return TensorDim(dim_op(lib.PLAIDML_INT_OP_SUB, self, other))
def __rsub__(self, other):
return TensorDim(dim_op(lib.PLAIDML_INT_OP_SUB, other, self))
def __mul__(self, other):
return TensorDim(dim_op(lib.PLAIDML_INT_OP_MUL, self, other))
def __rmul__(self, other):
return TensorDim(dim_op(lib.PLAIDML_INT_OP_MUL, other, self))
def __floordiv__(self, other):
return TensorDim(dim_op(lib.PLAIDML_INT_OP_DIV, self, other))
def __rfloordiv__(self, other):
return TensorDim(dim_op(lib.PLAIDML_INT_OP_DIV, other, self))
def wrap_poly(x):
if isinstance(x, six.integer_types):
return TensorIndex(expr=ffi_call(lib.plaidml_poly_expr_literal, x))
if isinstance(x, TensorDim):
return TensorIndex(expr=ffi_call(lib.plaidml_poly_expr_dim, x.as_ptr()))
return x
def poly_op(op, *args):
args = [wrap_poly(x) for x in args]
raw_args = [x.as_ptr() for x in args]
return ffi_call(lib.plaidml_poly_expr_op, op, len(args), raw_args)
class TensorIndex(ForeignObject):
"""Docstring for class TensorIndex"""
__ffi_del__ = lib.plaidml_poly_expr_free
__ffi_repr__ = lib.plaidml_poly_expr_repr
def __init__(self, expr=None, name=''):
if expr is None:
expr = ffi_call(lib.plaidml_poly_expr_index, name.encode())
super(TensorIndex, self).__init__(expr)
def __lt__(self, rhs):
return Constraint(self, wrap_dim(rhs))
def __neg__(self):
return TensorIndex(poly_op(lib.PLAIDML_INT_OP_NEG, self))
def __add__(self, rhs):
return TensorIndex(poly_op(lib.PLAIDML_INT_OP_ADD, self, rhs))
def __radd__(self, lhs):
return TensorIndex(poly_op(lib.PLAIDML_INT_OP_ADD, lhs, self))
def __sub__(self, rhs):
return TensorIndex(poly_op(lib.PLAIDML_INT_OP_SUB, self, rhs))
def __rsub__(self, lhs):
return TensorIndex(poly_op(lib.PLAIDML_INT_OP_SUB, lhs, self))
def __mul__(self, rhs):
return TensorIndex(poly_op(lib.PLAIDML_INT_OP_MUL, self, rhs))
def __rmul__(self, lhs):
return TensorIndex(poly_op(lib.PLAIDML_INT_OP_MUL, lhs, self))
def __floordiv__(self, rhs):
return TensorIndex(poly_op(lib.PLAIDML_INT_OP_DIV, self, rhs))
def __rfloordiv__(self, lhs):
return TensorIndex(poly_op(lib.PLAIDML_INT_OP_DIV, lhs, self))
class _IndexMap(ForeignObject):
__ffi_del__ = lib.plaidml_expr_free
__ffi_repr__ = lib.plaidml_expr_repr
def __init__(self, ref, key):
if isinstance(key, tuple) or isinstance(key, list):
idxs = key
else:
idxs = [key]
idxs = [wrap_poly(x) for x in idxs]
raw_idxs = [x.as_ptr() for x in idxs]
expr = ffi_call(lib.plaidml_expr_index_map, ref.as_ptr(), len(idxs), raw_idxs)
super(_IndexMap, self).__init__(expr)
class _SizeMap(ForeignObject):
__ffi_del__ = lib.plaidml_expr_free
__ffi_repr__ = lib.plaidml_expr_repr
def __init__(self, dims):
dims = [wrap_dim(x) for x in dims]
raw_dims = [x.as_ptr() for x in dims]
expr = ffi_call(lib.plaidml_expr_size_map, len(dims), raw_dims)
super(_SizeMap, self).__init__(expr)
class _Contraction(ForeignObject):
__ffi_del__ = lib.plaidml_expr_free
__ffi_repr__ = lib.plaidml_expr_repr
def __init__(self, agg_op, combo_op, src_idxs, sink_idxs, sink_sizes, name):
src_idxs = [x.as_ptr() for x in src_idxs]
expr = ffi_call(
lib.plaidml_expr_contraction,
agg_op,
combo_op,
sink_idxs.as_ptr(),
sink_sizes.as_ptr(),
len(src_idxs),
src_idxs,
name.encode(),
)
super(_Contraction, self).__init__(expr)
_ContractionPart = namedtuple('_ContractionPart', ['op', 'args'])
class IndexedTensor(object):
"""Docstring for class IndexedTensor"""
def __init__(self, impl, tensor=None):
self._impl = impl
self._tensor = tensor
def __repr__(self):
return repr(self._impl)
# Represents an aggregation_op of SUM in a contraction
def __iadd__(self, rhs):
return IndexedTensor(self._make_contraction(lib.PLAIDML_AGG_OP_SUM, rhs))
# Represents an aggregation_op of PROD in a contraction
def __imul__(self, rhs):
return IndexedTensor(self._make_contraction(lib.PLAIDML_AGG_OP_PROD, rhs))
# Represents an aggregation_op of MAX in a contraction
def __ge__(self, rhs):
self._tensor._set_contraction(self._make_contraction(lib.PLAIDML_AGG_OP_MAX, rhs))
# Represents an aggregation_op of MIN in a contraction
def __le__(self, rhs):
self._tensor._set_contraction(self._make_contraction(lib.PLAIDML_AGG_OP_MIN, rhs))
# Represents a combo_op of PLUS in a contraction
def __add__(self, rhs):
return IndexedTensor(_ContractionPart(lib.PLAIDML_COMBO_OP_ADD, (self, rhs)))
# Represents a combo_op of MULTIPLY in a contraction
def __mul__(self, rhs):
return IndexedTensor(_ContractionPart(lib.PLAIDML_COMBO_OP_MUL, (self, rhs)))
# Represents a combo_op of EQ in a contraction
def __eq__(self, rhs):
return IndexedTensor(_ContractionPart(lib.PLAIDML_COMBO_OP_EQ, (self, rhs)))
def _make_contraction(self, agg_op, rhs):
# Extract combo_op and inputs
if isinstance(rhs._impl, _IndexMap):
# Unary op
combo_op = lib.PLAIDML_COMBO_OP_NONE
inputs = [rhs._impl]
elif isinstance(rhs._impl, _ContractionPart):
# Binary/Ternary op
combo_op = rhs._impl.op
inputs = [x._impl for x in rhs._impl.args]
else:
raise ValueError('Invalid impl')
return _Contraction(
agg_op,
combo_op,
inputs,
self._impl,
_SizeMap(self._tensor._dims),
self._tensor._name,
)
class Tensor(ForeignObject):
"""Docstring for class Tensor"""
__ffi_del__ = lib.plaidml_expr_free
__ffi_repr__ = lib.plaidml_expr_repr
_dims = None
_is_contraction = False
def __init__(self, shape=None, dims=None, expr=None, value=None, name='', buffer=None):
self._name = name
self._buffer = buffer
if shape:
if buffer is None:
raw_buffer = ffi.NULL
else:
raw_buffer = buffer.as_ptr()
expr = ffi_call(lib.plaidml_expr_placeholder, shape.as_ptr(), raw_buffer,
name.encode())
elif dims is not None:
self._dims = dims
expr = None
elif value is not None:
if isinstance(value, six.integer_types):
expr = ffi_call(lib.plaidml_expr_int, value)
elif isinstance(value, float):
expr = ffi_call(lib.plaidml_expr_float, value)
else:
raise TypeError('Invalid type for value={}'.format(value))
elif expr is None:
raise ValueError('One of dims=, shape=, or expr= must be specified.')
super(Tensor, self).__init__(expr)
def set_param_value(self, buffer):
# Changes the value of a parameter tensor (i.e. one explicitly set to a buffer value)
# Illegal on other tensors
ffi_call(lib.plaidml_expr_param_reset, self.__ffi_obj__, buffer.as_ptr())
def __hash__(self):
return hash((self.as_ptr(), self._dims, self._is_contraction))
def __getitem__(self, key):
return IndexedTensor(_IndexMap(self, key), tensor=self)
def __setitem__(self, key, value):
if isinstance(value._impl, _Contraction):
# standard contraction
self._set_contraction(value._impl)
elif isinstance(value, Tensor):
pass
elif isinstance(value._impl, _IndexMap):
# Unary ASSIGN contraction
self._set_contraction(
_Contraction(
lib.PLAIDML_AGG_OP_ASSIGN,
lib.PLAIDML_COMBO_OP_NONE,
[value._impl],
_IndexMap(self, key),
_SizeMap(self._dims),
self._name,
))
elif isinstance(value._impl, _ContractionPart):
# Binary or ternary ASSIGN contraction
self._set_contraction(
_Contraction(
lib.PLAIDML_AGG_OP_ASSIGN,
value._impl.op,
[x._impl for x in value._impl.args],
_IndexMap(self, key),
_SizeMap(self._dims),
self._name,
))
else:
raise ValueError('Invalid impl when assigning to a Tensor (Type: {})'.format(
type(value._impl)))
def _set_contraction(self, cion):
self._is_contraction = True
self.take_ptr(cion)
# Represents an eltwise negation
def __neg__(self):
return call('neg', self)
# Represents an eltwise bit_not
def __invert__(self):
return call('bit_not', self)
# Represents an eltwise addition
def __add__(self, rhs):
return call('add', self, rhs)
def __radd__(self, lhs):
return call('add', lhs, self)
# Represents an eltwise subtraction
def __sub__(self, rhs):
return call('sub', self, rhs)
def __rsub__(self, lhs):
return call('sub', lhs, self)
# Represents an eltwise multiplication
def __mul__(self, rhs):
return call('mul', self, rhs)
def __rmul__(self, lhs):
return call('mul', lhs, self)
# Represents an eltwise division
def __div__(self, rhs):
return call('div', self, rhs)
def __rdiv__(self, lhs):
return call('div', lhs, self)
# Represents an eltwise division
def __truediv__(self, rhs):
return call('div', self, rhs)
def __rtruediv__(self, lhs):
return call('div', lhs, self)
# Represents an eltwise cmp_eq
def __eq__(self, rhs):
return call('cmp_eq', self, rhs)
# Represents an eltwise cmp_ne
def __ne__(self, rhs):
return call('cmp_ne', self, rhs)
# Represents an eltwise cmp_lt
def __lt__(self, rhs):
return call('cmp_lt', self, rhs)
# Represents an eltwise cmp_gt
def __gt__(self, rhs):
return call('cmp_gt', self, rhs)
# Represents an eltwise cmp_le
def __le__(self, rhs):
return call('cmp_le', self, rhs)
# Represents an eltwise cmp_ge
def __ge__(self, rhs):
return call('cmp_ge', self, rhs)
# Represents an eltwise bit_left
def __lshift__(self, rhs):
return call('bit_left', self, rhs)
def __rlshift__(self, lhs):
return call('bit_left', lhs, self)
# Represents an eltwise bit_right
def __rshift__(self, rhs):
return call('bit_right', self, rhs)
def __rrshift__(self, lhs):
return call('bit_right', lhs, self)
# Represents an eltwise bit_and
def __and__(self, rhs):
return call('bit_and', self, rhs)
def __rand__(self, lhs):
return call('bit_and', lhs, self)
# Represents an eltwise bit_or
def __or__(self, rhs):
return call('bit_or', self, rhs)
def __ror__(self, lhs):
return call('bit_or', lhs, self)
# Represents an eltwise bit_xor
def __xor__(self, rhs):
return call('bit_xor', self, rhs)
def __rxor__(self, lhs):
return call('bit_xor', lhs, self)
# Enable no_reduce on a contraction
def no_reduce(self):
if not self._is_contraction:
raise TypeError('no_reduce can only be specified on a contraction.')
ffi_call(lib.plaidml_expr_contraction_set_no_reduce, self.as_ptr(), True)
return self
# Set use_default on a contraction
def use_default(self, rhs):
if not self._is_contraction:
raise TypeError('use_default can only be specified on a contraction.')
ffi_call(lib.plaidml_expr_contraction_set_use_default, self.as_ptr(), rhs.as_ptr())
return self
def add_constraint(self, constraint):
ffi_call(
lib.plaidml_expr_contraction_add_constraint,
self.as_ptr(),
constraint.lhs.as_ptr(),
constraint.rhs.as_ptr(),
)
def add_constraints(self, constraints):
for constraint in constraints:
self.add_constraint(constraint)
# Return the tensor's shape
@property
def shape(self):
return LogicalShape(ptr=ffi_call(lib.plaidml_expr_get_shape, self.as_ptr()))
# Verify that the specified dims match the dims of this tensor.
def bind_dims(self, *dims):
raw_dims = [x.as_ptr() for x in dims]
ffi_call(lib.plaidml_expr_bind_dims, self.as_ptr(), len(raw_dims), raw_dims)
# bind a concrete shape to this tensor
def bind(self, shape):
ffi_call(lib.plaidml_expr_bind_shape, self.as_ptr(), shape.as_ptr())
class TensorRef:
"""Docstring for class TensorRef"""
def __init__(self, tensor):
self.tensor = tensor
def __hash__(self):
return hash(ffi_call(lib.plaidml_expr_ptr, self.tensor.as_ptr()))
def __eq__(self, other):
if isinstance(other, Tensor):
return self.__hash__() == TensorRef(other).__hash__()
return self.__hash__() == other.__hash__()
class Value(ForeignObject):
"""Docstring for class Value"""
__ffi_del__ = lib.plaidml_value_free
__ffi_repr__ = lib.plaidml_value_repr
def __init__(self, value):
# logger.debug('Value({})'.format(value))
if isinstance(value, np.ndarray):
if value.ndim == 0:
value = value.item()
else:
value = value.tolist()
if value is None:
ffi_obj = ffi_call(lib.plaidml_value_none)
elif isinstance(value, (six.integer_types, bool)):
ffi_obj = ffi_call(lib.plaidml_value_int, value)
elif isinstance(value, float):
ffi_obj = ffi_call(lib.plaidml_value_float, value)
elif isinstance(value, TensorDim):
ffi_obj = ffi_call(lib.plaidml_value_dim, value.as_ptr())
elif isinstance(value, Tensor):
ffi_obj = ffi_call(lib.plaidml_value_expr, value.as_ptr())
elif isinstance(value, (list, tuple)):
self._elts = [Value(x) for x in value]
raw_elts = [x.as_ptr() for x in self._elts]
ffi_obj = ffi_call(lib.plaidml_value_tuple, len(raw_elts), raw_elts)
elif isinstance(value, six.string_types):
ffi_obj = ffi_call(lib.plaidml_value_str, value.encode('utf-8'))
elif isinstance(value, ffi.CData) and ffi.typeof(value) is ffi.typeof('plaidml_value*'):
ffi_obj = value
else:
raise TypeError('Unsupported type {} for value={}'.format(type(value), value))
super(Value, self).__init__(ffi_obj)
def as_tensor(self):
return Tensor(expr=ffi_call(lib.plaidml_value_expr_get, self.as_ptr()))
def TensorOutput(*args):
return Tensor(dims=args)
def TensorDims(count):
return [TensorDim() for i in range(count)]
def TensorIndexes(count):
return [TensorIndex() for i in range(count)]
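# Illustrative contraction (added; a sketch in the style of the PlaidML EDSL
# examples, using only the helpers defined above - not code from this module):
# a matrix multiplication expressed with TensorDims/TensorIndexes/TensorOutput.
def example_matmul(A, B):
    I, J, K = TensorDims(3)
    i, j, k = TensorIndexes(3)
    A.bind_dims(I, K)
    B.bind_dims(K, J)
    C = TensorOutput(I, J)
    C[i, j] += A[i, k] * B[k, j]  # sum-aggregation over k via __iadd__
    return C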
class ProgramArgument:
"""Docstring for class ProgramArgument"""
def __init__(self, arg):
self.is_input = arg.is_input
self.ref = TensorRef(Tensor(expr=ffi_call(lib.plaidml_expr_clone, arg.tensor)))
self.shape = LogicalShape(ptr=ffi_call(lib.plaidml_logical_shape_clone, arg.shape))
if arg.buffer:
tensor_shape = self.shape.into_TensorShape()
self.buffer = Buffer(tensor_shape, ptr=ffi_call(lib.plaidml_buffer_clone, arg.buffer))
else:
self.buffer = None
class Program(ForeignObject):
"""Docstring for class Program"""
__ffi_del__ = lib.plaidml_program_free
__ffi_repr__ = lib.plaidml_program_repr
def __init__(self, name, outputs, updates=[]):
raw_outputs = [x.as_ptr() for x in outputs]
dst_updates = [x[0].as_ptr() for x in updates]
src_updates = [x[1].as_ptr() for x in updates]
raw_args = ffi.new('plaidml_program_args**')
ffi_obj = ffi_call(
lib.plaidml_program_evaluate,
name.encode(),
len(raw_outputs),
raw_outputs,
len(updates),
src_updates,
dst_updates,
raw_args,
)
self.args = [ProgramArgument(raw_args[0].args[i]) for i in range(raw_args[0].nargs)]
ffi_call(lib.plaidml_program_args_free, raw_args[0])
super(Program, self).__init__(ffi_obj)
@property
def inputs(self):
return [x for x in self.args if x.is_input]
@property
def outputs(self):
return [x for x in self.args if not x.is_input]
def wrap_tensor(x):
if isinstance(x, six.integer_types):
return Tensor(expr=ffi_call(lib.plaidml_expr_int, x))
if np.issubdtype(type(x), np.integer):
return Tensor(expr=ffi_call(lib.plaidml_expr_int, x.item()))
if isinstance(x, float):
return Tensor(expr=ffi_call(lib.plaidml_expr_float, x))
if isinstance(x, TensorDim):
return Tensor(expr=ffi_call(lib.plaidml_expr_dim, x.as_ptr()))
if isinstance(x, Tensor):
return x
raise TypeError('Unexpected type for call argument: {} (value: {})'.format(type(x), x))
def call(fn, *args):
args = [wrap_tensor(x) for x in args]
raw_args = [x.as_ptr() for x in args]
return Tensor(expr=ffi_call(lib.plaidml_expr_call, fn.encode(), len(args), raw_args))
def cast(x, dtype):
return Tensor(expr=ffi_call(lib.plaidml_expr_cast, wrap_tensor(x).as_ptr(), dtype))
def as_bool(x):
return cast(x, DType.BOOLEAN)
def as_float(x, bit_size):
map = {
16: DType.FLOAT16,
32: DType.FLOAT32,
64: DType.FLOAT64,
}
dtype = map.get(bit_size)
if not dtype:
raise ValueError('Unsupported bit_size for as_float')
return cast(x, dtype)
def as_int(x, bit_size):
map = {
8: DType.INT8,
16: DType.INT16,
32: DType.INT32,
64: DType.INT64,
}
dtype = map.get(bit_size)
if not dtype:
raise ValueError('Unsupported bit_size for as_int')
return cast(x, dtype)
def as_uint(x, bit_size):
map = {
8: DType.UINT8,
16: DType.UINT16,
32: DType.UINT32,
64: DType.UINT64,
}
dtype = map.get(bit_size)
if not dtype:
raise ValueError('Unsupported bit_size for as_uint')
return cast(x, dtype)
def ceil(x):
return call('ceil', x)
def cond(lhs, rhs, true_case):
return IndexedTensor(_ContractionPart(lib.PLAIDML_COMBO_OP_COND, (lhs, rhs, true_case)))
def cos(x):
return call('cos', x)
def exp(x):
return call('exp', x)
def floor(x):
return call('floor', x)
def gather(x, y):
return call('gather', x, y)
def gradients(loss, variables):
wrts = [x.as_ptr() for x in variables]
raw_grads = ffi.new('plaidml_expr*[]', len(wrts))
ffi_call(
lib.plaidml_expr_gradient,
len(wrts),
wrts,
loss.as_ptr(),
raw_grads,
)
return [Tensor(expr=x) for x in raw_grads]
def ident(x):
return call('ident', x)
def index(x, axis):
return call('index', x, axis)
def jacobian(loss, variables):
wrts = [x.as_ptr() for x in variables]
raw_grads = ffi.new('plaidml_expr*[]', len(wrts))
ffi_call(
lib.plaidml_expr_jacobian,
len(wrts),
wrts,
loss.as_ptr(),
raw_grads,
)
return [Tensor(expr=x) for x in raw_grads]
def log(x):
return call('log', x)
def max(x, y):
return call('max', x, y)
def min(x, y):
return call('min', x, y)
def pow(x, y):
return call('pow', x, y)
def prng(state, shape):
return call('prng', state, *shape)
def reshape(x, dims):
return call('reshape', x, *dims)
def round(x):
return call('round', x)
def scatter(x, y, z):
return call('scatter', x, y, z)
def select(cond, true_case, false_case):
return call('cond', cond, true_case, false_case)
def shape(x):
return call('shape', x)
def sin(x):
return call('sin', x)
def sqrt(x):
return call('sqrt', x)
def tan(x):
return call('tan', x)
def tanh(x):
return call('tanh', x)
| 2.15625 | 2 |
pytorch_toolkit/face_recognition/model/common.py | AnastasiaaSenina/openvino_training_extensions | 1 | 2316 | <reponame>AnastasiaaSenina/openvino_training_extensions
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from abc import abstractmethod
from functools import partial
import torch.nn as nn
class ModelInterface(nn.Module):
"""Abstract class for models"""
@abstractmethod
def set_dropout_ratio(self, ratio):
"""Sets dropout ratio of the model"""
@abstractmethod
def get_input_res(self):
"""Returns input resolution"""
from .rmnet_angular import RMNetAngular
from .mobilefacenet import MobileFaceNet
from .landnet import LandmarksNet
from .se_resnet_angular import SEResNetAngular
from .shufflenet_v2_angular import ShuffleNetV2Angular
from .backbones.se_resnet import se_resnet50, se_resnet101, se_resnet152
from .backbones.resnet import resnet50
from .backbones.se_resnext import se_resnext50, se_resnext101, se_resnext152
models_backbones = {'rmnet': RMNetAngular,
'mobilenetv2': MobileFaceNet,
'mobilenetv2_2x': partial(MobileFaceNet, width_multiplier=2.0),
'mobilenetv2_1_5x': partial(MobileFaceNet, width_multiplier=1.5),
'resnet50': partial(SEResNetAngular, base=resnet50),
'se_resnet50': partial(SEResNetAngular, base=se_resnet50),
'se_resnet101': partial(SEResNetAngular, base=se_resnet101),
'se_resnet152': partial(SEResNetAngular, base=se_resnet152),
'se_resnext50': partial(SEResNetAngular, base=se_resnext50),
'se_resnext101': partial(SEResNetAngular, base=se_resnext101),
'se_resnext152': partial(SEResNetAngular, base=se_resnext152),
'shufflenetv2': ShuffleNetV2Angular}
models_landmarks = {'landnet': LandmarksNet}
| 1.25 | 1 |
src/dataclay/util/logs.py | kpavel/pyclay | 1 | 2317 | <reponame>kpavel/pyclay
""" Class description goes here. """
import json
import logging
class JSONFormatter(logging.Formatter):
"""Simple JSON formatter for the logging facility."""
def format(self, obj):
"""Note that obj is a LogRecord instance."""
# Copy the dictionary
ret = dict(obj.__dict__)
# Perform the message substitution
args = ret.pop("args")
msg = ret.pop("msg")
ret["message"] = msg % args
# Exceptions must be formatted (they are not JSON-serializable)
try:
ei = ret.pop("exc_info")
except KeyError:
pass
else:
if ei is not None:
ret["exc_info"] = self.formatException(ei)
# Dump the dictionary in JSON form
return json.dumps(ret, skipkeys=True)
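# Usage sketch (added for illustration; the logger name is hypothetical):
_handler = logging.StreamHandler()
_handler.setFormatter(JSONFormatter())
_log = logging.getLogger('dataclay.demo')
_log.addHandler(_handler)
_log.setLevel(logging.INFO)
_log.info('worker %s started', 42)  # emitted as a single JSON document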
| 2.875 | 3 |
python/orthogonal_test.py | davxy/numeric | 2 | 2318 | <reponame>davxy/numeric<gh_stars>1-10
# Orthogonal linear system solver tests
from math import sqrt
import numpy as np
from orthogonal import orthogonal
################################################################################
# 2x2 orthogonal matrix
A = np.matrix('1 1;'
'1 -1', float)
A = A*1.0/sqrt(2.0)
# Known terms vector
b = np.matrix('2; 3')
# Solve the system
x = orthogonal(A, b, 1)
# Check
if np.allclose(b, A*x) == False:
raise Exception('Orthogonal test failure')
################################################################################
# 3x3 orthogonal matrix
A = np.matrix('2 -2 1;'
'1 2 2;'
'2 1 -2', float)
A = A*1.0/3.0
# Known terms vector
b = np.matrix('2; 3; 4')
# Solve the system
x = orthogonal(A, b)
# Check
if np.allclose(b, A*x) == False:
raise Exception('Orthogonal test failure')
| 2.828125 | 3 |
src/autonlp/project.py | adbmd/autonlp | 1 | 2319 | import os
import shutil
from dataclasses import dataclass
from datetime import datetime
from typing import Dict, List, Optional
from huggingface_hub import Repository
from loguru import logger
from prettytable import PrettyTable
from .splits import TEST_SPLIT, TRAIN_SPLIT, VALID_SPLIT
from .tasks import TASKS
from .utils import BOLD_TAG, CYAN_TAG, GREEN_TAG, PURPLE_TAG, RESET_TAG, YELLOW_TAG, http_get, http_post
from .validation import validate_file
FILE_STATUS = (
"☁ Uploaded",
"⌚ Queued",
"⚙ In Progress...",
"✅ Success!",
"❌ Failed: file not found",
"❌ Failed: unsupported file type",
"❌ Failed: server error",
"❌ Invalid column mapping, please fix it and re-upload the file.",
)
JOB_STATUS = (
("⌚", "queued"),
("🚀", "start"),
("⚙", "data_munging"),
("🏃", "model_training"),
("✅", "success"),
("❌", "failed"),
)
PROJECT_STATUS = (
("✨", "Created"),
("🚀", "Data processing started"),
("✅", "Data processing successful"),
("❌", "Failed to download data files from the huggingface hub"),
("❌", "Missing 'train' or 'valid' split in data files"),
("❌", "Failed to process data files"),
("❌", "Failed to upload processed data files to the huggingface hub"),
)
SPLITS = (TRAIN_SPLIT, VALID_SPLIT, TEST_SPLIT)
@dataclass
class TrainingJob:
"""A training job in AutoNLP"""
job_id: int
status: str
status_emoji: str
created_at: datetime
updated_at: datetime
@classmethod
def from_json_resp(cls, json_resp: dict):
return cls(
job_id=json_resp["id"],
status_emoji=JOB_STATUS[json_resp["status"] - 1][0],
status=JOB_STATUS[json_resp["status"] - 1][1],
created_at=datetime.fromisoformat(json_resp["created_at"]),
updated_at=datetime.fromisoformat(json_resp["updated_at"]),
)
def __str__(self):
return "\n".join(
[
f"📚 Model # {self.job_id}",
f" • {BOLD_TAG}Status{RESET_TAG}: {self.status_emoji} {self.status}",
f" • {BOLD_TAG}Created at{RESET_TAG}: {self.created_at.strftime('%Y-%m-%d %H:%M Z')}",
f" • {BOLD_TAG}Last update{RESET_TAG}: {self.updated_at.strftime('%Y-%m-%d %H:%M Z')}",
]
)
@dataclass
class UploadedFile:
"""A file uploaded to an AutoNLP project"""
file_id: int
filename: str
processing_status: str
split: str
col_mapping: Dict[str, str]
created_at: datetime
updated_at: datetime
@classmethod
def from_json_resp(cls, json_resp: dict):
return cls(
file_id=json_resp["data_file_id"],
filename=json_resp["fname"],
processing_status=FILE_STATUS[json_resp["download_status"] - 1],
split=SPLITS[json_resp["split"] - 1],
col_mapping=json_resp["col_mapping"],
created_at=datetime.fromisoformat(json_resp["created_at"]),
updated_at=datetime.fromisoformat(json_resp["updated_at"]),
)
def __str__(self):
return "\n".join(
[
f"📁 {CYAN_TAG}{self.filename}{RESET_TAG} (id # {self.file_id})",
f" • {BOLD_TAG}Split{RESET_TAG}: {self.split}",
f" • {BOLD_TAG}Processing status{RESET_TAG}: {self.processing_status}",
f" • {BOLD_TAG}Last update{RESET_TAG}: {self.updated_at.strftime('%Y-%m-%d %H:%M Z')}",
]
)
@dataclass
class Project:
"""An AutoNLP project"""
_token: str
proj_id: int
name: str
user: str
task: str
status_emoji: str
status: str
language: str
created_at: datetime
updated_at: datetime
dataset_id: str
files: Optional[List[UploadedFile]] = None
training_jobs: Optional[List] = None
@classmethod
def from_json_resp(cls, json_resp: dict, token: str):
"""Build a Project from the API response, JSON-encoded"""
return cls(
proj_id=json_resp["id"],
name=json_resp["proj_name"],
user=json_resp["username"],
task=list(filter(lambda key: TASKS[key] == json_resp["task"], TASKS.keys()))[0],
status_emoji=PROJECT_STATUS[json_resp["status"] - 1][0],
status=PROJECT_STATUS[json_resp["status"] - 1][1],
created_at=datetime.fromisoformat(json_resp["created_at"]),
updated_at=datetime.fromisoformat(json_resp["updated_at"]),
dataset_id=json_resp["dataset_id"],
language=json_resp["config"]["language"],
_token=token,
)
def refresh(self):
"""Update information about uploaded files and models attached to the project"""
logger.info("🔄 Refreshing uploaded files information...")
resp = http_get(path=f"/projects/{self.proj_id}/data", token=self._token)
json_files = resp.json()
self.files = [UploadedFile.from_json_resp(file) for file in json_files]
logger.info("🔄 Refreshing models information...")
resp = http_get(path=f"/projects/{self.proj_id}/jobs", token=self._token)
json_jobs = resp.json()
self.training_jobs = [TrainingJob.from_json_resp(job) for job in json_jobs]
def upload(self, filepaths: List[str], split: str, col_mapping: Dict[str, str]):
"""Uploads files to the project"""
local_dataset_dir = os.path.expanduser(f"~/.huggingface/autonlp/projects/{self.dataset_id}")
if os.path.exists(local_dataset_dir):
            if os.path.isdir(os.path.join(local_dataset_dir, ".git")):
clone_from = None
else:
shutil.rmtree(local_dataset_dir)
clone_from = "https://huggingface.co./datasets/" + self.dataset_id
else:
clone_from = "https://huggingface.co./datasets/" + self.dataset_id
dataset_repo = Repository(
local_dir=local_dataset_dir,
clone_from=clone_from,
use_auth_token=self._token,
)
dataset_repo.git_pull()
for idx, file_path in enumerate(filepaths):
if not os.path.isfile(file_path):
logger.error(f"[{idx + 1}/{len(filepaths)}] ❌ '{file_path}' does not exist or is not a file!")
continue
file_name = os.path.basename(file_path)
file_extension = file_name.split(".")[-1]
src = os.path.expanduser(file_path)
dst = os.path.join(local_dataset_dir, "raw", file_name)
logger.info(f"[{idx + 1}/{len(filepaths)}] 📦 Copying {src} to {dst}...")
os.makedirs(os.path.dirname(dst), exist_ok=True)
shutil.copyfile(src, dst)
logger.info(f"[{idx + 1}/{len(filepaths)}] 🔎 Validating {dst} and column mapping...")
validate_file(path=dst, task=self.task, file_ext=file_extension, col_mapping=col_mapping)
dataset_repo.lfs_track(patterns=[f"raw/*.{file_extension}"])
dataset_repo.git_pull()
try:
logger.info("☁ Uploading files to the dataset hub...")
dataset_repo.push_to_hub(commit_message="Upload from AutoNLP CLI")
logger.info("✅ Successfully uploaded the files!")
except OSError as err:
if "nothing to commit, working tree clean" in err.args[0]:
logger.info("❔ Files did not change since last upload!")
dataset_repo.git_push()
return
logger.error("❌ Something went wrong when uploading the files!")
raise
for idx, file_path in enumerate(filepaths):
file_name = os.path.basename(file_path)
            logger.info(f"[{idx + 1}/{len(filepaths)}] 📁 Registering file {file_name} into project '{self.name}'...")
payload = {
"split": split,
"col_mapping": col_mapping,
"data_files": [{"fname": file_name, "username": self.user}],
}
http_post(path=f"/projects/{self.proj_id}/data/add", payload=payload, token=self._token)
logger.info(f"[{idx + 1}/{len(filepaths)}] ✅ Success!")
def train(self):
"""Starts training on the models"""
http_get(path=f"/projects/{self.proj_id}/data/start_process", token=self._token)
logger.info("🔥🔥 Training started!")
def __str__(self):
header = "\n".join(
[
f"AutoNLP Project (id # {self.proj_id})",
"~" * 35,
f" • {BOLD_TAG}Name{RESET_TAG}: {PURPLE_TAG}{self.name}{RESET_TAG}",
f" • {BOLD_TAG}Owner{RESET_TAG}: {GREEN_TAG}{self.user}{RESET_TAG}",
f" • {BOLD_TAG}Status{RESET_TAG}: {BOLD_TAG}{self.status_emoji} {self.status}{RESET_TAG}",
f" • {BOLD_TAG}Task{RESET_TAG}: {YELLOW_TAG}{self.task.title().replace('_', ' ')}{RESET_TAG}",
f" • {BOLD_TAG}Created at{RESET_TAG}: {self.created_at.strftime('%Y-%m-%d %H:%M Z')}",
f" • {BOLD_TAG}Last update{RESET_TAG}: {self.updated_at.strftime('%Y-%m-%d %H:%M Z')}",
"",
]
)
printout = [header]
# Uploaded files information
if self.files is None:
descriptions = ["❓ Files information unknown, update the project"]
else:
if len(self.files) == 0:
descriptions = ["🤷 No files uploaded yet!"]
else:
sorted_files = sorted(self.files, key=lambda file: file.split) # Sort by split
descriptions = [str(file) for file in sorted_files]
printout.append(
"\n".join(
[
"~" * 14 + f" {BOLD_TAG}Files{RESET_TAG} " + "~" * 14,
"",
"Dataset ID:",
f"{CYAN_TAG}{self.dataset_id}{RESET_TAG}",
"",
]
+ descriptions
)
)
# Training jobs information
if self.training_jobs is None:
jobs_str = "❓ Models information unknown, update the project"
else:
if len(self.training_jobs) == 0:
jobs_str = "🤷 No train jobs started yet!"
else:
model_table = PrettyTable(["", "ID", "Status", "Creation date", "Last update"])
for job in sorted(self.training_jobs, key=lambda job: job.job_id):
model_table.add_row(
[
job.status_emoji,
job.job_id,
job.status,
job.created_at.strftime("%Y-%m-%d %H:%M Z"),
job.updated_at.strftime("%Y-%m-%d %H:%M Z"),
]
)
jobs_str = str(model_table)
printout.append("\n".join(["", "~" * 12 + f" {BOLD_TAG}Models{RESET_TAG} " + "~" * 11, "", jobs_str]))
return "\n".join(printout)
| 2.296875 | 2 |
backend/services/apns_util.py | xuantan/viewfinder | 645 | 2320 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
# Copyright 2012 Viewfinder Inc. All Rights Reserved.
"""Apple Push Notification service utilities.
Original copyright for this code: https://github.com/jayridge/apnstornado
TokenToBinary(): converts a base64-encoded token into a binary value
CreateMessage(): formats a binary APNs message from parameters
ParseResponse(): parses APNs binary response for status & identifier
ErrorStatusToString(): converts error status to error message
"""
__author__ = '<EMAIL> (<NAME>)'
import base64
import json
import struct
import time
from tornado import escape
_MAX_PAYLOAD_BYTES = 256
"""Maximum number of bytes in the APNS payload."""
_ELLIPSIS_BYTES = escape.utf8(u'…')
"""UTF-8 encoding of the Unicode ellipsis character."""
def TokenToBinary(token):
return base64.b64decode(token)
def TokenFromBinary(bin_token):
return base64.b64encode(bin_token)
def CreateMessage(token, alert=None, badge=None, sound=None,
identifier=0, expiry=None, extra=None, allow_truncate=True):
token = TokenToBinary(token)
if len(token) != 32:
raise ValueError, u'Token must be a 32-byte binary string.'
if (alert is not None) and (not isinstance(alert, (basestring, dict))):
raise ValueError, u'Alert message must be a string or a dictionary.'
if expiry is None:
expiry = long(time.time() + 365 * 86400)
# Start by determining the length of the UTF-8 encoded JSON with no alert text. This allows us to
# determine how much space is left for the message.
# 'content-available': 1 is necessary to trigger iOS 7's background download processing.
aps = { 'alert' : '', 'content-available': 1 }
if badge is not None:
aps['badge'] = badge
if sound is not None:
aps['sound'] = sound
data = { 'aps' : aps }
if extra is not None:
data.update(extra)
# Create compact JSON representation with no extra space and no escaping of non-ascii chars (i.e. use
# direct UTF-8 representation rather than "\u1234" escaping). This maximizes the amount of space that's
# left for the alert text.
encoded = escape.utf8(json.dumps(escape.recursive_unicode(data), separators=(',', ':'), ensure_ascii=False))
bytes_left = _MAX_PAYLOAD_BYTES - len(encoded)
if allow_truncate and isinstance(alert, basestring):
alert = _TruncateAlert(alert, bytes_left)
elif alert and len(escape.utf8(alert)) > bytes_left:
raise ValueError, u'max payload(%d) exceeded: %d' % (_MAX_PAYLOAD_BYTES, len(escape.utf8(alert)))
# Now re-encode including the alert text.
aps['alert'] = alert
encoded = escape.utf8(json.dumps(escape.recursive_unicode(data), separators=(',', ':'), ensure_ascii=False))
length = len(encoded)
assert length <= _MAX_PAYLOAD_BYTES, (encoded, length)
return struct.pack('!bIIH32sH%(length)ds' % { 'length' : length },
1, identifier, expiry,
32, token, length, encoded)
def ParseResponse(bytes):
if len(bytes) != 6:
raise ValueError, u'response must be a 6-byte binary string.'
command, status, identifier = struct.unpack_from('!bbI', bytes, 0)
if command != 8:
raise ValueError, u'response command must equal 8.'
return status, identifier, ErrorStatusToString(status)
def ErrorStatusToString(status):
  if status == 0:
    return 'No errors encountered'
  elif status == 1:
    return 'Processing error'
  elif status == 2:
    return 'Missing device token'
  elif status == 3:
    return 'Missing topic'
  elif status == 4:
    return 'Missing payload'
  elif status == 5:
    return 'Invalid token size'
  elif status == 6:
    return 'Invalid topic size'
  elif status == 7:
    return 'Invalid payload size'
  elif status == 8:
    return 'Invalid token'
  elif status == 255:
    return 'None (unknown)'
  else:
    return ''
def _TruncateAlert(alert, max_bytes):
"""Converts the alert text to UTF-8 encoded JSON format, which is how
the alert will be stored in the APNS payload. If the number of
resulting bytes exceeds "max_bytes", then truncates the alert text
at a Unicode character boundary, taking care not to split JSON
escape sequences. Returns the truncated UTF-8 encoded alert text,
including a trailing ellipsis character.
"""
alert_json = escape.utf8(json.dumps(escape.recursive_unicode(alert), ensure_ascii=False))
# Strip quotes added by JSON.
alert_json = alert_json[1:-1]
# Check if alert fits with no truncation.
if len(alert_json) <= max_bytes:
return escape.utf8(alert)
# Make room for an appended ellipsis.
assert max_bytes >= len(_ELLIPSIS_BYTES), 'max_bytes must be at least %d' % len(_ELLIPSIS_BYTES)
max_bytes -= len(_ELLIPSIS_BYTES)
# Truncate the JSON UTF8 string at a Unicode character boundary.
truncated = alert_json[:max_bytes].decode('utf-8', errors='ignore')
# If JSON escape sequences were split, then the truncated string may not be valid JSON. Keep
# chopping trailing characters until the truncated string is valid JSON. It may take several
# tries, such as in the case where a "\u1234" sequence has been split.
while True:
try:
alert = json.loads(u'"%s"' % truncated)
break
except Exception:
truncated = truncated[:-1]
# Return the UTF-8 encoding of the alert with the ellipsis appended to it.
return escape.utf8(alert) + _ELLIPSIS_BYTES
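
# Minimal usage sketch (added for illustration): builds one APNs frame with a
# made-up 32-byte token and decodes a fabricated 6-byte error response. None
# of the values below correspond to a real device or a real APNs reply.
if __name__ == '__main__':
  fake_token = TokenFromBinary('\x00' * 32)
  frame = CreateMessage(fake_token, alert=u'Hello', badge=1, identifier=42)
  print 'APNs frame length: %d bytes' % len(frame)
  status, identifier, err_msg = ParseResponse(struct.pack('!bbI', 8, 0, 42))
  print status, identifier, err_msg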
| 2.40625 | 2 |
demonstrations/tutorial_kernels_module.py | jamesellis1999/qml | 216 | 2321 | r"""Training and evaluating quantum kernels
===========================================
.. meta::
:property="og:description": Kernels and alignment training with Pennylane.
:property="og:image": https://pennylane.ai/qml/_images/QEK_thumbnail.png
.. related::
tutorial_kernel_based_training Kernel-based training with scikit-learn
tutorial_data_reuploading_classifier Classification with data reuploading
*Authors: <NAME>, <NAME>, <NAME>, <NAME>, <NAME> and <NAME>. Posted: 24 June 2021*
Kernel methods are one of the cornerstones of classical machine learning.
Here we are concerned with kernels that can be evaluated on quantum computers,
*quantum kernels* for short.
In this tutorial you will learn how to evaluate kernels, use them for classification
and train them with gradient-based optimization, and all that using the
functionality of PennyLane's
`kernels module <https://pennylane.readthedocs.io/en/latest/code/qml_kernels.html>`__.
The demo is based on Ref. [#Training_QEKs]_, a project from Xanadu's own
`QHack <https://qhack.ai/>`__ hackathon.
What are kernel methods?
------------------------
To understand what a kernel method does, let's first revisit
one of the simplest methods to assign binary labels to datapoints:
linear classification.
Imagine we want to discern two different classes of points that lie in
different corners of the plane. A linear classifier corresponds to
drawing a line and assigning different labels to the regions on opposing
sides of the line:
.. figure:: ../demonstrations/kernels_module/linear_classification.png
:align: center
:width: 30%
We can mathematically formalize this by assigning the label :math:`y`
via
.. math::
y(\boldsymbol{x}) = \operatorname{sgn}(\langle \boldsymbol{w}, \boldsymbol{x}\rangle + b).
The vector :math:`\boldsymbol{w}` points perpendicular to the line and
thus determine its slope. The independent term :math:`b` specifies the
position on the plane. In this form, linear classification can also be
extended to higher dimensional vectors :math:`\boldsymbol{x}`, where a
line does not divide the entire space into two regions anymore. Instead
one needs a *hyperplane*. It is immediately clear that this method is
not very powerful, as datasets that are not separable by a hyperplane
can't be classified without error.
We can actually sneak around this limitation by performing a neat trick:
if we define some map :math:`\phi(\boldsymbol{x})` that *embeds* our
datapoints into a larger *feature space* and then perform linear
classification there, we could actually realise non-linear
classification in our original space!
.. figure:: ../demonstrations/kernels_module/embedding_nonlinear_classification.png
:align: center
:width: 65%
If we go back to the expression for our prediction and include the
embedding, we get
.. math::
y(\boldsymbol{x}) = \operatorname{sgn}(\langle \boldsymbol{w}, \phi(\boldsymbol{x})\rangle + b).
We will forgo one tiny step, but it can be shown that for the purpose
of optimal classification, we can choose the vector defining the
decision boundary as a linear combination of the embedded datapoints
:math:`\boldsymbol{w} = \sum_i \alpha_i \phi(\boldsymbol{x}_i)`. Putting
this into the formula yields
.. math::
y(\boldsymbol{x}) = \operatorname{sgn}\left(\sum_i \alpha_i \langle \phi(\boldsymbol{x}_i), \phi(\boldsymbol{x})\rangle + b\right).
This rewriting might not seem useful at first, but notice the above
formula only contains inner products between vectors in the embedding
space:
.. math::
k(\boldsymbol{x}_i, \boldsymbol{x}_j) = \langle \phi(\boldsymbol{x}_i), \phi(\boldsymbol{x}_j)\rangle.
We call this function the *kernel*. It provides the advantage that we can often
find an explicit formula for the kernel :math:`k` that makes it
superfluous to actually perform the (potentially expensive) embedding
:math:`\phi`. Consider for example the following embedding and the
associated kernel:
.. math::
\phi((x_1, x_2)) &= (x_1^2, \sqrt{2} x_1 x_2, x_2^2) \\
k(\boldsymbol{x}, \boldsymbol{y}) &= x_1^2 y_1^2 + 2 x_1 x_2 y_1 y_2 + x_2^2 y_2^2 = \langle \boldsymbol{x}, \boldsymbol{y} \rangle^2.
This means by just replacing the regular scalar product in our linear
classification with the map :math:`k`, we can actually express much more
intricate decision boundaries!
This is very important, because in many interesting cases the embedding :math:`\phi`
will be much costlier to compute than the kernel :math:`k`.
In this demo, we will explore one particular kind of kernel
that can be realized on near-term quantum computers, namely *Quantum
Embedding Kernels (QEKs)*. These are kernels that arise from embedding
data into the space of quantum states. We formalize this by considering
a parameterised quantum circuit :math:`U(\boldsymbol{x})` that maps
a datapoint :math:`\boldsymbol{x}` to the state
.. math::
|\psi(\boldsymbol{x})\rangle = U(\boldsymbol{x}) |0 \rangle.
The kernel value is then given by the *overlap* of the associated
embedded quantum states
.. math::
k(\boldsymbol{x}_i, \boldsymbol{x}_j) = | \langle\psi(\boldsymbol{x}_i)|\psi(\boldsymbol{x}_j)\rangle|^2.
"""
##############################################################################
# A toy problem
# -------------
# In this demo, we will treat a toy problem that showcases the
# inner workings of classification with quantum embedding kernels,
# training variational embedding kernels and the available functionalities
# to do both in PennyLane. We of course need to start with some imports:
from pennylane import numpy as np
import matplotlib as mpl
np.random.seed(1359)
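
##############################################################################
# Before building the dataset, here is a small optional sanity check (added
# for illustration, not part of the original derivation): it verifies
# numerically that the explicit feature map
# :math:`\phi((x_1, x_2)) = (x_1^2, \sqrt{2} x_1 x_2, x_2^2)` from the
# introduction reproduces the squared dot product kernel. The helper name
# ``_phi`` and the sample points are our own choices.


def _phi(v):
    return np.array([v[0] ** 2, np.sqrt(2.0) * v[0] * v[1], v[1] ** 2])


_x, _y = np.array([0.3, -1.2]), np.array([0.7, 0.5])
assert np.isclose(_phi(_x) @ _phi(_y), (_x @ _y) ** 2)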
##############################################################################
# And we proceed right away to create a dataset to work with, the
# ``DoubleCake`` dataset. Firstly, we define two functions to enable us to
# generate the data.
# The details of these functions are not essential for understanding the demo,
# so don't mind them if they are confusing.
def _make_circular_data(num_sectors):
"""Generate datapoints arranged in an even circle."""
center_indices = np.array(range(0, num_sectors))
sector_angle = 2 * np.pi / num_sectors
angles = (center_indices + 0.5) * sector_angle
x = 0.7 * np.cos(angles)
y = 0.7 * np.sin(angles)
labels = 2 * np.remainder(np.floor_divide(angles, sector_angle), 2) - 1
return x, y, labels
def make_double_cake_data(num_sectors):
x1, y1, labels1 = _make_circular_data(num_sectors)
x2, y2, labels2 = _make_circular_data(num_sectors)
# x and y coordinates of the datapoints
x = np.hstack([x1, 0.5 * x2])
y = np.hstack([y1, 0.5 * y2])
# Canonical form of dataset
X = np.vstack([x, y]).T
labels = np.hstack([labels1, -1 * labels2])
# Canonical form of labels
Y = labels.astype(int)
return X, Y
##############################################################################
# Next, we define a function to help plot the ``DoubleCake`` data:
def plot_double_cake_data(X, Y, ax, num_sectors=None):
"""Plot double cake data and corresponding sectors."""
x, y = X.T
cmap = mpl.colors.ListedColormap(["#FF0000", "#0000FF"])
ax.scatter(x, y, c=Y, cmap=cmap, s=25, marker="s")
if num_sectors is not None:
sector_angle = 360 / num_sectors
for i in range(num_sectors):
color = ["#FF0000", "#0000FF"][(i % 2)]
other_color = ["#FF0000", "#0000FF"][((i + 1) % 2)]
ax.add_artist(
mpl.patches.Wedge(
(0, 0),
1,
i * sector_angle,
(i + 1) * sector_angle,
lw=0,
color=color,
alpha=0.1,
width=0.5,
)
)
ax.add_artist(
mpl.patches.Wedge(
(0, 0),
0.5,
i * sector_angle,
(i + 1) * sector_angle,
lw=0,
color=other_color,
alpha=0.1,
)
)
ax.set_xlim(-1, 1)
ax.set_ylim(-1, 1)
ax.set_aspect("equal")
ax.axis("off")
return ax
##############################################################################
# Let's now have a look at our dataset. In our example, we will work with
# 3 sectors:
import matplotlib.pyplot as plt
num_sectors = 3
X, Y = make_double_cake_data(num_sectors)
ax = plot_double_cake_data(X, Y, plt.gca(), num_sectors=num_sectors)
##############################################################################
# Defining a Quantum Embedding Kernel
# -----------------------------------
# PennyLane's `kernels module <https://pennylane.readthedocs.io/en/latest/code/qml_kernels.html>`__
# allows for a particularly simple
# implementation of Quantum Embedding Kernels. The first ingredient we
# need for this is an *ansatz*, which we will construct by repeating a
# layer as building block. Let's start by defining this layer:
import pennylane as qml
def layer(x, params, wires, i0=0, inc=1):
"""Building block of the embedding ansatz"""
i = i0
for j, wire in enumerate(wires):
qml.Hadamard(wires=[wire])
qml.RZ(x[i % len(x)], wires=[wire])
i += inc
qml.RY(params[0, j], wires=[wire])
qml.broadcast(unitary=qml.CRZ, pattern="ring", wires=wires, parameters=params[1])
##############################################################################
# To construct the ansatz, this layer is repeated multiple times, reusing
# the datapoint ``x`` but feeding different variational
# parameters ``params`` into each of them.
# Together, the datapoint and the variational parameters fully determine
# the embedding ansatz :math:`U(\boldsymbol{x})`.
# In order to construct the full kernel circuit, we also require its adjoint
# :math:`U(\boldsymbol{x})^\dagger`, which we can obtain via ``qml.adjoint``.
def ansatz(x, params, wires):
"""The embedding ansatz"""
for j, layer_params in enumerate(params):
layer(x, layer_params, wires, i0=j * len(wires))
adjoint_ansatz = qml.adjoint(ansatz)
def random_params(num_wires, num_layers):
"""Generate random variational parameters in the shape for the ansatz."""
return np.random.uniform(0, 2 * np.pi, (num_layers, 2, num_wires), requires_grad=True)
##############################################################################
# Together with the ansatz we only need a device to run the quantum circuit on.
# For the purpose of this tutorial we will use PennyLane's ``default.qubit``
# device with 5 wires in analytic mode.
dev = qml.device("default.qubit", wires=5, shots=None)
wires = dev.wires.tolist()
##############################################################################
# Let us now define the quantum circuit that realizes the kernel. We will compute
# the overlap of the quantum states by first applying the embedding of the first
# datapoint and then the adjoint of the embedding of the second datapoint. We
# finally extract the probabilities of observing each basis state.
@qml.qnode(dev)
def kernel_circuit(x1, x2, params):
ansatz(x1, params, wires=wires)
adjoint_ansatz(x2, params, wires=wires)
return qml.probs(wires=wires)
##############################################################################
# The kernel function itself is now obtained by looking at the probability
# of observing the all-zero state at the end of the kernel circuit -- because
# of the ordering in ``qml.probs``, this is the first entry:
def kernel(x1, x2, params):
return kernel_circuit(x1, x2, params)[0]
##############################################################################
#
# .. note::
# An alternative way to set up the kernel circuit in PennyLane would be
# to use the observable type
# `Projector <https://pennylane.readthedocs.io/en/latest/code/api/pennylane.Projector.html>`__.
# This is shown in the
# `demo on kernel-based training of quantum models <https://pennylane.ai/qml/demos/tutorial_kernel_based_training.html>`__, where you will also find more
# background information on the kernel circuit structure itself.
#
# Before focusing on the kernel values we have to provide values for the
# variational parameters. At this point we fix the number of layers in the
# ansatz circuit to :math:`6`.
init_params = random_params(num_wires=5, num_layers=6)
##############################################################################
# Now we can have a look at the kernel value between the first and the
# second datapoint:
kernel_value = kernel(X[0], X[1], init_params)
print(f"The kernel value between the first and second datapoint is {kernel_value:.3f}")
##############################################################################
# The mutual kernel values between all elements of the dataset form the
# *kernel matrix*. We can inspect it via the ``qml.kernels.square_kernel_matrix``
# method, which makes use of symmetry of the kernel,
# :math:`k(\boldsymbol{x}_i,\boldsymbol{x}_j) = k(\boldsymbol{x}_j, \boldsymbol{x}_i)`.
# In addition, the option ``assume_normalized_kernel=True`` ensures that we do not
# calculate the entries between the same datapoints, as we know them to be 1
# for our noiseless simulation. Overall this means that we compute
# :math:`\frac{1}{2}(N^2-N)` kernel values for :math:`N` datapoints.
# To include the variational parameters, we construct a ``lambda`` function that
# fixes them to the values we sampled above.
init_kernel = lambda x1, x2: kernel(x1, x2, init_params)
K_init = qml.kernels.square_kernel_matrix(X, init_kernel, assume_normalized_kernel=True)
with np.printoptions(precision=3, suppress=True):
print(K_init)
##############################################################################
# Using the Quantum Embedding Kernel for predictions
# --------------------------------------------------
# The quantum kernel alone can not be used to make predictions on a
# dataset, because it is essentially just a tool to measure the similarity
# between two datapoints. To perform an actual prediction we will make use
# of scikit-learn's Support Vector Classifier (SVC).
from sklearn.svm import SVC
##############################################################################
# To construct the SVM, we need to supply ``sklearn.svm.SVC`` with a function
# that takes two sets of datapoints and returns the associated kernel matrix.
# We can make use of the function ``qml.kernels.kernel_matrix`` that provides
# this functionality. It expects the kernel to not have additional parameters
# besides the datapoints, which is why we again supply the variational
# parameters via the ``lambda`` function from above.
# Once we have this, we can let scikit-learn adjust the SVM from our Quantum
# Embedding Kernel.
#
# .. note::
# This step does *not* modify the variational parameters in our circuit
# ansatz. What it does is solving a different optimization task for the
# :math:`\alpha` and :math:`b` vectors we introduced in the beginning.
svm = SVC(kernel=lambda X1, X2: qml.kernels.kernel_matrix(X1, X2, init_kernel)).fit(X, Y)
##############################################################################
# To see how well our classifier performs we will measure which percentage
# of the dataset it classifies correctly.
def accuracy(classifier, X, Y_target):
return 1 - np.count_nonzero(classifier.predict(X) - Y_target) / len(Y_target)
accuracy_init = accuracy(svm, X, Y)
print(f"The accuracy of the kernel with random parameters is {accuracy_init:.3f}")
##############################################################################
# We are also interested in seeing what the decision boundaries in this
# classification look like. This could help us spot overfitting issues
# visually in more complex data sets. To this end we will introduce a
# second helper method.
def plot_decision_boundaries(classifier, ax, N_gridpoints=14):
_xx, _yy = np.meshgrid(np.linspace(-1, 1, N_gridpoints), np.linspace(-1, 1, N_gridpoints))
_zz = np.zeros_like(_xx)
for idx in np.ndindex(*_xx.shape):
_zz[idx] = classifier.predict(np.array([_xx[idx], _yy[idx]])[np.newaxis, :])
plot_data = {"_xx": _xx, "_yy": _yy, "_zz": _zz}
ax.contourf(
_xx,
_yy,
_zz,
cmap=mpl.colors.ListedColormap(["#FF0000", "#0000FF"]),
alpha=0.2,
levels=[-1, 0, 1],
)
plot_double_cake_data(X, Y, ax)
return plot_data
##############################################################################
# With that done, let's have a look at the decision boundaries for our
# initial classifier:
init_plot_data = plot_decision_boundaries(svm, plt.gca())
##############################################################################
# We see the outer points in the dataset can be correctly classified, but
# we still struggle with the inner circle. But remember we have a circuit
# with many free parameters! It is reasonable to believe we can give
# values to those variational parameters which improve the overall accuracy
# of our SVC.
#
# Training the Quantum Embedding Kernel
# -------------------------------------
#
# To be able to train the Quantum Embedding Kernel we need some measure of
# how well it fits the dataset in question. Performing an exhaustive
# search in parameter space is not a good solution because it is very
# resource intensive, and since the accuracy is a discrete quantity we
# would not be able to detect small improvements.
#
# We can, however, resort to a more specialized measure, the
# *kernel-target alignment* [#Alignment]_. The kernel-target alignment compares the
# similarity predicted by the quantum kernel to the actual labels of the
# training data. It is based on *kernel alignment*, a similarity measure
# between two kernels with given kernel matrices :math:`K_1` and
# :math:`K_2`:
#
# .. math::
# \operatorname{KA}(K_1, K_2) = \frac{\operatorname{Tr}(K_1 K_2)}{\sqrt{\operatorname{Tr}(K_1^2)\operatorname{Tr}(K_2^2)}}.
#
# .. note::
# Seen from a more theoretical side, :math:`\operatorname{KA}`
# is nothing else than the cosine of the angle between the kernel
# matrices :math:`K_1` and :math:`K_2` if we see them as vectors
# in the space of matrices with the Hilbert-Schmidt (or
# Frobenius) scalar product
# :math:`\langle A, B \rangle = \operatorname{Tr}(A^T B)`. This
# reinforces the geometric picture of how this measure relates
# to objects, namely two kernels, being aligned in a vector space.
#
# The training data enters the picture by defining an *ideal* kernel
# function that expresses the original labelling in the vector
# :math:`\boldsymbol{y}` by assigning to two datapoints the product
# of the corresponding labels:
#
# .. math::
# k_{\boldsymbol{y}}(\boldsymbol{x}_i, \boldsymbol{x}_j) = y_i y_j.
#
# The assigned kernel is thus :math:`+1` if both datapoints lie in the
# same class and :math:`-1` otherwise and its kernel matrix is simply
# given by the outer product :math:`\boldsymbol{y}\boldsymbol{y}^T`.
# The kernel-target alignment is then defined as the kernel alignment
# of the kernel matrix :math:`K` generated by the
# quantum kernel and :math:`\boldsymbol{y}\boldsymbol{y}^T`:
#
# .. math::
# \operatorname{KTA}_{\boldsymbol{y}}(K)
# = \frac{\operatorname{Tr}(K \boldsymbol{y}\boldsymbol{y}^T)}{\sqrt{\operatorname{Tr}(K^2)\operatorname{Tr}((\boldsymbol{y}\boldsymbol{y}^T)^2)}}
# = \frac{\boldsymbol{y}^T K \boldsymbol{y}}{\sqrt{\operatorname{Tr}(K^2)} N}
#
# where :math:`N` is the number of elements in :math:`\boldsymbol{y}`,
# that is the number of datapoints in the dataset.
#
# In summary, the kernel-target alignment effectively captures how well
# the kernel you chose reproduces the actual similarities of the data. It
# does have one drawback, however: having a high kernel-target alignment
# is only a necessary but not a sufficient condition for a good
# performance of the kernel [#Alignment]_. This means that good performance
# guarantees good alignment, but optimal alignment will not always
# bring optimal training accuracy with it.
#
# Let's now come back to the actual implementation. PennyLane's
# ``kernels`` module allows you to easily evaluate the kernel
# target alignment:
kta_init = qml.kernels.target_alignment(X, Y, init_kernel, assume_normalized_kernel=True)
print(f"The kernel-target alignment for our dataset and random parameters is {kta_init:.3f}")
##############################################################################
# Now let's code up an optimization loop and improve the kernel-target alignment!
#
# We will make use of regular gradient descent optimization. To speed up
# the optimization we will not use the entire training set to compute
# :math:`\operatorname{KTA}` but rather
# sample smaller subsets of the data at each step, we choose :math:`4`
# datapoints at random. Remember that PennyLane's built-in optimizer works
# to *minimize* the cost function that is given to it, which is why we
# have to multiply the kernel target alignment by :math:`-1` to actually
# *maximize* it in the process.
#
# .. note::
# Currently, the function ``qml.kernels.target_alignment`` is not
# differentiable yet, making it unfit for gradient descent optimization.
# We therefore first define a differentiable version of this function.
def target_alignment(
X,
Y,
kernel,
assume_normalized_kernel=False,
rescale_class_labels=True,
):
"""Kernel-target alignment between kernel and labels."""
K = qml.kernels.square_kernel_matrix(
X,
kernel,
assume_normalized_kernel=assume_normalized_kernel,
)
if rescale_class_labels:
nplus = np.count_nonzero(np.array(Y) == 1)
nminus = len(Y) - nplus
_Y = np.array([y / nplus if y == 1 else y / nminus for y in Y])
else:
_Y = np.array(Y)
T = np.outer(_Y, _Y)
inner_product = np.sum(K * T)
norm = np.sqrt(np.sum(K * K) * np.sum(T * T))
inner_product = inner_product / norm
return inner_product
params = init_params
opt = qml.GradientDescentOptimizer(0.2)
for i in range(500):
# Choose subset of datapoints to compute the KTA on.
subset = np.random.choice(list(range(len(X))), 4)
# Define the cost function for optimization
cost = lambda _params: -target_alignment(
X[subset],
Y[subset],
lambda x1, x2: kernel(x1, x2, _params),
assume_normalized_kernel=True,
)
# Optimization step
params = opt.step(cost, params)
# Report the alignment on the full dataset every 50 steps.
if (i + 1) % 50 == 0:
current_alignment = target_alignment(
X,
Y,
lambda x1, x2: kernel(x1, x2, params),
assume_normalized_kernel=True,
)
print(f"Step {i+1} - Alignment = {current_alignment:.3f}")
##############################################################################
# We want to assess the impact of training the parameters of the quantum
# kernel. Thus, let's build a second support vector classifier with the
# trained kernel:
# First create a kernel with the trained parameter baked into it.
trained_kernel = lambda x1, x2: kernel(x1, x2, params)
# Second create a kernel matrix function using the trained kernel.
trained_kernel_matrix = lambda X1, X2: qml.kernels.kernel_matrix(X1, X2, trained_kernel)
# Note that SVC expects the kernel argument to be a kernel matrix function.
svm_trained = SVC(kernel=trained_kernel_matrix).fit(X, Y)
##############################################################################
# We expect to see an accuracy improvement vs. the SVM with random
# parameters:
accuracy_trained = accuracy(svm_trained, X, Y)
print(f"The accuracy of a kernel with trained parameters is {accuracy_trained:.3f}")
##############################################################################
# We have now achieved perfect classification! 🎆
#
# Since SVMs are known to show good generalisation
# behavior, it will be interesting to inspect the decision boundaries of
# our classifier:
trained_plot_data = plot_decision_boundaries(svm_trained, plt.gca())
##############################################################################
# Indeed, we see that now not only every data instance falls within the
# correct class, but also that there are no strong artifacts that would make us
# distrust the model. In this sense, our approach benefits from both: on
# one hand it can adjust itself to the dataset, and on the other hand
# is not expected to suffer from bad generalisation.
#
# References
# ----------
#
# .. [#Training_QEKs]
#
# <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, and <NAME>.
# "Training Quantum Embedding Kernels on Near-Term Quantum Computers."
# `arXiv:2105.02276 <https://arxiv.org/abs/2105.02276>`__, 2021.
#
# .. [#Alignment]
#
# <NAME>, <NAME>, and <NAME>.
# "An overview of kernel alignment and its applications."
# `Artificial Intelligence Review 43.2: 179-192 <https://link.springer.com/article/10.1007/s10462-012-9369-4>`__, 2015.
| 3.890625 | 4 |
main.py | scottkaz/PyLoopover | 0 | 2322 | #!/usr/bin/python3
import pygame
import random
import time
##VARIABLES TO CHANGE
width = 500
height = 500
stats_height = 150
board_size = 5
window_name = "PyLoopover "+str(board_size)+"x"+str(board_size)
scramble_turns = 50
t_round = 3
FPS = 30
##DONT CHANGE THESE BOIS
WHITE = (255,255,255)
BLACK = (0,0,0)
GREEN = (32,200,32)
keys = {"w":0,"a":0,"s":0,"d":0,"q":0}
last_was_Q = False
class Tile:
def __init__(self,number,s):
self.number = number
n = number-1
self.color = ((n/s)*(255/s),(n%s)*(255/s),128)
def draw(self,screen,font,x,y,width,height):
pygame.draw.rect(screen,self.color,(x,y,width,height))
text = font.render(str(self.number),True,BLACK)
screen.blit(text,(x,y))
class Board:
content = []
start_t=0
end_t=0
game=False
moves = 0
def __init__(self,size):
self.size = size
for i in range(0,size):
self.content.append([])
for j in range(0,size):
self.content[i].append(None)
self.content[i][j] = Tile(i+j*size+1,size)
def rotate_left(self,y):
new = []
for i in range(0,self.size):
new.append(self.content[(i-1)%self.size][y])
for i in range(0,self.size):
self.content[i][y] = new[i]
self.moves+=1
return new
def rotate_right(self,y):
new = []
for i in range(0,self.size):
new.append(self.content[(i+1)%self.size][y])
for i in range(0,self.size):
self.content[i][y] = new[i]
self.moves+=1
return new
def rotate_down(self,x):
new = []
for i in range(0,self.size):
new.append(self.content[x][(i-1)%self.size])
for i in range(0,self.size):
self.content[x][i] = new[i]
self.moves+=1
return new
def rotate_up(self,x):
new = []
for i in range(0,self.size):
new.append(self.content[x][(i+1)%self.size])
for i in range(0,self.size):
self.content[x][i] = new[i]
self.moves+=1
return new
def draw(self,screen,font):
for i in range(0,self.size):
for j in range(0,self.size):
w = (width / self.size)
h = (height / self.size)
x = i * w
y = j * h
self.content[i][j].draw(screen,font,x,y,w,h)
def scramble(self,n):
for i in range(0,n):
o = random.randint(0,3)
if o == 0:
self.rotate_left(random.randint(0,board_size-1))
elif o == 1:
self.rotate_right(random.randint(0,board_size-1))
elif o == 2:
self.rotate_up(random.randint(0,board_size-1))
else:
self.rotate_down(random.randint(0,board_size-1))
self.game=False
self.moves=0
return True
def is_solved(self):
for i in range(0,self.size):
for j in range(0,self.size):
if self.content[i][j].number != i+j*self.size+1:
return False
return True
def start_time(self):
print("time has started")
self.start_t = time.monotonic()
self.game = True
return self.start_time
def end_time(self):
print("time has ended")
self.end_t = time.monotonic()
return self.end_time
def get_time(self):
if (not self.is_solved()) and self.game:
return (time.monotonic() - self.start_t , BLACK)
elif self.is_solved() and self.game:
return (self.end_t - self.start_t , GREEN)
else:
return (0 , BLACK)
def main():
gameboard = Board(board_size)
pygame.init()
pygame.mixer.quit() #weird workaroud
#name the window & size it.
pygame.display.set_caption(window_name)
screen = pygame.display.set_mode((width,height+stats_height),0,32)
#setup framerate
pygame.time.set_timer(pygame.USEREVENT+1,int((1/FPS)*1000))
#setup event que
pygame.event.set_allowed(None) #start with no events allowed
pygame.event.set_allowed(pygame.USEREVENT+1) #timer event
pygame.event.set_allowed(pygame.KEYDOWN)
pygame.event.set_allowed(pygame.QUIT) #4 quitters
#setup fonts
font = pygame.font.SysFont('mono',int((width/board_size)/1.14))
font2 = pygame.font.SysFont('mono',int(stats_height/2.3))
#main l00p
running = True
while running:
#eevveeentttss???
event = pygame.event.wait()
if event.type == pygame.USEREVENT+1:
#a fresh canvas
screen.fill(WHITE)
#draw stats
time = gameboard.get_time()
time_str = str( int( time[0] * (10 ** t_round) ) / (10 ** t_round) )
text_timer = font2.render("Time :"+time_str,True,time[1])
text_moves = font2.render("Moves:"+str(gameboard.moves),True,time[1])
screen.blit(text_timer,(0,height))
screen.blit(text_moves,(0,height+(stats_height/2)))
#draw board
gameboard.draw(screen,font)
#update da screeeeeen
pygame.display.update()
#end the game
if gameboard.is_solved() and gameboard.start_t > gameboard.end_t:
gameboard.end_time()
elif event.type == pygame.KEYDOWN:
k = chr(event.key) #gimme a CHAR, not some weird integer
domap = {
"w":"gameboard.rotate_up(int(pygame.mouse.get_pos()[0]/(width/board_size)))",
"a":"gameboard.rotate_right(int(pygame.mouse.get_pos()[1]/(height/board_size)))",
"s":"gameboard.rotate_down(int(pygame.mouse.get_pos()[0]/(width/board_size)))",
"d":"gameboard.rotate_left(int(pygame.mouse.get_pos()[1]/(height/board_size)))",
"q":"gameboard.scramble(scramble_turns)"
} #i guess?
if k in ['w','a','s','d','q']:
#starting game logic
if k == "q":
last_was_Q = True
else:
if last_was_Q:
gameboard.start_time()
last_was_Q = False
exec(domap[k])
#end the game
if gameboard.is_solved() and gameboard.start_t > gameboard.end_t:
gameboard.end_time()
#for quitters
elif event.type == pygame.QUIT:
print("Quitting...")
running = False
else:
print("err0r, bAd 3v3nt lol")
assert False
if __name__ == "__main__":
main()
| 3.140625 | 3 |
test3_05.py | yoojunwoong/python_review01 | 0 | 2323 | # for문에서 continue 사용하기, continue = skip개념!!!
for i in range(1,11):
if i == 6:
continue;
print(i);
print(i);
print(i);
print(i);
print(i);
| 3.515625 | 4 |
csmpe/core_plugins/csm_install_operations/exr/package_lib.py | anushreejangid/csmpe-main | 0 | 2324 | <gh_stars>0
# =============================================================================
#
# Copyright (c) 2016, Cisco Systems
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
"""
NCS4K
Production Packages
External Names Internal Names
ncs4k-full-x.iso-6.0.2
ncs4k-mini-x.iso-6.0.2
ncs4k-k9sec.pkg-6.0.2
ncs4k-mpls.pkg-6.0.2
ncs4k-mcast.pkg-6.0.2
ncs4k-mgbl.pkg-6.0.2
NCS6K
Production Packages
External Names Internal Names
ncs6k-doc.pkg-5.2.4 ncs6k-doc-5.2.4
ncs6k-li.pkg-5.2.4 ncs6k-li-5.2.4
ncs6k-mcast.pkg-5.2.4 ncs6k-mcast-5.2.4
ncs6k-mgbl.pkg-5.2.4 ncs6k-mgbl-5.2.4
ncs6k-mini-x.iso-5.2.4 ncs6k-mini-x-5.2.4
ncs6k-mpls.pkg-5.2.4 ncs6k-mpls-5.2.4
ncs6k-sysadmin.iso-5.2.4 ncs6k-sysadmin-5.2.4
ncs6k-full-x.iso-5.2.4 ncs6k-full-x-5.2.4
ncs6k-5.2.5.CSCuy47880.smu ncs6k-5.2.5.CSCuy47880-1.0.0 <- subversion added
Engineering Packages
External Names Internal Names
ncs6k-mcast.pkg-5.2.5.47I.DT_IMAGE ncs6k-mcast-5.2.5.47I
ncs6k-mini-x.iso-6.1.0.07I.DT_IMAGE ncs6k-xr-5.2.5.47I
ncs6k-5.2.5.47I.CSCuy47880-0.0.4.i.smu ncs6k-5.2.5.47I.CSCuy47880-0.0.4.i
ASR9K-64
Production Packages - not finalized yet
External Names Internal Names
asr9k-mcast-x64-2.0.0.0-r611.x86_64.rpm asr9k-mcast-x64-2.0.0.0-r611
asr9k-bgp-x64-1.0.0.0-r611.x86_64.rpm asr9k-bgp-x64-1.0.0.0-r611
asr9k-mgbl-x64-3.0.0.0-r611.x86_64.rpm asr9k-mgbl-x64-3.0.0.0-r611
asr9k-full-x64.iso-6.1.1 asr9k-xr-6.1.1
asr9k-mini-x64.iso-6.1.1 asr9k-xr-6.1.1
Engineering Packages
External Names Internal Names
asr9k-mcast-x64-2.0.0.0-r61116I.x86_64.rpm-6.1.1.16I.DT_IMAGE asr9k-mcast-x64-2.0.0.0-r61116I
asr9k-bgp-x64-1.0.0.0-r61116I.x86_64.rpm-6.1.1.16I.DT_IMAGE asr9k-bgp-x64-1.0.0.0-r61116I
asr9k-mgbl-x64-3.0.0.0-r61116I.x86_64.rpm-6.1.1.16I.DT_IMAGE asr9k-mgbl-x64-3.0.0.0-r61116I
asr9k-full-x64.iso-6.1.1.16I.DT_IMAGE asr9k-full-x64-6.1.1.16I
asr9k-mini-x64.iso-6.1.1.16I.DT_IMAGE asr9k-mini-x64-6.1.1.16I
NCS5K
Production Packages
External Names Internal Names
ncs5k-sysadmin.iso-6.0.1 ncs5k-sysadmin-6.0.1
ncs5k-full-x.iso-6.0.1 ncs5k-xr-6.0.1
ncs5k-mini-x.iso-6.0.1 ncs5k-xr-6.0.1
ncs5k-mcast-2.0.0.0-r601.x86_64.rpm-6.0.1 ncs5k-mcast-2.0.0.0-r601
ncs5k-mgbl-2.0.0.0-r601.x86_64.rpm-6.0.1 ncs5k-mgbl-2.0.0.0-r601
ncs5k-mpls-2.0.0.0-r601.x86_64.rpm-6.0.1 ncs5k-mpls-2.0.0.0-r601
ncs5k-k9sec-2.0.0.0-r601.x86_64.rpm-6.0.1 ncs5k-k9sec-2.0.0.0-r601
ncs5k-isis-2.0.0.0-r601.x86_64.rpm-6.0.1 ncs5k-isis-2.0.0.0-r601
ncs5k-ospf-2.0.0.0-r601.x86_64.rpm-6.0.1 ncs5k-ospf-2.0.0.0-r601
Engineering Packages
External Names Internal Names
ncs5k-mgbl-x64-3.0.0.0-r61116I.x86_64.rpm-6.0.1.16I.DT_IMAGE ncs5k-mgbl-3.0.0.0-r60116I
ncs5k-sysadmin.iso-6.0.1 ncs5k-sysadmin-6.0.1.26I
ncs5k-full-x.iso-6.0.1.16I.DT_IMAGE ncs5k-xr-6.0.1.16I
NCS5500
Production Packages
External Names Internal Names
ncs5500-eigrp-2.0.0.0-r601.x86_64.rpm-6.0.1 ncs5500-eigrp-2.0.0.0-r601
ncs5500-isis-2.0.0.0-r601.x86_64.rpm-6.0.1 ncs5500-isis-2.0.0.0-r601
ncs5500-k9sec-2.0.0.0-r601.x86_64.rpm-6.0.1 ncs5500-k9sec-2.0.0.0-r601
ncs5500-m2m-2.0.0.0-r601.x86_64.rpm-6.0.1 ncs5500-m2m-2.0.0.0-r601
ncs5500-mgbl-3.0.0.0-r601.x86_64.rpm-6.0.1 ncs5500-mgbl-3.0.0.0-r601
ncs5500-mini-x.iso-6.0.1 ncs5500-xr-6.0.1
ncs5500-mpls-te-rsvp-2.0.0.0-r601.x86_64.rpm-6.0.1 ncs5500-mpls-te-rsvp-2.0.0.0-r601
ncs5500-mpls-2.0.0.0-r601.x86_64.rpm-6.0.1 ncs5500-mpls-2.0.0.0-r601
ncs5500-ospf-1.0.0.0-r601.x86_64.rpm-6.0.1 ncs5500-ospf-1.0.0.0-r601
ncs5500-parser-1.0.0.0-r601.x86_64.rpm-6.0.1 ncs5500-parser-1.0.0.0-r601
"""
import re
platforms = ['asr9k', 'ncs1k', 'ncs4k', 'ncs5k', 'ncs5500', 'ncs6k', 'xrv9k']
version_dict = {"asr9k ncs1k ncs5k ncs5500 xrv9k": # 61117I or 611 or 6.1.1.17I or 6.1.1
re.compile("(?P<VERSION>(\d+\d+\d+(\d+\w+)?)|(\d+\.\d+\.\d+(\.\d+\w+)?)(?!\.\d)(?!-))"),
"ncs4k ncs6k": # 5.2.4 or 5.2.4.47I
re.compile("(?P<VERSION>\d+\.\d+\.\d+(\.\d+\w+)?)"),
}
smu_re = re.compile("(?P<SMU>CSC[a-z]{2}\d{5})")
subversion_dict = {"asr9k ncs1k ncs5k ncs5500 xrv9k":
re.compile("-(?P<SUBVERSION>\d+\.\d+\.\d+\.\d+)-"), # 2.0.0.0
"ncs4k ncs6k":
re.compile("CSC.*(?P<SUBVERSION>\d+\.\d+\.\d+?)"), # 0.0.4
}
class SoftwarePackage(object):
def __init__(self, package_name):
self.package_name = package_name
self._platform = None
self._package_type = None
self._version = None
self._smu = None
self._subversion = None
@property
def platform(self):
if not self._platform:
for platform in platforms:
if platform + "-" in self.package_name:
self._platform = platform
break
return self._platform
@property
def package_type(self):
if not self._package_type:
# For ASR9K-X64, NCS1K, NCS5K, NCS5500:
# Extract the package type string before X.X.X.X
# For NCS6K
# Extract the package type string before X.X.X
pattern = '-\d+\.\d+\.\d+' if self.platform == 'ncs6k' or \
self.platform == 'ncs4k' else '-\d\.\d\.\d.\d'
if self.platform and self.platform in self.package_name:
match = re.search(pattern, self.package_name)
# Special handling for mini, full, and sysadmin ISO on ASR9K-X64, NCS1K, NCS5K, NCS5500
# Example: ncs5500-mini-x.iso-6.0.1, asr9k-full-x64.iso-6.1.1
# Package type string is before the 3 part version string
# External Name: ncs5k-goldenk9-x.iso-6.3.1.11I.0, Internal Name: ncs5k-goldenk9-x-6.3.1.11I
if not match and sum([x in self.package_name for x in ['full', 'mini', 'sysadmin', 'goldenk9']]) > 0:
# Use the three part match for these ISO packages
match = re.search('-\d+\.\d+\.\d+', self.package_name)
if match:
# Extract the package type
self._package_type = self.package_name[0:match.start()].replace(self.platform + '-', '')
if self._package_type:
# Takes care the external to internal name matching
# Example, ncs6k-mgbl.pkg-5.2.5 -> mgbl, ncs5500-mini-x.iso-6.0.1 -> mini-x
self._package_type = self._package_type.replace('.pkg', '').replace('.iso', '')
return self._package_type
@property
def version(self):
if not self._version:
dict_values = self.get_values(version_dict, self.platform)
if self.platform and dict_values:
to_match = self.package_name.replace(self.platform, '')
result = re.search(dict_values, to_match)
if result:
self._version = result.group("VERSION")
return self._version
@property
def smu(self):
if not self._smu:
result = re.search(smu_re, self.package_name)
if result:
self._smu = result.group("SMU")
return self._smu
@property
def subversion(self):
if not self._subversion:
dict_values = self.get_values(subversion_dict, self.platform)
if self.platform and dict_values:
# For NCS6K, only need to consider subversion if it is a SMU.
if self.platform in ["asr9k", "ncs1k", "ncs5k", "ncs5500", "xrv9k"] or self.smu:
to_match = self.package_name.replace(self.platform, '')
result = re.search(dict_values, to_match)
if result:
self._subversion = result.group("SUBVERSION")
return self._subversion
def get_values(self, dictionary, key):
for keys in dictionary.keys():
if key in keys.split():
return dictionary.get(keys)
return None
def is_valid(self):
return self.platform and self.version and (self.package_type or self.smu)
def __eq__(self, other):
result = self.platform == other.platform and \
(self.package_type == other.package_type) and \
self.version == other.version and \
self.smu == other.smu and \
(self.subversion == other.subversion if self.subversion and other.subversion else True)
return result
def __hash__(self):
return hash("{}{}{}{}{}".format(
self.platform, self.package_type, self.version, self.smu, self.subversion))
@staticmethod
def from_show_cmd(cmd):
software_packages = set()
data = cmd.split()
for line in data:
software_package = SoftwarePackage(line)
if software_package.is_valid():
software_packages.add(software_package)
return software_packages
@staticmethod
def from_package_list(pkg_list):
software_packages = set()
for pkg in pkg_list:
software_package = SoftwarePackage(pkg)
if software_package.is_valid():
""" for debugging
print('package_name', software_package.package_name,
'platform', software_package.platform, 'package_type', software_package.package_type,
'version', software_package.version, 'smu', software_package.smu,
'subversion', software_package.subversion)
"""
software_packages.add(software_package)
return software_packages
def __repr__(self):
return self.package_name
def __str__(self):
return self.__repr__()
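
# Illustrative usage sketch (added for clarity; not part of the original
# module). The two package names are taken from the naming examples in the
# module docstring above.
if __name__ == '__main__':
    for name in ('asr9k-mgbl-x64-3.0.0.0-r611.x86_64.rpm',
                 'ncs6k-5.2.5.CSCuy47880.smu'):
        pkg = SoftwarePackage(name)
        print('{}: platform={}, package_type={}, version={}, smu={}, subversion={}'.format(
            name, pkg.platform, pkg.package_type, pkg.version, pkg.smu, pkg.subversion))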
| 1.046875 | 1 |
megaboat.py | xros/megaboat | 4 | 2325 | <reponame>xros/megaboat
# -*- coding: utf-8 -*-
# Copyright to <NAME>.
# Any distribution of this copy should notify its author. For commercial use, please contact the author for authorization. Apr 2014
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from lxml import etree
import time
import json
import urllib
import urllib2
# For media posting
from poster.encode import multipart_encode
from poster.streaminghttp import register_openers
class ParsingContainer(object):
"""Parsing Wechat messages for whose types are of : 'text', 'image', 'voice', 'video', 'location', 'link'
    After creating a new instance of the class, you need to declare the 'MsgType'
For example,
$~ python
>>> holder = ParsingContainer()
>>> hasattr(holder, "_Content")
>>> True
>>> holder.initType(MsgType='video')
>>> hasattr(holder, "_PicUrl")
>>> True
>>> holder.initType(MsgType='text') # Or we can just ellipsis this operation since by default its 'text'
>>> hasattr(holder, "_PicUrl")
>>> False
>>> hasattr(holder, "_Content")
>>> True
>>> holder.getElementByTag('Content')
>>> ''
"""
# By default, MsgType is set as 'text'
MsgType = 'text'
    # Unique tags in all the mapping relationships
#
    # Tags shared by all normal messages
global commonTag
commonTag = ['ToUserName', 'FromUserName', 'CreateTime', 'MsgId', 'MsgType']
# For normal message mapping
global normalMapping
normalMapping = {
'text':['Content'],
'image':['PicUrl', 'MediaId'],
'voice':['MediaId','Format'],
        'video':['MediaId','ThumbMediaId'],
'location':['Location_X','Location_Y','Scale', 'Label'],
'link':['Title','Description','Url'],
}
# For event message mapping
global eventMapping
eventMapping = {
# The list presents the combined tag set of the event message
'event':['Event','EventKey','Ticket','Latitude','Longitude','Precision' ],
}
# For recognition message mapping
global recognitionMapping
recognitionMapping = {
'voice':['MediaId','Format','Recognition'],
}
def __init__(self, incomingMessage='<xml></xml>'):
# pre-set some common variables
root = etree.fromstring(incomingMessage)
# The 5 ones in common
if root.find('ToUserName') is not None:
self._ToUserName = root.find('ToUserName').text
else:
self._ToUserName = ''
if root.find('FromUserName') is not None:
self._FromUserName = root.find('FromUserName').text
else:
self._FromUserName = ''
if root.find('CreateTime') is not None:
self._CreateTime = root.find('CreateTime').text
else:
self._CreateTime = '1000000000'
if root.find('MsgType') is not None:
self._MsgType = root.find('MsgType').text
else:
self._MsgType = ''
if root.find('MsgId') is not None:
self._MsgId = root.find('MsgId').text
else:
self._MsgId = ''
# Store the XML incomingMessage if has
# For text message only
if self.MsgType == 'text':
if root.find('Content') is not None:
self._Content = root.find('Content').text
else:
self._Content = ''
# For image message only
elif self.MsgType == 'image':
if root.find('PicUrl') is not None:
self._PicUrl = root.find('PicUrl').text
else:
self._PicUrl = ''
if root.find('MediaId') is not None:
self._MediaId = root.find('MediaId').text
else:
self._MediaId = ''
# For voice message only
elif self.MsgType == 'voice':
if root.find('MediaId') is not None:
self._MediaId = root.find('MediaId').text
else:
self._MediaId = ''
if root.find('Format') is not None:
self._Format = root.find('Format').text
else:
self._Format = ''
# For video message only
elif self.MsgType == 'video':
if root.find('MediaId') is not None:
self._MediaId = root.find('MediaId').text
else:
self._MediaId = ''
if root.find('ThumbMediaId') is not None:
self._ThumbMediaId = root.find('ThumbMediaId').text
else:
self._ThumbMediaId = ''
# For location message only
elif self.MsgType == 'location':
if root.find('Location_X') is not None:
self._Location_X = root.find('Location_X').text
else:
self._Location_X = ''
if root.find('Location_Y') is not None:
self._Location_Y = root.find('Location_Y').text
else:
self._Location_Y = ''
if root.find('Scale') is not None:
self._Scale = root.find('Scale').text
else:
self._Scale = ''
if root.find('Label') is not None:
self._Label = root.find('Label').text
else:
self._Label = ''
# For link message only
elif self.MsgType == 'link':
if root.find('Title') is not None:
self._Title = root.find('Title').text
else:
self._Title = ''
if root.find('Description') is not None:
self._Description = root.find('Description').text
else:
self._Description = ''
if root.find('Url') is not None:
self._Url = root.find('Url').text
else:
self._Url = ''
# For event message only
elif self.MsgType == 'event':
# It has to have a ```self._Event``` for event message certainly
if root.find('Event') is not None:
self._Event = root.find('Event').text
else:
self._Event = ''
if root.find('EventKey') is not None:
self._EventKey = root.find('EventKey').text
if root.find('Ticket') is not None:
self._Ticket = root.find('Ticket').text
if root.find('Latitude') is not None:
self._Latitude = root.find('Latitude').text
if root.find('Longitude') is not None:
self._Longitude = root.find('Longitude').text
if root.find('Precision') is not None:
self._Precision = root.find('Precision').text
def initType(self, MsgType='text', incomingMessage='<xml></xml>'):
''' To initialize message type
'''
MsgType_list = ['text', 'image', 'voice', 'video', 'location', 'link', 'event']
if MsgType not in MsgType_list:
raise ValueError, "MsgType '%s' not valid " % MsgType
for i in MsgType_list:
if MsgType == i:
self.MsgType = i
break
# Delete the common tags
for c in commonTag:
try:
delattr(self, '_' + c)
except:
pass
        # Delete the unused elements in normalMapping
for k in normalMapping:
if k !=self.MsgType:
for m in normalMapping[k]:
try:
delattr(self, '_' + m)
except:
pass
        # Delete the unused elements in eventMapping
for k in eventMapping:
for e in eventMapping[k]:
try:
delattr(self, '_' + e)
except:
pass
self.__init__(incomingMessage)
# releasing method
def __del__(self):
pass
#@property
def getElementByTag(self, tag):
'''To get element from the tag
'''
try:
gotten = getattr(self, "_" + tag)
except:
return None
##raise ValueError
#tmp = "Instance has no attribute _%s" % tag
#raise AttributeError, tmp
else:
return gotten
def digest(self, incomingMessage):
'''To digest the XML message passed from wechat server
        Sets the values of the corresponding instance variables.
        The 'incomingMessage' argument is an XML string.
        Based on its content this will assign values to ```self.MsgType``` and related attributes. The logic is as follows:
1) check parent message type :"MsgType"
2) check subclass message type if "Voice Recognition", "Event", "Normal"
3) check children class message type
'''
root = etree.fromstring(incomingMessage)
msgType = root.find("MsgType").text
# Get message type based from the ```incomingMessage``` variable
if msgType in ['text', 'image', 'voice', 'video', 'location', 'link', 'event']:
# Check if the incomingMessage has tag 'Recognition' then, it is a voice recognition message
if root.find("Recognition") is not None:
self.type = 'recognition'
# Check if the incomingMessage has tag 'Event' then, it is a voice event message
elif root.find("Event") is not None:
self.type = 'event'
# After all then 'normal' message
else:
self.type = 'normal'
# For normal messages
if self.type == 'normal':
if msgType == 'text':
self.initType('text', incomingMessage)
elif msgType == 'image':
self.initType('image', incomingMessage)
elif msgType == 'voice':
self.initType('voice', incomingMessage)
elif msgType == 'video':
self.initType('video', incomingMessage)
elif msgType == 'location':
self.initType('location', incomingMessage)
elif msgType == 'link':
self.initType('link', incomingMessage)
# TODO
        # For voice recognition messages
if self.type == 'recognition':
self.initType('voice', incomingMessage)
            # Also store ```self._Recognition```, the only extra field compared with a normal 'voice' message
self._Recognition = root.find("Recognition").text
        # For event messages
if self.type == 'event':
self.initType('event', incomingMessage)
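    # A minimal usage sketch for digest() (the enclosing container class is defined
    # above this excerpt, so ``msg`` below stands for an already-constructed instance;
    # the XML values are illustrative only):
    #
    #   incoming = ('<xml><ToUserName><![CDATA[server]]></ToUserName>'
    #               '<FromUserName><![CDATA[client]]></FromUserName>'
    #               '<CreateTime>1397808770</CreateTime>'
    #               '<MsgType><![CDATA[text]]></MsgType>'
    #               '<Content><![CDATA[hello]]></Content></xml>')
    #   msg.digest(incoming)
    #   print msg.getElementByTag('Content')   # -> 'hello'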
class RespondingContainer(object):
"""Package XML to reponse to determained wechat message
For more information please visit: http://mp.weixin.qq.com/wiki/index.php?title=%E5%8F%91%E9%80%81%E8%A2%AB%E5%8A%A8%E5%93%8D%E5%BA%94%E6%B6%88%E6%81%AF
Usage:
>>> rc = RespondingContainer()
>>> rc.initType('text') # Or we can ellipsis this since it is of 'text' by default
>>> # Notice we don't need to set the 'CreateTime' since it has been generated automatically :)
>>> rc.setElementByTag(FromUserName='the_server', ToUserName='the_wechat_client',Content='Hello dude!')
>>> tpl_out = rc.dumpXML()
>>> tpl_out
>>><xml>
<ToUserName>the_wechat_client</ToUserName>
<FromUserName>the_server</FromUserName>
<CreateTime>1397808770</CreateTime>
<MsgType>text</MsgType>
<Content>Hello dude!</Content>
</xml>
>>>
"""
def __init__(self, MsgType='text'):
self._MsgType = MsgType
# By default set root as the 'text' XML format
the_tpl = globals()['tpl_' + self._MsgType].encode('utf-8').decode('utf-8')
self.root = etree.fromstring(the_tpl)
#print self.root.find("FromUserName").text
#print type(self.root.find("FromUserName").text)
def initType(self, MsgType='text'):
tpl_list = ['text', 'image', 'voice', 'video', 'music', 'news']
if MsgType not in tpl_list:
raise ValueError, "Invalid responsing message MsgType '%s'" % MsgType
else:
## Load the template
#for i in tpl_list:
# if MsgType == i:
# self._MsgType = MsgType
# ## the the template
# the_xml = globals()['tpl_'+i]
# self.root = etree.fromstring( the_xml )
# break
## Set the default tag value
### Get all the tags
#child_list = []
#for child in self.root.getchildren():
# child_list += [str(child)]
### Attach 'tag' object to class to make something as : 'self._FromUserName'
#for i in child_list:
# if i == 'CreateTime':
# setattr(self,"_"+i, str(int(time.time())))
# else:
# setattr(self,"_"+i, '')
self.__init__(MsgType)
#def setElementByTag(self, tag):
def setElementByTag(self, **kwargs):
""" To package XML message into an object
Usage:
>>> setElementByTag(FromUserName='the_wechat_server',ToUserName='the_wechat_client',Content='Hello dude!')
# In this way we can then use ```dumpXML()``` to get the XML we need to reponse to wechat clients! :)
"""
## assign the basic time
self.root.find('CreateTime').text = str(int(time.time()))
#print "-----"
#print self._MsgType
## For text message only
if self._MsgType == 'text':
# To set attribute value to such as: 'self._FromUsername'
for k, v in kwargs.items():
try:
## assign value to the object
#getattr(self, "_"+k) = v
## assign/update value to the new XML object
self.root.find(k).text = v
except Exception as e:
print e
raise e
#raise AttributeError, "Message type '%s' has no attribute/tag '%s'" % (self._MsgType, k)
## For image message only
elif self._MsgType == 'image':
# To set attribute value of the XML special for image
for k, v in kwargs.items():
if k == 'MediaId':
#print v
#print etree.tostring(self.root)
self.root.find('Image').find('MediaId').text = v
else:
try:
## assign/update value to the new XML object
self.root.find(k).text = v
except Exception as e:
print e
raise e
## For voice message only
elif self._MsgType == 'voice':
            # To set attribute value of the XML special for voice
for k, v in kwargs.items():
if k == 'MediaId':
#print v
#print etree.tostring(self.root)
self.root.find('Voice').find('MediaId').text = v
else:
try:
## assign/update value to the new XML object
self.root.find(k).text = v
except Exception as e:
print e
raise e
## For video message only
elif self._MsgType == 'video':
            # To set attribute value of the XML special for video
for k, v in kwargs.items():
if k == 'MediaId':
#print v
#print etree.tostring(self.root)
self.root.find('Video').find('MediaId').text = v
elif k == 'Title':
self.root.find('Video').find('Title').text = v
elif k == 'Description':
self.root.find('Video').find('Description').text = v
elif k == 'MusicUrl':
self.root.find('Video').find('MusicUrl').text = v
elif k == 'HQMusicUrl':
self.root.find('Video').find('HQMusicUrl').text = v
elif k == 'ThumbMediaId':
self.root.find('Video').find('ThumbMediaId').text = v
else:
try:
## assign/update value to the new XML object
self.root.find(k).text = v
except Exception as e:
print e
raise e
## For article message only
elif self._MsgType == 'article':
            # To set attribute value of the XML special for news articles
for k, v in kwargs.items():
if k == 'ArticleCount':
self.root.find(k).text = v
if k == 'Articles':
# TODO to generate articles as
#print v
#print etree.tostring(self.root)
self.root.find('Video').find('MediaId').text = v
elif k == 'Title':
self.root.find('Video').find('Title').text = v
elif k == 'Description':
self.root.find('Video').find('Description').text = v
elif k == 'MusicUrl':
self.root.find('Video').find('MusicUrl').text = v
elif k == 'HQMusicUrl':
self.root.find('Video').find('HQMusicUrl').text = v
elif k == 'ThumbMediaId':
self.root.find('Video').find('ThumbMediaId').text = v
else:
try:
## assign/update value to the new XML object
self.root.find(k).text = v
except Exception as e:
print e
raise e
def dumpXML(self):
# To dump the XML we need
# the ```self.root``` has been assigned already
return etree.tostring(self.root, encoding='utf-8',method='xml',pretty_print=True)
# The down blow are the templates of all the responsing message valid for wechat
# For more information, please visit : http://mp.weixin.qq.com/wiki/index.php?title=%E5%8F%91%E9%80%81%E8%A2%AB%E5%8A%A8%E5%93%8D%E5%BA%94%E6%B6%88%E6%81%AF
global tpl_text
global tpl_image
global tpl_voice
global tpl_video
global tpl_music
global tpl_news
tpl_text = u'''<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>12345678</CreateTime>
<MsgType><![CDATA[text]]></MsgType>
<Content><![CDATA[你好]]></Content>
</xml>'''
tpl_image = '''<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>12345678</CreateTime>
<MsgType><![CDATA[image]]></MsgType>
<Image>
<MediaId><![CDATA[media_id]]></MediaId>
</Image>
</xml>'''
tpl_voice = '''<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>12345678</CreateTime>
<MsgType><![CDATA[voice]]></MsgType>
<Voice>
<MediaId><![CDATA[media_id]]></MediaId>
</Voice>
</xml>'''
tpl_video = '''<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>12345678</CreateTime>
<MsgType><![CDATA[video]]></MsgType>
<Video>
<MediaId><![CDATA[media_id]]></MediaId>
<Title><![CDATA[title]]></Title>
<Description><![CDATA[description]]></Description>
</Video>
</xml>'''
tpl_music = '''<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>12345678</CreateTime>
<MsgType><![CDATA[music]]></MsgType>
<Music>
<Title><![CDATA[TITLE]]></Title>
<Description><![CDATA[DESCRIPTION]]></Description>
<MusicUrl><![CDATA[MUSIC_Url]]></MusicUrl>
<HQMusicUrl><![CDATA[HQ_MUSIC_Url]]></HQMusicUrl>
<ThumbMediaId><![CDATA[media_id]]></ThumbMediaId>
</Music>
</xml>'''
tpl_news = '''<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>12345678</CreateTime>
<MsgType><![CDATA[news]]></MsgType>
<ArticleCount>2</ArticleCount>
<Articles>
<item>
<Title><![CDATA[title1]]></Title>
<Description><![CDATA[description1]]></Description>
<PicUrl><![CDATA[picurl]]></PicUrl>
<Url><![CDATA[url]]></Url>
</item>
<item>
<Title><![CDATA[title]]></Title>
<Description><![CDATA[description]]></Description>
<PicUrl><![CDATA[picurl]]></PicUrl>
<Url><![CDATA[url]]></Url>
</item>
</Articles>
</xml>'''
# Positive response
class PositiveRespondingContainer(object):
'''Using wechat custom service API to pass 6 types of messages to those wechat clients \n
who sent messages to the public wechat service. Those 6 types of messages include:
text, image, voice, video, music, news
    The dumped result is of dict format.
    We need to json.dumps(the_dict_object) if we want to pass the right response back
'''
def __init__(self, MsgType='text'):
self._MsgType = MsgType
# By default set the ```self.the_dict``` as from the 'text' JSON format
the_json_tpl = globals()['json_' + self._MsgType].encode('utf-8').decode('utf-8')
self.the_dict = json.loads(the_json_tpl)
if MsgType == 'text':
pass
def initType(self, MsgType='text'):
if MsgType not in ['text', 'image', 'voice', 'video', 'music', 'news']:
raise ValueError, "It has no message type: '%s'" % MsgType
else:
# pass the message type to have ```self.the_dict```
self.__init__(MsgType)
def setElementByKey(self, **kwargs):
'''To set the ```self.the_dict``` according to the message type by such as ```initType(MsgType='text')```
        Notice: all the kwargs' keys in this function should be lower case; the official wechat API defines them that way. '''
## For text message only
if self._MsgType == 'text':
for k, v in kwargs.items():
try:
if k == 'content':
self.the_dict['text'][k] = v
else:
self.the_dict[k] = v
except Exception as e:
print e
raise e
## For image message only
elif self._MsgType == 'image':
for k, v in kwargs.items():
try:
if k == 'media_id':
self.the_dict['image'][k] = v
else:
self.the_dict[k] = v
except Exception as e:
print e
raise e
## For voice message only
elif self._MsgType == 'voice':
for k, v in kwargs.items():
try:
if k == 'media_id':
self.the_dict['voice'][k] = v
else:
self.the_dict[k] = v
except Exception as e:
print e
raise e
## For video message only
elif self._MsgType == 'video':
for k, v in kwargs.items():
try:
if k == 'media_id':
self.the_dict['video'][k] = v
elif k == 'title':
self.the_dict['video'][k] = v
elif k == 'description':
self.the_dict['video'][k] = v
else:
self.the_dict[k] = v
except Exception as e:
print e
raise e
## For music message only
elif self._MsgType == 'music':
for k, v in kwargs.items():
try:
if k == 'musicurl':
self.the_dict['music'][k] = v
elif k == 'title':
self.the_dict['music'][k] = v
elif k == 'description':
self.the_dict['music'][k] = v
elif k == 'hqmusicurl':
self.the_dict['music'][k] = v
elif k == 'thumb_media_id':
self.the_dict['music'][k] = v
else:
self.the_dict[k] = v
except Exception as e:
print e
raise e
## For news message only
elif self._MsgType == 'news':
for k, v in kwargs.items():
try:
                    # here we just check whether ```v``` is of type list; ```v``` should be packaged in a list already
                    # if it is a list, then it is the element of the key ```articles``` for the news message
'''
"articles": [
{
"title":"Happy Day",
"description":"Is Really A Happy Day",
"url":"URL",
"picurl":"PIC_URL"
},
{
"title":"Happy Day",
"description":"Is Really A Happy Day",
"url":"URL",
"picurl":"PIC_URL"
}
]
'''
if k == 'articles':
if type(v) == list:
self.the_dict['news'][k] = v
else:
raise ValueError, "The value of the key 'articles' should be of type list"
elif k == 'touser':
self.the_dict['touser'] = v
elif k == 'msgtype':
self.the_dict['msgtype'] = 'news'
except Exception as e:
print e
raise e
# package article
    @staticmethod
    def packageArticle(title="default title", description="default description", url="http://www.baidu.com", picurl="http://www.baidu.com/img/bdlogo.gif"):
'''This will return an article in a list which contains a dict.
        While constructing the JSON to be dumped,
This is used with the function ```setElementByKey(touser='someone', msgtype='news', articles=packageArticle())```
'''
return [{"title": title, "description":description, "url":url, "picurl":picurl}]
    # to dump the dict for later JSON serialization
def dumpDict(self):
return self.the_dict
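# A usage sketch for PositiveRespondingContainer (OPENID and the article fields are
# placeholders, not real data):
#
#   prc = PositiveRespondingContainer()
#   prc.initType('news')
#   articles = PositiveRespondingContainer.packageArticle(
#       title='Happy Day', description='Is Really A Happy Day',
#       url='URL', picurl='PIC_URL')
#   prc.setElementByKey(touser='OPENID', msgtype='news', articles=articles)
#   payload = json.dumps(prc.dumpDict())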
json_text = '''{
"touser":"OPENID",
"msgtype":"text",
"text":
{
"content":"Hello World"
}
}'''
json_image = '''{
"touser":"OPENID",
"msgtype":"image",
"image":
{
"media_id":"MEDIA_ID"
}
}'''
json_voice = '''{
"touser":"OPENID",
"msgtype":"voice",
"voice":
{
"media_id":"MEDIA_ID"
}
}'''
json_video = '''{
"touser":"OPENID",
"msgtype":"video",
"video":
{
"media_id":"MEDIA_ID",
"title":"TITLE",
"description":"DESCRIPTION"
}
}'''
json_music = '''{
"touser":"OPENID",
"msgtype":"music",
"music":
{
"title":"MUSIC_TITLE",
"description":"MUSIC_DESCRIPTION",
"musicurl":"MUSIC_URL",
"hqmusicurl":"HQ_MUSIC_URL",
"thumb_media_id":"THUMB_MEDIA_ID"
}
}'''
json_news = '''{
"touser":"OPENID",
"msgtype":"news",
"news":{
"articles": [
{
"title":"Happy Day",
"description":"Is Really A Happy Day",
"url":"URL",
"picurl":"PIC_URL"
},
{
"title":"Happy Day",
"description":"Is Really A Happy Day",
"url":"URL",
"picurl":"PIC_URL"
}
]
}
}'''
class SubscriberManager(object):
'''To manage the subscriber groups, profile, location, list.
Usage:
>>> sm = SubscriberManager()
>>> sm.loadToken('<KEY>')
>>> hisprofile = sm.getSubscriberProfile(openid='his_open_id', lang='zh_CN')
'''
def __init__(self, token=''):
self._token = token
def loadToken(self, token=''):
'''Firstly load the access token, then use the functions below'''
self._token = token
def getSubscriberProfile(self, openid='', lang='zh_CN'):
        '''The openid parameter is unique to each wechat public service account.
This function will return a dict if ```token``` and ```open_id``` are valid.
If not exists or not valid will return None.
For the parameter 'zh_CN', there are others: 'zh_TW, en'
For more information: please visit, http://mp.weixin.qq.com/wiki/index.php?title=%E8%8E%B7%E5%8F%96%E7%94%A8%E6%88%B7%E5%9F%BA%E6%9C%AC%E4%BF%A1%E6%81%AF'''
url = "https://api.weixin.qq.com/cgi-bin/user/info?access_token=" + self._token + "&openid=" + openid + "&lang=" + lang
try:
a = urllib2.urlopen(url)
except Exception as e:
print e
return None
else:
gotten = a.read()
a_dict = json.loads(gotten)
# means wrong appid or secret
if a_dict.has_key('errcode'):
return None
else:
return a_dict
def createGroup(self, name=''):
        '''Create a group with the given name.
If created, then it will return the new group id of type 'int'.
If not, will return None.
'''
url = "https://api.weixin.qq.com/cgi-bin/groups/create?access_token=" + self._token
postData = '{"group": {"name": "%s"} }' % name
request = urllib2.Request(url,data=postData)
request.get_method = lambda : 'POST'
try:
response = urllib2.urlopen(request)
except Exception as e:
print e
return None
else:
a_dict = json.loads(response.read())
if a_dict.has_key('errcode'):
return None
else:
return a_dict['group']['id']
def getAllgroups(self):
''' A dict will be returned.
For more information please visit:
http://mp.weixin.qq.com/wiki/index.php?title=%E5%88%86%E7%BB%84%E7%AE%A1%E7%90%86%E6%8E%A5%E5%8F%A3#.E6.9F.A5.E8.AF.A2.E6.89.80.E6.9C.89.E5.88.86.E7.BB.84
'''
url = "https://api.weixin.qq.com/cgi-bin/groups/get?access_token=" + self._token
try:
response = urllib2.urlopen(url)
except Exception as e:
print e
return None
else:
a_dict = json.loads(response.read())
if a_dict.has_key('errcode'):
return None
else:
return a_dict
def getHisGroupID(self, openid=''):
'''Get a subscriber's group ID. The ID is of type 'int'.
If openid wrong or token invalid, 'None' will be returned.
For more information, please visit:
http://mp.weixin.qq.com/wiki/index.php?title=%E5%88%86%E7%BB%84%E7%AE%A1%E7%90%86%E6%8E%A5%E5%8F%A3#.E6.9F.A5.E8.AF.A2.E7.94.A8.E6.88.B7.E6.89.80.E5.9C.A8.E5.88.86.E7.BB.84'''
url = "https://api.weixin.qq.com/cgi-bin/groups/getid?access_token="+ self._token
postData = '{"openid":"%s"}' % openid
request = urllib2.Request(url,data=postData)
try:
response = urllib2.urlopen(request)
except Exception as e:
print e
return None
else:
a_dict = json.loads(response.read())
if a_dict.has_key('errcode'):
return None
else:
return a_dict['groupid']
def updateGroupName(self, groupid='', new_name=''):
        '''Update the name of the given group id to new_name.
        Returns 'True' or 'False' depending on whether the update succeeded.
For more information, please visit:
http://mp.weixin.qq.com/wiki/index.php?title=%E5%88%86%E7%BB%84%E7%AE%A1%E7%90%86%E6%8E%A5%E5%8F%A3#.E4.BF.AE.E6.94.B9.E5.88.86.E7.BB.84.E5.90.8D
'''
url = "https://api.weixin.qq.com/cgi-bin/groups/update?access_token=" + self._token
postData = '{"group":{"id":%s,"name":"%s"}}' % (groupid, new_name)
request = urllib2.Request(url,data=postData)
try:
response = urllib2.urlopen(request)
except Exception as e:
print e
return False
else:
a_dict = json.loads(response.read())
#print a_dict
if a_dict.has_key('errcode'):
if a_dict['errcode'] == 0:
return True
else:
return False
else:
return False
def moveHimToGroup(self, openid='', groupid=''):
'''Move him to other group.
'True' or 'False' if moved or not.
For more information please visit:
http://mp.weixin.qq.com/wiki/index.php?title=%E5%88%86%E7%BB%84%E7%AE%A1%E7%90%86%E6%8E%A5%E5%8F%A3#.E7.A7.BB.E5.8A.A8.E7.94.A8.E6.88.B7.E5.88.86.E7.BB.84'''
url = "https://api.weixin.qq.com/cgi-bin/groups/members/update?access_token=" + self._token
postData = '{"openid":"%s","to_groupid":%s}' % (openid, groupid)
request = urllib2.Request(url,data=postData)
try:
response = urllib2.urlopen(request)
except Exception as e:
print e
return False
else:
a_dict = json.loads(response.read())
#print a_dict
if a_dict.has_key('errcode'):
if a_dict['errcode'] == 0:
return True
else:
return False
else:
return False
def getSubscriberList(self, next_openid=''):
'''To get subscriber list.
        A dict will be returned on success.
        If ```token``` and ```next_openid``` are valid, then a dict will be returned.
        If the ```next_openid``` does not exist, the official wechat server takes it as '' by default.
        If the request fails, 'None' will be returned.
For more information please visit:
http://mp.weixin.qq.com/wiki/index.php?title=%E8%8E%B7%E5%8F%96%E5%85%B3%E6%B3%A8%E8%80%85%E5%88%97%E8%A1%A8
'''
url = "https://api.weixin.qq.com/cgi-bin/user/get?access_token=" + self._token + "&next_openid=" + next_openid
try:
response = urllib2.urlopen(url)
except Exception as e:
print e
return None
else:
a_dict = json.loads(response.read())
#print a_dict
if a_dict.has_key('errcode'):
return None
else:
return a_dict
def getAPIToken(appid='', appsecret=''):
    '''Get wechat API token for customer service or other APIs.
If ```appid``` and ```appsecret``` are correct then a string 'token' will be return.
If not , 'return None' '''
default_url = 'https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&'
url = default_url + 'appid=' + appid + '&secret=' + appsecret
try:
a = urllib2.urlopen(url)
except Exception as e:
print e
return None
else:
gotten = a.read()
a_dict = json.loads(gotten)
if a_dict.has_key('access_token'):
return a_dict['access_token']
# means wrong appid or secret
else:
return None
def postMessage2API(token='',messageString=''):
    '''Using the token, post the message to the designated user.
This returns a Boolean value'''
url = "https://api.weixin.qq.com/cgi-bin/message/custom/send?access_token=" + token
request = urllib2.Request(url, messageString)
request.get_method = lambda : 'POST'
try:
response = urllib2.urlopen(request)
except Exception as e:
print e
return False
else:
j = json.loads(response.read())
# The above works
#print j
# to check if the message was accepted
if j['errcode'] == 0:
return True
else:
return False
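# A minimal end-to-end sketch for the custom service API (APPID/APPSECRET/OPENID are
# placeholders, not real credentials):
#
#   token = getAPIToken(appid='APPID', appsecret='APPSECRET')
#   if token is not None:
#       msg = json.dumps({"touser": "OPENID",
#                         "msgtype": "text",
#                         "text": {"content": "Hello World"}})
#       sent = postMessage2API(token=token, messageString=msg)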
class MenuManager(object):
'''To manage the bottom menu of the wechat service
Usage:
>>> mm = MenuManager()
>>> mm.loadToken('something_the_api_token')
>>> flag = mm.createMenu('the_menu_format_constructed_from_a_JSON_as_a_string')
>>> flag
True
>>> menu_got = mm.getMenu()
>>> menu_got
{u'menu': {u'button': [{u'type': u'click', u'name': u'\u7b2c\u4e00\u94ae', u'key': u'V1001_TODAY_MUSIC', u'sub_button': []}, {u'type': u'click', u'name': u'\u7b2c\u4e8c\u94ae', u'key': u'V1001_TODAY_SINGER', u'sub_button': []}, {u'name': u'\u7b2c\u4e09\u94ae', u'sub_button': [{u'url': u'http://www.soso.com/', u'type': u'view', u'name': u'\u641c\u641c', u'sub_button': []}, {u'url': u'http://v.qq.com/', u'type': u'view', u'name': u'\u770b\u7535\u5f71', u'sub_button': []}, {u'type': u'click', u'name': u'\u5938\u6211\u5e05', u'key': u'<KEY>', u'sub_button': []}]}]}}
>>> flag2 = mm.deleteMenu()
>>> flag2
True
>>> mm.getMenu()
>>> # nothing gotten: it means no menu at all
'''
def __init__(self, token=''):
self._token = token
def loadToken(self, token=''):
'''Load the token before using other functions'''
self._token = token
def createMenu(self, menu_format=''):
'''Create menu, it needs a token and the menu format.
The ```menu_format``` is of type string.
But ```menu_format``` is constructed from a JSON.
For more information please visit:
http://mp.weixin.qq.com/wiki/index.php?title=%E8%87%AA%E5%AE%9A%E4%B9%89%E8%8F%9C%E5%8D%95%E5%88%9B%E5%BB%BA%E6%8E%A5%E5%8F%A3
'''
token = self._token
url = "https://api.weixin.qq.com/cgi-bin/menu/create?access_token=" + token
request = urllib2.Request(url, menu_format)
request.get_method = lambda : 'POST'
try:
response = urllib2.urlopen(request)
except Exception as e:
print e
return False
else:
j = json.loads(response.read())
# The above works
#print j
# to check if the message was accepted
if j['errcode'] == 0:
return True
else:
return False
def getMenu(self):
'''Get the menu format from the API.
        If a menu exists, a dict will be returned.
If not, 'None' will be returned.
'''
token = self._token
url = "https://api.weixin.qq.com/cgi-bin/menu/get?access_token="+ token
try:
response = urllib2.urlopen(url)
except Exception as e:
# its better to raise something here if the wechat remote server is down
print e
return None
else:
a_dict = json.loads(response.read())
if a_dict.has_key('errcode'):
if a_dict['errcode'] != 0:
return None
else:
return a_dict
else:
return a_dict
def deleteMenu(self):
token = self._token
url = "https://api.weixin.qq.com/cgi-bin/menu/delete?access_token=" + token
try:
response = urllib2.urlopen(url)
except Exception as e:
print e
return False
else:
a_dict = json.loads(response.read())
if a_dict.has_key('errcode'):
if a_dict['errcode'] == 0:
return True
else:
return False
else:
return False
class MediaManager(object):
    '''There are four types of media supported by wechat.
image, voice, video, thumb
Post the file to the offical wechat server and get the response.
'''
def __init__(self, media_type='image', token = ''):
self._media_type = media_type
self._token = token
def loadToken(self, token = ''):
self._token = token
def uploadMedia(self, media_type='image', media_path=''):
        '''Post the given media file to the official upload URL.
        If the media file is valid, then a dict will be returned.
If not, 'None' will be returned.
For more information, please visit: http://mp.weixin.qq.com/wiki/index.php?title=%E4%B8%8A%E4%BC%A0%E4%B8%8B%E8%BD%BD%E5%A4%9A%E5%AA%92%E4%BD%93%E6%96%87%E4%BB%B6'''
if media_type not in ['image', 'voice', 'video', 'thumb']:
raise ValueError, "Media type: '%s' not valid" % media_type
else:
self._media_type = media_type
url = "http://file.api.weixin.qq.com/cgi-bin/media/upload?access_token=" + self._token + "&type=" + self._media_type
register_openers()
try:
datagen, headers = multipart_encode({"image1": open(media_path,"rb")})
except Exception as e:
#print e
return None
#raise e
else:
request = urllib2.Request(url,data=datagen,headers=headers)
try:
response = urllib2.urlopen(request)
except Exception as e:
print e
return None
| 2.375 | 2 |
root/converter/__init__.py | thasmarinho/root-image-editor | 2 | 2326 | from .color_converter import ColorConverter
from .scale_converter import ScaleConverter
| 1.109375 | 1 |
chirun/plastex/color/__init__.py | sthagen/chirun-ncl-chirun | 5 | 2327 | <filename>chirun/plastex/color/__init__.py
from plasTeX import Command, Environment
def ProcessOptions(options, document):
colors = {}
document.userdata.setPath('packages/color/colors', colors)
colors['red'] = latex2htmlcolor('1,0,0')
colors['green'] = latex2htmlcolor('0,1,0')
colors['blue'] = latex2htmlcolor('0,0,1')
colors['cyan'] = latex2htmlcolor('0,1,1')
colors['magenta'] = latex2htmlcolor('1,0,1')
colors['yellow'] = latex2htmlcolor('1,1,0')
colors['white'] = latex2htmlcolor('1')
colors['black'] = latex2htmlcolor('0')
colors['gray'] = latex2htmlcolor('0.9')
colors['darkred'] = latex2htmlcolor('0.8,0,0')
colors['middlered'] = latex2htmlcolor('0.9,0,0')
colors['lightred'] = latex2htmlcolor('1,0,0')
colors['darkgreen'] = latex2htmlcolor('0,0.6,0')
colors['middlegreen'] = latex2htmlcolor('0,0.8,0')
colors['lightgreen'] = latex2htmlcolor('0,1,0')
colors['darkblue'] = latex2htmlcolor('0,0,0.8')
colors['middleblue'] = latex2htmlcolor('0,0,0.9')
colors['lightblue'] = latex2htmlcolor('0,0,1')
colors['darkcyan'] = latex2htmlcolor('0.6,0.8,0.8')
colors['middlecyan'] = latex2htmlcolor('0,0.8,0.8')
colors['darkmagenta'] = latex2htmlcolor('0.8,0.6,0.8')
colors['middlemagenta'] = latex2htmlcolor('1,0,0.6')
colors['darkyellow'] = latex2htmlcolor('0.8,0.8,0.6')
colors['middleyellow'] = latex2htmlcolor('1,1,0.2')
colors['darkgray'] = latex2htmlcolor('0.5')
colors['middlegray'] = latex2htmlcolor('0.7')
colors['lightgray'] = latex2htmlcolor('0.9')
def latex2htmlcolor(arg, model='rgb', named=None):
named = named or {}
if model == 'named':
return named.get(arg, '')
if ',' in arg:
parts = [float(x) for x in arg.split(',')]
# rgb
if len(parts) == 3:
red, green, blue = parts
red = min(int(red * 255), 255)
green = min(int(green * 255), 255)
blue = min(int(blue * 255), 255)
# cmyk
elif len(parts) == 4:
c, m, y, k = parts
red, green, blue = [int(255 * x) for x in [1 - c * (1 - k) - k, 1 - m * (1 - k) - k, 1 - y * (1 - k) - k]]
else:
return arg.strip()
else:
try:
            # single grey level in [0, 1]: scale to 0-255 like the rgb branch above
            red = green = blue = min(int(float(arg) * 255), 255)
except ValueError:
try:
return named[arg]
except KeyError:
return arg.strip()
return '#%.2X%.2X%.2X' % (int(red), int(green), int(blue))
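# A few illustrative conversions (examples written for this note, not taken from plasTeX):
#   latex2htmlcolor('1,0,0')    -> '#FF0000'   (rgb triple)
#   latex2htmlcolor('0,0,1,0')  -> '#FFFF00'   (cmyk quadruple)
#   latex2htmlcolor('red', model='named', named={'red': '#FF0000'}) -> '#FF0000'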
class definecolor(Command):
args = 'name:str model:str color:str'
def invoke(self, tex):
a = self.parse(tex)
u = self.ownerDocument.userdata
colors = u.getPath('packages/color/colors')
colors[a['name']] = latex2htmlcolor(a['color'], a['model'], colors)
class textcolor(Command):
args = '[ model:str ] color:str self'
def invoke(self, tex):
a = self.parse(tex)
self.style['color'] = latex2htmlcolor(a['color'], a['model'],
self.ownerDocument.userdata.getPath('packages/color/colors'))
class color(Environment):
args = '[ model:str ] color:str'
def invoke(self, tex):
a = self.parse(tex)
self.style['color'] = latex2htmlcolor(a['color'], a['model'],
self.ownerDocument.userdata.getPath('packages/color/colors'))
class pagecolor(Command):
args = '[ model:str ] color:str'
class colorbox(Command):
args = '[ model:str ] color:str self'
def invoke(self, tex):
a = self.parse(tex)
self.style['background-color'] = latex2htmlcolor(a['color'], a['model'],
self.ownerDocument.userdata.getPath('packages/color/colors'))
class fcolorbox(Command):
args = '[ model:str ] bordercolor:str color:str self'
def invoke(self, tex):
a = self.parse(tex)
self.style['background-color'] = latex2htmlcolor(a['color'], a['model'],
self.ownerDocument.userdata.getPath('packages/color/colors'))
self.style['border'] = ('1px solid %s'
% latex2htmlcolor(a['bordercolor'], a['model'],
self.ownerDocument.userdata.getPath('packages/color/colors')))
class normalcolor(Command):
pass
| 1.976563 | 2 |
ex035A11.py | gabrieleliasdev/python-cev | 0 | 2328 | <reponame>gabrieleliasdev/python-cev
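# Quick reference for the ANSI escapes used below: \033[<style>;<foreground>;<background>m
# style: 0 none, 1 bold, 4 underline, 7 reverse video; foreground codes are 30-37,
# background codes are 40-47, and \033[m resets the terminal colours.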
print('\033[0;33;44mTeste\033[m')
print('\033[4;33;44mTeste\033[m')
print('\033[1;35;43mTeste\033[m')
print('\033[7;32;40mTeste\033[m')
print('\033[7;30mTeste\033[m')
print(" - - - Testando os 40 - - -")
print("\033[0;37;40mPreto\033[m")
print("\033[0;30;41mVermelho\033[m")
print("\033[0;30;42mVerde\033[m")
print("\033[0;30;43mAmarelo\033[m")
print("\033[0;30;44mRoxo\033[m")
print("\033[0;30;45mLilás\033[m")
print("\033[0;30;46mTurquesa\033[m")
print("\033[0;30;47mBranco\033[m")
print("\033[0;36;48mFundo Transparente\033[m")
print(" - - - Testando os 30 - - -")
print("\033[0;37;40mTeste\033[m")
print("\033[0;31;40mTeste\033[m")
print("\033[0;32;40mTeste\033[m")
print("\033[0;33;40mTeste\033[m")
print("\033[0;34;40mTeste\033[m")
print("\033[0;35;40mTeste\033[m")
print("\033[0;36;40mTeste\033[m")
print("\033[0;37;40mTeste\033[m")
print("\033[0;38;40mTeste\033[m")
print(" - - - Testando os 1ª - - -")
print("\033[0;30;47mTeste\033[m")
print("\033[1;30;47mTexto em Negrito\033[m")
print("\033[2;30;47mTeste\033[m")
print("\033[3;30;47mFonta Itálica\033[m")
print("\033[4;30;47mSublinhado\033[m")
print("\033[5;30;47mTeste\033[m")
print("\033[6;30;47mTeste\033[m")
print("\033[7;30;47mTeste\033[m")
print("\033[7;38;47mTeste\033[m") | 2.671875 | 3 |
tg/release.py | TurboGears/tg2 | 812 | 2329 | """TurboGears project related information"""
version = "2.4.3"
description = "Next generation TurboGears"
long_description="""
TurboGears brings together a best of breed python tools
to create a flexible, full featured, and easy to use web
framework.
TurboGears 2 provides an integrated and well tested set of tools for
everything you need to build dynamic, database driven applications.
It provides a full range of tools for front end javascript
development, back end database development and everything in between:
* dynamic javascript powered widgets (ToscaWidgets2)
* automatic JSON generation from your controllers
* powerful, designer friendly XHTML based templating
* object or route based URL dispatching
* powerful Object Relational Mappers (SQLAlchemy)
The latest development version is available in the
`TurboGears Git repositories`_.
.. _TurboGears Git repositories:
https://github.com/TurboGears
"""
url="http://www.turbogears.org/"
author= "<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> and the TurboGears community"
email = "<EMAIL>"
copyright = """Copyright 2005-2020 <NAME>, <NAME>, <NAME>, <NAME>, <NAME> and contributors"""
license = "MIT"
| 1.835938 | 2 |
swm-master/swm-master/calc/mean_e_calc.py | m2lines/subgrid | 1 | 2330 | <filename>swm-master/swm-master/calc/mean_e_calc.py
## PRODUCE MEAN CALCULATIONS AND EXPORT AS .NPY
from __future__ import print_function
path = '/home/mkloewer/python/swm/'
import os; os.chdir(path) # change working directory
import numpy as np
from scipy import sparse
import time as tictoc
from netCDF4 import Dataset
# OPTIONS
runfolder = 15
print('Calculating subgrid-EKE means from run ' + str(runfolder))
## read data
runpath = path+'data/run%04i' % runfolder
skip = 5*365
e = np.load(runpath+'/e_sub.npy')[skip:,:,:]
print('run %i read.' % runfolder)
## create ouputfolder
try:
os.mkdir(runpath+'/analysis')
except:
pass
## U,V,H mean
em = e.mean(axis=0)
print('e mean done.')
## STORING
dic = dict()
all_var2export = ['em']
for v in all_var2export:
exec('dic[v] ='+v)
np.save(runpath+'/analysis/mean_e.npy',dic)
print('Everything stored.')
| 2.328125 | 2 |
bogglesolver.py | gammazero/pybogglesolver | 0 | 2331 | <reponame>gammazero/pybogglesolver
"""
Module to generate solutions for Boggle grids.
<NAME> 22 Dec. 2009
"""
from __future__ import print_function
import os
import sys
import collections
import trie
if sys.version < '3':
range = xrange
class BoggleSolver(object):
"""
This class uses an external words file as a dictionary of acceptable boggle
words. When an instance of this class is created, it sets up an internal
dictionary to look up valid boggle answers. The class' solve method can be
used repeatedly to generate solutions for different boggle grids.
"""
def __init__(self, words_file, xlen=4, ylen=4, pre_compute_adj=False):
"""Create and initialize BoggleSolver instance.
This creates the internal trie for fast word lookup letter-by-letter.
Words that begin with capital letters and words that are not within the
specified length limits are filtered out.
Arguments:
xlen -- X dimension (width) of board.
ylen -- Y dimension (height) of board.
pre_compute_adj -- Pre-compute adjacency matrix.
"""
assert(xlen > 1)
assert(ylen > 1)
self.xlen = xlen
self.ylen = ylen
self.board_size = xlen * ylen
if pre_compute_adj:
self.adjacency = BoggleSolver._create_adjacency_matrix(xlen, ylen)
else:
self.adjacency = None
self.trie = BoggleSolver._load_dictionary(
words_file, self.board_size, 3)
def solve(self, grid):
"""Generate all solutions for the given boggle grid.
Arguments:
grid -- A string of 16 characters representing the letters in a boggle
grid, from top left to bottom right.
Returns:
A list of words found in the boggle grid.
None if given invalid grid.
"""
if self.trie is None:
raise RuntimeError('words file not loaded')
if len(grid) != self.board_size:
raise RuntimeError('invalid board')
board = list(grid)
trie = self.trie
words = set()
q = collections.deque()
adjs = self.adjacency
for init_sq in range(self.board_size):
c = board[init_sq]
q.append((init_sq, c, trie.get_child(c), [init_sq]))
while q:
parent_sq, prefix, pnode, seen = q.popleft()
pnode_get_child = pnode.get_child
if adjs:
adj = adjs[parent_sq]
else:
adj = self._calc_adjacency(self.xlen, self.ylen, parent_sq)
for cur_sq in adj:
if cur_sq in seen:
continue
c = board[cur_sq]
cur_node = pnode_get_child(c)
if cur_node is None:
continue
s = prefix + c
q.append((cur_sq, s, cur_node, seen + [cur_sq]))
if cur_node._is_word:
if s[0] == 'q':
# Rehydrate q-words with 'u'.
words.add('qu' + s[1:])
else:
words.add(s)
return words
def show_grid(self, grid):
"""Utility method to print a 4x4 boggle grid.
Arguments:
grid -- A string of X*Y characters representing the letters in a boggle
grid, from top left to bottom right.
"""
for y in range(self.ylen):
print('+' + '---+' * self.xlen)
yi = y * self.xlen
line = ['| ']
for x in range(self.xlen):
cell = grid[yi+x].upper()
if cell == 'Q':
line.append('Qu')
line.append('| ')
else:
line.append(cell)
line.append(' | ')
print(''.join(line))
print('+' + '---+' * self.xlen)
def find_substrings(self, string):
"""Find all valid substrings in the given string.
This method is not necessary for the boggle solver, but is a utility
for testing that all substrings of a word are correctly found.
Arguments:
string -- The string in which to search for valid substrings.
Returns:
List of substrings that are valid words.
"""
found = set()
for start in range(len(string)):
cur = self.trie
letters = [None] * self.board_size
count = 0
for l in string[start:]:
letters[count] = l
count += 1
cur = cur.get_child(l)
if cur is None:
break
if cur._is_word:
found.add(''.join(letters[:count]))
if not cur.has_children():
break
return found
@staticmethod
def _load_dictionary(words_file, max_len, min_len):
"""Private method to create the trie for finding words.
Arguments:
words_file -- Path of file containing words for reference.
Return:
Count of words inserted into trie.
"""
if not os.path.isfile(words_file):
raise RuntimeError('words file not found: ' + words_file)
print('creating dictionary...')
root = trie.Trie()
word_count = 0
if words_file.endswith('gz'):
import gzip
f = gzip.open(words_file)
elif words_file.endswith('bz2'):
import bz2
f = bz2.BZ2File(words_file)
else:
f = open(words_file)
try:
for word in f:
if sys.version < '3':
word = word.strip()
else:
word = word.strip().decode("utf-8")
# Skip words that are too long or too short.
word_len = len(word)
if word_len > max_len or word_len < min_len:
continue
# Skip words that start with capital letter.
if word[0].isupper():
continue
if word[0] == 'q':
# Skip words starting with q not followed by u.
if word[1] != 'u':
continue
# Remove "u" from q-words so that only the q is matched.
word = 'q' + word[2:]
root.insert(word)
word_count += 1
finally:
f.close()
print('Loaded', word_count, 'words from file.')
return root
@staticmethod
def _create_adjacency_matrix(xlim, ylim):
adj_list = [[]] * (ylim * xlim)
for i in range(ylim * xlim):
# Current cell index = y * xlim + x
adj = BoggleSolver._calc_adjacency(xlim, ylim, i)
adj_list[i] = adj
return adj_list
@staticmethod
def _calc_adjacency(xlim, ylim, sq):
adj = []
        y = sq // xlim
x = sq - (y * xlim)
# Look at row above current cell.
if y-1 >= 0:
above = sq - xlim
# Look to upper left.
if x-1 >= 0:
adj.append(above - 1)
# Look above.
adj.append(above)
# Look upper right.
if x+1 < xlim:
adj.append(above + 1)
# Look at same row that current cell is on.
# Look to left of current cell.
if x-1 >= 0:
adj.append(sq - 1)
# Look to right of current cell.
if x+1 < xlim:
adj.append(sq + 1)
# Look at row below current cell.
if y+1 < ylim:
below = sq + xlim
# Look to lower left.
if x-1 >= 0:
adj.append(below - 1)
# Look below.
adj.append(below)
# Look to lower rigth.
if x+1 < xlim:
adj.append(below + 1)
return adj
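# A small usage sketch (the words file path is a placeholder; any newline-separated
# word list will do):
#
#   solver = BoggleSolver('/usr/share/dict/words', xlen=4, ylen=4)
#   grid = 'qadfetriihkriflv'
#   solver.show_grid(grid)
#   for word in sorted(solver.solve(grid)):
#       print(word)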
| 2.828125 | 3 |
tests/manage/monitoring/pagerduty/test_ceph.py | MeridianExplorer/ocs-ci | 0 | 2332 | <gh_stars>0
import logging
import pytest
from ocs_ci.framework.testlib import (
managed_service_required,
skipif_ms_consumer,
tier4,
tier4a,
)
from ocs_ci.ocs import constants
from ocs_ci.utility import pagerduty
log = logging.getLogger(__name__)
@tier4
@tier4a
@managed_service_required
@skipif_ms_consumer
@pytest.mark.polarion_id("OCS-2771")
def test_corrupt_pg_pd(measure_corrupt_pg):
"""
Test that there is appropriate incident in PagerDuty when Placement group
on one OSD is corrupted and that this incident is cleared when the corrupted
ceph pool is removed.
"""
api = pagerduty.PagerDutyAPI()
# get incidents from time when manager deployment was scaled down
incidents = measure_corrupt_pg.get("pagerduty_incidents")
target_label = constants.ALERT_CLUSTERERRORSTATE
# TODO(fbalak): check the whole string in summary and incident alerts
assert pagerduty.check_incident_list(
summary=target_label,
incidents=incidents,
urgency="high",
)
api.check_incident_cleared(
summary=target_label,
measure_end_time=measure_corrupt_pg.get("stop"),
)
| 2.15625 | 2 |
STANchap7.py | phineas-pta/Bayesian-Methods-for-Hackers-using-PyStan | 1 | 2333 | # -*- coding: utf-8 -*-
import numpy as np, pandas as pd, arviz as az, prince, matplotlib.pyplot as plt, seaborn as sns
from cmdstanpy import CmdStanModel
#%% load data
data = pd.read_csv("data/overfitting.csv", index_col = 'case_id')
data.columns
data.info()
feature_names = data.columns.str.startswith("var_")
predictors = data[data.columns[feature_names]]
labels = data["Target_Practice"]
ix_training = data.train == 1
training_data = predictors[ix_training]
training_labels = labels[ix_training]
ix_testing = data.train == 0
testing_data = predictors[ix_testing]
testing_labels = labels[ix_testing]
sns.displot(training_data.values.flatten(), bins = "sqrt", kde = True)
pca = prince.PCA(n_components = 2, as_array = False).fit(training_data)
pca.plot_row_coordinates(training_data, color_labels = training_labels)
pca.column_correlations(training_data).plot.scatter(x = 0, y = 1) # weird column name
#%% Roshan Sharma model
mdl_data = { # problem with JSON dump => cast to python native type
'N': ix_training.sum().tolist(),
'N2': ix_testing.sum().tolist(),
'K': feature_names.sum().tolist(),
'y': training_labels.values.tolist(),
'X': training_data.values.tolist(),
'new_X': testing_data.values.tolist(),
}
modelfile = "OverfittingRoshanSharma.stan"
with open(modelfile, "w") as file: file.write("""
data {
int N; // the number of training observations
int N2; // the number of test observations
int K; // the number of features
int y[N]; // the response
matrix[N,K] X; // the model matrix
matrix[N2,K] new_X; // the matrix for the predicted values
}
parameters { // regression parameters
real alpha;
vector[K] beta;
}
transformed parameters {
vector[N] linpred = alpha + X * beta;
}
model {
alpha ~ cauchy(0, 10); // prior for the intercept following Gelman 2008
beta ~ student_t(1, 0, 0.03);
y ~ bernoulli_logit(linpred);
}
generated quantities { // y values predicted by the model
vector[N2] y_pred = alpha + new_X * beta;
}
""")
var_name_array = ["alpha"] + [f"beta[{i+1}]" for i in range(mdl_data["K"])]
var_name_combi = ["alpha", "beta"]
sm = CmdStanModel(stan_file = modelfile)
# maximum likelihood estimation
optim = sm.optimize(data = mdl_data).optimized_params_pd
optim[optim.columns[~optim.columns.str.startswith("lp")]]
plt.plot(optim[var_name_array[1:]].values[0])
# variational inference
vb = sm.variational(data = mdl_data)
vb.variational_sample.columns = vb.variational_params_dict.keys()
vb_name = vb.variational_params_pd.columns[~vb.variational_params_pd.columns.str.startswith(("lp", "log_"))]
vb.variational_params_pd[var_name_array]
vb.variational_sample[var_name_array]
# Markov chain Monte Carlo
fit = sm.sample(
data = mdl_data, show_progress = True, chains = 4,
iter_sampling = 50000, iter_warmup = 10000, thin = 5
)
fit.draws().shape # iterations, chains, parameters
fit.summary().loc[var_name_array] # pandas DataFrame
print(fit.diagnose())
posterior = {k: fit.stan_variable(k) for k in var_name_combi}
az_trace = az.from_cmdstanpy(fit)
az.summary(az_trace).loc[var_name_array] # pandas DataFrame
az.plot_trace(az_trace, var_names = ["alpha"])
az.plot_forest(az_trace, var_names = ["beta"])
sample_pred = fit.stan_variable('y_pred')
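#%% held-out check -- an added sketch, not part of the original chapter code
# average the posterior predictive probabilities and threshold at 0.5
prob_pred = 1 / (1 + np.exp(-sample_pred)) # logits -> probabilities, shape: draws x N2
accuracy = np.mean((prob_pred.mean(axis = 0) > 0.5) == testing_labels.values)
print("held-out accuracy:", round(accuracy, 3))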
# <NAME> model: DOES NOT WORK yet
# need to figure out how to marginalize all discrete params
| 2.59375 | 3 |
watcher.py | factabulous/matgrindr | 1 | 2334 | # -*- coding: utf-8 -*-
import json
import threading
import os
import time
import mats
import sys
import requests
import traceback
import re
from util import debug, error
class MatsLoader(threading.Thread):
"""
Fire and forget loader for materials - will queue a 'mats' event or
an 'error' event if the load fails. Automatically runs as a daemon
"""
def __init__(self, filename, queue):
"""
filename is the file to async load
queue is the queue to report the results into
"""
threading.Thread.__init__(self)
self.queue = queue
self.filename = filename
self.daemon = True
def run(self):
try:
m = mats.Materials(self.filename)
self.queue.put( { 'mats': m._materials } )
except:
self.queue.put( { 'error': 'Failed to load materials ' + str(sys.exc_info()[0]) } )
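    # A usage sketch matching the docstring (the filename is illustrative):
    #
    #   from queue import Queue
    #   q = Queue()
    #   MatsLoader('materials.json', q).start()
    #   event = q.get()              # blocks until the loader reports
    #   if 'error' in event:
    #       error(event['error'])
    #   else:
    #       materials = event['mats']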
class MatsLoaderRemote(threading.Thread):
"""
Fire and forget loader for materials - will queue a 'mats' event or
an 'error' event if the load fails. Automatically runs as a daemon
"""
def __init__(self, filename, queue):
"""
filename is the cache file - we only read the remote file
if the cache is old (or missing)
queue is the queue to report the results into
"""
threading.Thread.__init__(self)
self.filename = filename
self.queue = queue
self.daemon = True
self.integerRe = re.compile(r'^-?\d+$')
self.floatRe = re.compile(r'^-?\d+(\.\d+)?$')
self.arrayRe = re.compile(r'^\[.*\]$')
def need_refresh(self):
"""
Returns True if the local cache needs a refresh.
"""
if not os.path.exists(self.filename):
return True
mtime = os.path.getmtime(self.filename)
now = time.time()
return mtime < now - 24 * 3600 # Daily update
def array_splitter(self, value):
return [ x[1:-1] for x in value[1:-1].split(", ") ]
def detect(self, value):
"""
Looks at a data value and converts into an appropriate type
(maybe should look at using ast instead)
"""
if self.integerRe.match(value):
return int(value)
elif self.floatRe.match(value):
return float(value)
elif self.arrayRe.match(value):
return self.array_splitter(value)
else:
return value
def parse(self, text):
"""
Parse a string field containing all the data ina TSV
into an array of dicts. Mainly split out so we can test
"""
lines = text.replace("\r", "").split("\n")
fields = lines[0].split("\t")
res = []
for entry in lines[1:]:
values = entry.split("\t")
if len(values) < len(fields):
continue
v = {}
for k in range(0, len(fields)):
v[fields[k]] = self.detect(values[k])
res.append(v)
return res
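    # For example (illustrative two-column data):
    #
    #   parse("name\tcount\nIron\t3\nNickel\t2")
    #   -> [{'name': 'Iron', 'count': 3}, {'name': 'Nickel', 'count': 2}]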
def run(self):
try:
if self.need_refresh():
r = requests.get("https://docs.google.com/spreadsheets/u/0/d/1g0y7inyvQopJ93jP5YIu3n0veX0ng8DraJXAvZk6pS4/export?format=tsv&id=1g0y7inyvQopJ93jP5YIu3n0veX0ng8DraJXAvZk6pS4&gid=0")
res = self.parse(r.text)
if res:
with open(self.filename, "wt") as cache_file:
json.dump(res, cache_file)
self.queue.put( { 'mats': res } )
debug("Async remote mats loader from tsv is completed {} entries".format(len(res)))
else:
error("Async remote mats loader failed - zero records")
else:
with open(self.filename, "rt") as cache_file:
res = json.load(cache_file)
self.queue.put( { 'mats': res } )
debug("loader from cache is completed {} entries".format(len(res)))
except:
self.queue.put( { 'error': 'Failed to load tsv materials ' + str(sys.exc_info()[0]) + ' ' + traceback.format_exc() } )
| 2.625 | 3 |
luoxia/pipelines.py | pighui/luoxia | 2 | 2335 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import os
from scrapy import Request
from scrapy.pipelines.images import ImagesPipeline
from luoxia import settings
class LuoxiaPipeline(object):
def process_item(self, item, spider):
        title = item['title']
bookname = item['bookname']
titlename = item['titlename']
text = item['text']
path = "books/%s/%s/" % (title, bookname)
if not os.path.exists(path):
os.makedirs(path)
with open(path+titlename+'.txt', 'a', encoding='utf-8') as f:
f.write(text)
return item
class LuoxiaImagePipeline(ImagesPipeline):
def get_media_requests(self, item, info):
for url in item['image_urls']:
yield Request(url, meta={'title': item['title'],
'bookname': item['bookname']})
def item_completed(self, results, item, info):
        # Store the downloaded image paths on the item
item['images'] = [x for ok, x in results if ok]
return item
def file_path(self, request, response=None, info=None):
        # Create a directory for each book to hold all of its images
title = request.meta['title']
bookname = request.meta['bookname']
book_dir = os.path.join(settings.IMAGES_STORE, title +'/'+ bookname)
if not os.path.exists(book_dir):
os.makedirs(book_dir)
        # Extract the file extension from the URL
try:
ext_name = request.url.split(".")[-1]
except:
ext_name = 'jpg'
        # Return the relative path
return '%s/%s/%s.%s' % (title, bookname, bookname, ext_name) | 2.796875 | 3 |
aws_sagemaker_studio/frameworks/tensorflow_mnist/mnist.py | jpmarques19/tensorflwo-test | 5 | 2336 | <reponame>jpmarques19/tensorflwo-test<filename>aws_sagemaker_studio/frameworks/tensorflow_mnist/mnist.py<gh_stars>1-10
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Convolutional Neural Network Estimator for MNIST, built with tf.layers."""
from __future__ import absolute_import, division, print_function
import argparse
import json
import os
import numpy as np
import tensorflow as tf
def cnn_model_fn(features, labels, mode):
"""Model function for CNN."""
# Input Layer
# Reshape X to 4-D tensor: [batch_size, width, height, channels]
# MNIST images are 28x28 pixels, and have one color channel
input_layer = tf.reshape(features['x'], [-1, 28, 28, 1])
# Convolutional Layer #1
# Computes 32 features using a 5x5 filter with ReLU activation.
# Padding is added to preserve width and height.
# Input Tensor Shape: [batch_size, 28, 28, 1]
# Output Tensor Shape: [batch_size, 28, 28, 32]
conv1 = tf.layers.conv2d(
inputs=input_layer,
filters=32,
kernel_size=[5, 5],
padding='same',
activation=tf.nn.relu
)
# Pooling Layer #1
# First max pooling layer with a 2x2 filter and stride of 2
# Input Tensor Shape: [batch_size, 28, 28, 32]
# Output Tensor Shape: [batch_size, 14, 14, 32]
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
# Convolutional Layer #2
# Computes 64 features using a 5x5 filter.
# Padding is added to preserve width and height.
# Input Tensor Shape: [batch_size, 14, 14, 32]
# Output Tensor Shape: [batch_size, 14, 14, 64]
conv2 = tf.layers.conv2d(
inputs=pool1,
filters=64,
kernel_size=[5, 5],
padding='same',
activation=tf.nn.relu
)
# Pooling Layer #2
# Second max pooling layer with a 2x2 filter and stride of 2
# Input Tensor Shape: [batch_size, 14, 14, 64]
# Output Tensor Shape: [batch_size, 7, 7, 64]
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
# Flatten tensor into a batch of vectors
# Input Tensor Shape: [batch_size, 7, 7, 64]
# Output Tensor Shape: [batch_size, 7 * 7 * 64]
pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
# Dense Layer
# Densely connected layer with 1024 neurons
# Input Tensor Shape: [batch_size, 7 * 7 * 64]
# Output Tensor Shape: [batch_size, 1024]
dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
# Add dropout operation; 0.6 probability that element will be kept
dropout = tf.layers.dropout(
inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
# Logits layer
# Input Tensor Shape: [batch_size, 1024]
# Output Tensor Shape: [batch_size, 10]
logits = tf.layers.dense(inputs=dropout, units=10)
predictions = {
# Generate predictions (for PREDICT and EVAL mode)
'classes': tf.argmax(input=logits, axis=1),
# Add `softmax_tensor` to the graph. It is used for PREDICT and by the
# `logging_hook`.
'probabilities': tf.nn.softmax(logits, name='softmax_tensor')
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
# Calculate Loss (for both TRAIN and EVAL modes)
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
# Configure the Training Op (for TRAIN mode)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
train_op = optimizer.minimize(
loss=loss,
global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
# Add evaluation metrics (for EVAL mode)
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predictions['classes'])}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def _load_training_data(base_dir):
x_train = np.load(os.path.join(base_dir, 'train_data.npy'))
y_train = np.load(os.path.join(base_dir, 'train_labels.npy'))
return x_train, y_train
def _load_testing_data(base_dir):
x_test = np.load(os.path.join(base_dir, 'eval_data.npy'))
y_test = np.load(os.path.join(base_dir, 'eval_labels.npy'))
return x_test, y_test
def _parse_args():
parser = argparse.ArgumentParser()
# Data, model, and output directories.
# model_dir is always passed in from SageMaker.
# By default this is a S3 path under the default bucket.
parser.add_argument('--model_dir', type=str)
parser.add_argument('--sm-model-dir', type=str, default=os.environ.get('SM_MODEL_DIR'))
parser.add_argument('--train', type=str, default=os.environ.get('SM_CHANNEL_TRAINING'))
parser.add_argument('--hosts', type=list, default=json.loads(os.environ.get('SM_HOSTS')))
parser.add_argument('--current-host', type=str, default=os.environ.get('SM_CURRENT_HOST'))
return parser.parse_known_args()
def serving_input_fn():
inputs = {'x': tf.placeholder(tf.float32, [None, 784])}
return tf.estimator.export.ServingInputReceiver(inputs, inputs)
if __name__ == '__main__':
args, _ = _parse_args()
train_data, train_labels = _load_training_data(args.train)
eval_data, eval_labels = _load_testing_data(args.train)
# Create the Estimator
mnist_classifier = tf.estimator.Estimator(model_fn=cnn_model_fn, model_dir=args.model_dir)
# Set up logging for predictions
# Log the values in the 'Softmax' tensor with label 'probabilities'
tensors_to_log = {'probabilities': 'softmax_tensor'}
logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=50)
# Train the model
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={'x': train_data},
y=train_labels,
batch_size=100,
num_epochs=None,
shuffle=True
)
# Evaluate the model and print results
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
x={'x': eval_data},
y=eval_labels,
num_epochs=1,
shuffle=False
)
train_spec = tf.estimator.TrainSpec(train_input_fn, max_steps=20000)
eval_spec = tf.estimator.EvalSpec(eval_input_fn)
tf.estimator.train_and_evaluate(mnist_classifier, train_spec, eval_spec)
if args.current_host == args.hosts[0]:
mnist_classifier.export_savedmodel(args.sm_model_dir, serving_input_fn)
| 2.203125 | 2 |
common/util/autoware_debug_tools/scripts/stop_reason2pose.py | loop-perception/AutowareArchitectureProposal.iv | 12 | 2337 | #! /usr/bin/env python3
# Copyright 2020 Tier IV, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import math
import sys
from autoware_planning_msgs.msg import StopReasonArray
from case_converter import pascal2snake
from geometry_msgs.msg import PoseStamped
import numpy as np
import rclpy
from rclpy.node import Node
from rtree import index
from self_pose_listener import SelfPoseListener
class StopReason2PoseNode(Node):
def __init__(self, options):
super().__init__("stop_reason2pose_node")
self._options = options
self._sub_pose = self.create_subscription(
StopReasonArray, self._options.topic_name, self._on_stop_reasons, 1
)
self._pub_pose_map = {}
self._idx_map = {}
self._pose_map = {}
self._self_pose_listener = SelfPoseListener()
self.timer = self.create_timer((1.0 / 100), self._self_pose_listener.get_current_pose)
def _on_stop_reasons(self, msg):
for stop_reason in msg.stop_reasons:
snake_case_stop_reason = pascal2snake(stop_reason.reason)
if len(stop_reason.stop_factors) == 0:
self.get_logger().warn("stop_factor is null")
return
for stop_factor in stop_reason.stop_factors:
pose = PoseStamped()
pose.header = msg.header
pose.pose = stop_factor.stop_pose
# Get nearest pose
th_dist = 1.0
nearest_pose_id = self._get_nearest_pose_id(
snake_case_stop_reason, pose.pose, th_dist
)
if nearest_pose_id:
self._update_pose(snake_case_stop_reason, pose.pose, nearest_pose_id)
pose_id = nearest_pose_id
else:
pose_id = self._register_pose(snake_case_stop_reason, pose.pose)
pose_topic_name = "{snake_case_stop_reason}_{pose_id}".format(**locals())
topic_ns = "/autoware_debug_tools/stop_reason2pose/"
if pose_topic_name not in self._pub_pose_map:
self._pub_pose_map[pose_topic_name] = self.create_publisher(
PoseStamped, topic_ns + pose_topic_name, 1
)
self._pub_pose_map[pose_topic_name].publish(pose)
# Publish nearest stop_reason without number
nearest_pose = PoseStamped()
nearest_pose.header = msg.header
nearest_pose.pose = self._get_nearest_pose_in_array(
stop_reason, self._self_pose_listener.self_pose
)
if nearest_pose.pose:
if snake_case_stop_reason not in self._pub_pose_map:
topic_ns = "/autoware_debug_tools/stop_reason2pose/"
self._pub_pose_map[snake_case_stop_reason] = self.create_publisher(
PoseStamped, topic_ns + snake_case_stop_reason, 1
)
self._pub_pose_map[snake_case_stop_reason].publish(nearest_pose)
def _get_nearest_pose_in_array(self, stop_reason, self_pose):
poses = [stop_factor.stop_pose for stop_factor in stop_reason.stop_factors]
if not poses:
return None
        # np.argmin needs a realized sequence, not a lazy map object (Python 3)
        distances = [StopReason2PoseNode.calc_distance2d(p, self_pose) for p in poses]
        nearest_idx = np.argmin(distances)
return poses[nearest_idx]
def _find_nearest_pose_id(self, name, pose):
if name not in self._idx_map:
self._idx_map[name] = index.Index()
return self._idx_map[name].nearest(StopReason2PoseNode.pose2boundingbox(pose), 1)
def _get_nearest_pose_id(self, name, pose, th_dist):
nearest_pose_ids = list(self._find_nearest_pose_id(name, pose))
if not nearest_pose_ids:
return None
nearest_pose_id = nearest_pose_ids[0]
nearest_pose = self._get_pose(name, nearest_pose_id)
if not nearest_pose:
return None
dist = StopReason2PoseNode.calc_distance2d(pose, nearest_pose)
if dist > th_dist:
return None
return nearest_pose_id
def _get_pose(self, name, pose_id):
if name not in self._pose_map:
return None
return self._pose_map[name][pose_id]
def _update_pose(self, name, pose, pose_id):
        self._pose_map[name][pose_id] = pose
self._idx_map[name].insert(pose_id, StopReason2PoseNode.pose2boundingbox(pose))
def _register_pose(self, name, pose):
if name not in self._pose_map:
self._pose_map[name] = {}
pose_id = len(self._pose_map[name]) + 1
self._pose_map[name][pose_id] = pose
self._idx_map[name].insert(pose_id, StopReason2PoseNode.pose2boundingbox(pose))
return pose_id
@staticmethod
def calc_distance2d(pose1, pose2):
p1 = pose1.position
p2 = pose2.position
return math.hypot(p1.x - p2.x, p1.y - p2.y)
@staticmethod
def pose2boundingbox(pose):
return [pose.position.x, pose.position.y, pose.position.x, pose.position.y]
def main(args):
rclpy.init()
parser = argparse.ArgumentParser()
parser.add_argument("topic_name", type=str)
ns = parser.parse_args(args)
stop_reason2pose_node = StopReason2PoseNode(ns)
rclpy.spin(stop_reason2pose_node)
stop_reason2pose_node.destroy_node()
rclpy.shutdown()
if __name__ == "__main__":
main(sys.argv[1:])
| 2.203125 | 2 |
aiounittest/case.py | tmaila/aiounittest | 55 | 2338 | import asyncio
import unittest
from .helpers import async_test
class AsyncTestCase(unittest.TestCase):
    ''' AsyncTestCase allows testing of asynchronous functions.
The usage is the same as :code:`unittest.TestCase`. It works with other test frameworks
    and runners (e.g. `pytest`, `nose`) as well.
AsyncTestCase can run:
- test of synchronous code (:code:`unittest.TestCase`)
- test of asynchronous code, supports syntax with
:code:`async`/:code:`await` (Python 3.5+) and
:code:`asyncio.coroutine`/:code:`yield from` (Python 3.4)
Code to test:
.. code-block:: python
import asyncio
async def async_add(x, y, delay=0.1):
await asyncio.sleep(delay)
return x + y
async def async_one():
await async_nested_exc()
async def async_nested_exc():
await asyncio.sleep(0.1)
raise Exception('Test')
Tests:
.. code-block:: python
import aiounittest
class MyTest(aiounittest.AsyncTestCase):
async def test_await_async_add(self):
ret = await async_add(1, 5)
self.assertEqual(ret, 6)
async def test_await_async_fail(self):
with self.assertRaises(Exception) as e:
await async_one()
'''
def get_event_loop(self):
''' Method provides an event loop for the test
        It is called before each test; by default :code:`aiounittest.AsyncTestCase` creates a brand new event
        loop every time. After completion, the loop is closed and then recreated, set as default,
leaving asyncio clean.
.. note::
            In the most common cases you don't have to bother about this method; the default implementation is the recommended one.
            But if, for some reason, you want to provide your own event loop, just override it. Note that :code:`AsyncTestCase` won't close such a loop.
.. code-block:: python
class MyTest(aiounittest.AsyncTestCase):
def get_event_loop(self):
self.my_loop = asyncio.get_event_loop()
return self.my_loop
'''
return None
def __getattribute__(self, name):
attr = super().__getattribute__(name)
if name.startswith('test_') and asyncio.iscoroutinefunction(attr):
return async_test(attr, loop=self.get_event_loop())
else:
return attr
| 3.390625 | 3 |
US Flag.py | Code-Master1234/Turtle_Flags_File_Hub | 0 | 2339 | <reponame>Code-Master1234/Turtle_Flags_File_Hub<gh_stars>0
import turtle as t
def rectangle(horizontal, vertical, color):
t.pendown()
t.pensize(1)
t.color(color)
t.begin_fill()
for counter in range(2):
t.forward(horizontal)
t.right(90)
t.forward(vertical)
t.right(90)
t.end_fill()
t.penup()
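# star() below draws a filled star by alternating a short and a long left turn;
# the turn sizes are derived from the angle sum of a (2*points)-sided polygon,
# and the /3.5 split of each angle is a tuned ratio rather than an exact
# star-polygon formula.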
def star(length, points, color):
sumangle = ((points*2)-2) * 180
oneangle = sumangle/points
smallangle = oneangle/3.5
bigangle = oneangle - smallangle
t.color(color)
t.pendown()
t.begin_fill()
t.penup()
for counter in range(points):
t.forward(length)
t.left(smallangle)
t.forward(length)
t.left(bigangle)
t.end_fill()
t.penup()
gotoy = 222
t.speed(0)
t.setup(988,520)
t.goto(494,260)
t.pendown()
for counter in range(7):
t.setheading(-90)
rectangle(40,988,'#B22234')
t.setheading(-90)
t.forward(80)
t.penup()
t.setheading(0)
t.goto(-494,260)
t.pendown()
rectangle(494,280,'#3C3B6E')
t.goto(-474,245)
for counter in range(4):
for counter in range(6):
star(9,5,'white')
t.setheading(0)
t.forward(84)
t.penup()
t.goto(-434,gotoy)
gotoy = gotoy - 28
t.pendown()
for counter in range(5):
star(9,5,'white')
t.setheading(0)
t.forward(84)
t.goto(-476,gotoy)
gotoy = gotoy - 28
for counter in range(6):
star(9,5,'white')
t.setheading(0)
t.forward(84)
t.penup()
t.hideturtle()
| 2.96875 | 3 |
linked-list/delete_zero_sum_nodes.py | bryanlimy/technical-interview | 3 | 2340 | <reponame>bryanlimy/technical-interview<filename>linked-list/delete_zero_sum_nodes.py
# Given a linked list, remove consecutive nodes that sum up to zero
# https://www.careercup.com/question?id=5717797377146880
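# Example (illustrative): 6 -> -6 -> 8 -> 4 -> -12 -> 9 -> 8 -> -8
# The runs (6, -6), (8, 4, -12) and (8, -8) each sum to zero, so only 9 remains.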
from util import *
def remove_zero_sum(head):
start = head
new = None
root = None
while start:
end = start.next
total = start.value
zero = False
while end:
total += end.value
if total == 0:
zero = True
start = end
break
end = end.next
if not zero and not new:
new = Node(start.value)
root = new
elif not zero and new:
            new.next = Node(start.value)
            new = new.next
start = start.next
return root
if __name__ == "__main__":
s1 = [6, -6, 8, 4, -12, 9, 8, -8]
    s2 = [4, 6, -10, 8, 9, 10, -19, 10, -18, 20, 25]  # assumed dropped comma in the original "6 - 10"
s3 = [2, 3, -5, 10, 10, -5, -5, 20, 5, -5]
samples = [s1,s2,s3]
for sample in samples:
head = create_linked_list(sample)
print(linked_list_to_list(head))
result = remove_zero_sum(head)
print(linked_list_to_list(result))
print("\n")
| 3.703125 | 4 |
src/azure-cli/azure/cli/command_modules/maps/custom.py | psignoret/azure-cli | 1 | 2341 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.log import get_logger
from knack.prompting import prompt_y_n
from knack.util import CLIError
from azure.mgmt.maps.models import (
MapsAccountCreateParameters,
Sku)
ACCOUNT_LOCATION = 'global'
logger = get_logger(__name__)
def create_account(client, resource_group_name, account_name, sku_name='S0', tags=None, force=None):
terms = 'By creating an Azure Maps account, you agree that you have read and agree to the ' \
'\nLicense (https://azure.microsoft.com/support/legal/) and ' \
'\nPrivacy Statement (https://privacy.microsoft.com/privacystatement).'
hint = 'Please select.'
client_denied_terms = 'You must agree to the License and Privacy Statement to create an account.'
# Show ToS message to the user
logger.warning(terms)
# Prompt yes/no for the user, if --force parameter is not passed in.
if not force:
option = prompt_y_n(hint)
if not option:
raise CLIError(client_denied_terms)
# Submit query
sku = Sku(name=sku_name)
maps_account_create_params = MapsAccountCreateParameters(location=ACCOUNT_LOCATION, sku=sku, tags=tags)
return client.create_or_update(resource_group_name, account_name, maps_account_create_params)
def list_accounts(client, resource_group_name=None):
# Retrieve accounts via subscription
if resource_group_name is None:
return client.list_by_subscription()
# Retrieve accounts via resource group
return client.list_by_resource_group(resource_group_name)
def generic_update_account(instance, sku_name=None, tags=None):
# Pre-populate with old instance
maps_account_create_params = MapsAccountCreateParameters(location=ACCOUNT_LOCATION, sku=instance.sku,
tags=instance.tags)
# Update fields with new parameter values
if sku_name:
maps_account_create_params.sku.name = sku_name
if tags:
maps_account_create_params.tags = tags
return maps_account_create_params
| 2.28125 | 2 |
examples/wsdm2022/run_seqreco_B.py | Leavingseason/wsdm2022-seqrecsys | 0 | 2342 | import sys
import os
from tempfile import TemporaryDirectory
import numpy as np
import tensorflow.compat.v1 as tf
tf.get_logger().setLevel('ERROR') # only show error messages
from recommenders.utils.timer import Timer
from recommenders.utils.constants import SEED
from recommenders.models.deeprec.deeprec_utils import (
prepare_hparams
)
from recommenders.datasets.amazon_reviews import download_and_extract, data_preprocessing, _create_vocab
from recommenders.datasets.download_utils import maybe_download
from recommenders.models.deeprec.models.sequential.sli_rec import SLI_RECModel as SeqModel
# from recommenders.models.deeprec.models.sequential.asvd import A2SVDModel as SeqModel
# from recommenders.models.deeprec.models.sequential.caser import CaserModel as SeqModel
# from recommenders.models.deeprec.models.sequential.gru4rec import GRU4RecModel as SeqModel
# from recommenders.models.deeprec.models.sequential.sum import SUMModel as SeqModel
#from recommenders.models.deeprec.models.sequential.nextitnet import NextItNetModel
from recommenders.models.deeprec.io.sequential_iterator import SequentialIterator
#from recommenders.models.deeprec.io.nextitnet_iterator import NextItNetIterator
print("System version: {}".format(sys.version))
print("Tensorflow version: {}".format(tf.__version__))
yaml_file = '/home/jialia/wsdm/src/recommenders/examples/wsdm2022/sli_rec_B.yaml'
RANDOM_SEED = SEED # Set None for non-deterministic result
# data_path = os.path.join("tests", "resources", "deeprec", "slirec")
# data_path = '/home/jialia/wsdm/seq_datasets/B_full_feature_v2'
data_path = sys.argv[1]
print(os.path.abspath(data_path)) ## the path where I enter the cmd
# for test
train_file = os.path.join(data_path, r'train_instances.txt')
valid_file = os.path.join(data_path, r'valid_instances.txt')
test_file = os.path.join(data_path, r'valid.tsv')
pred_file = os.path.join(data_path, r'inter_test.tsv')
final_pred_file = os.path.join(data_path, r'final_test.tsv')
user_vocab = os.path.join(data_path, r'user_vocab.pkl')
item_vocab = os.path.join(data_path, r'item_vocab.pkl')
cate_vocab = os.path.join(data_path, r'category_vocab.pkl')
output_file = os.path.join(data_path, r'inter_test_output.txt')
submit_file = os.path.join(data_path, r'final_test_output.txt')
train_num_ngs = 9 # number of negative instances with a positive instance for training
valid_num_ngs = 9 # number of negative instances with a positive instance for validation
test_num_ngs = 9 # number of negative instances with a positive instance for testing
_create_vocab(
[train_file, valid_file],
user_vocab, item_vocab, cate_vocab
)
### NOTE:
### remember to use `_create_vocab(train_file, user_vocab, item_vocab, cate_vocab)` to generate the user_vocab, item_vocab and cate_vocab files, if you are using your own dataset rather than using our demo Amazon dataset.
hparams = prepare_hparams(yaml_file,
# user_dropout=False,
embed_l2=0.,
layer_l2=0.,
enable_BN=True, ##-- True
learning_rate=0.001, # set to 0.01 if batch normalization is disable else 0.001
epochs=100000,
EARLY_STOP=40000,
batch_size=400,
show_step=5000,
MODEL_DIR=os.path.join(data_path, "model/"),
SUMMARIES_DIR=os.path.join(data_path, "summary/"),
user_vocab=user_vocab,
item_vocab=item_vocab,
cate_vocab=cate_vocab,
need_sample=False,
train_num_ngs=train_num_ngs, # provides the number of negative instances for each positive instance for loss computation.
loss='log_loss', #'log_loss', 'softmax'
max_seq_length=50,
cont_feat_len=85,
use_cont_feat=False,
init_item_emb=False,
shuffle=True
)
print(hparams.values)
input_creator = SequentialIterator
model = SeqModel(hparams, input_creator, seed=RANDOM_SEED)
# model.load_model(os.path.join(data_path, "model_20220118_20k_0.8923", 'step_20000'))
with Timer() as train_time:
model = model.fit(train_file, valid_file, valid_num_ngs=9, eval_metric='auc')
print('Time cost for training is {0:.2f} mins'.format(train_time.interval/60.0))
### model = model.fit(test_file, test_file, valid_num_ngs=9, eval_metric='auc') ##-- quick test
model.load_model(os.path.join(data_path, "model", 'best_model'))
res_syn = model.run_eval(test_file, num_ngs=9)
print(res_syn)
model.predict(pred_file, output_file)
model.predict(final_pred_file, submit_file)
# print('Job finished. B, continue training = 20k, seq=50')
# print('Job finished. B_v2, epoch=50k, seq=100')
## ASVD: 0.867497
## GRU: 0.877529
## SLi-Rec: 0.892736
## B_v4: 0.8937
print("Job:B_full_feature_v2, with BN, no cont feat, seq=50, shuffle=True")
## B_full_feature_v2 no cont_feat, with BN
##5k: 0.8778
##10k: 0.8827
##20k: 0.8848
##25k: 0.8824
##35k: 0.8878
##40k: 0.8903
##45k: 0.8876
##50k: 0.8925
##55k: 0.8903
##60k: 0.8894
##65k: 0.8904
##70k: 0.8814
##75k: 0.8896
##80k: 0.8871
##85k: 0.8920
## with shuffle:
##5k: 0.8793
##10k: 0.8884
##15k: 0.8898
##20k: 0.8923
##25k: 0.8908
##30k: 0.8895
##35k: 0.8888
##40k: 0.8913
##45k: 0.8909
##50k: 0.8876
##65k: 0.8881
| 1.742188 | 2 |
ctypesgen/ctypedescs.py | fgrie/ctypesgen | 0 | 2343 | #!/usr/bin/env python
"""
ctypesgen.ctypedescs contains classes to represent a C type. All of these
classes are subclasses of CtypesType.
Unlike in previous versions of ctypesgen, CtypesType and its subclasses are
completely independent of the parser module.
The most important method of CtypesType and its subclasses is the py_string
method. str(ctype) returns a string which, when evaluated in the wrapper
at runtime, results in a ctypes type object.
For example, a CtypesType
representing an array of four integers could be created using:
>>> ctype = CtypesArray(CtypesSimple("int",True,0),4)
str(ctype) would evaluate to "c_int * 4".
"""
import warnings
__docformat__ = "restructuredtext"
ctypes_type_map = {
# typename signed longs
("void", True, 0): "None",
("int", True, 0): "c_int",
("int", False, 0): "c_uint",
("int", True, 1): "c_long",
("int", False, 1): "c_ulong",
("char", True, 0): "c_char",
("char", False, 0): "c_ubyte",
("short", True, 0): "c_short",
("short", False, 0): "c_ushort",
("float", True, 0): "c_float",
("double", True, 0): "c_double",
("double", True, 1): "c_longdouble",
("int8_t", True, 0): "c_int8",
("__int8", True, 0): "c_int8",
("int16_t", True, 0): "c_int16",
("__int16", True, 0): "c_int16",
("int32_t", True, 0): "c_int32",
("__int32", True, 0): "c_int32",
("int64_t", True, 0): "c_int64",
("__int64", True, 0): "c_int64",
("uint8_t", True, 0): "c_uint8",
("uint16_t", True, 0): "c_uint16",
("uint32_t", True, 0): "c_uint32",
("uint64_t", True, 0): "c_uint64",
("_Bool", True, 0): "c_bool",
}
ctypes_type_map_python_builtin = {
("int", True, 2): "c_longlong",
("int", False, 2): "c_ulonglong",
("size_t", True, 0): "c_size_t",
("apr_int64_t", True, 0): "c_int64",
("off64_t", True, 0): "c_int64",
("apr_uint64_t", True, 0): "c_uint64",
("wchar_t", True, 0): "c_wchar",
("ptrdiff_t", True, 0): "c_ptrdiff_t", # Requires definition in preamble
("ssize_t", True, 0): "c_ptrdiff_t", # Requires definition in preamble
("va_list", True, 0): "c_void_p",
}
# This protocol is used for walking type trees.
class CtypesTypeVisitor(object):
def visit_struct(self, struct):
pass
def visit_enum(self, enum):
pass
def visit_typedef(self, name):
pass
def visit_error(self, error, cls):
pass
def visit_identifier(self, identifier):
# This one comes from inside ExpressionNodes. There may be
# ExpressionNode objects in array count expressions.
pass
def visit_type_and_collect_info(ctype):
class Visitor(CtypesTypeVisitor):
def visit_struct(self, struct):
structs.append(struct)
def visit_enum(self, enum):
enums.append(enum)
def visit_typedef(self, typedef):
typedefs.append(typedef)
def visit_error(self, error, cls):
errors.append((error, cls))
def visit_identifier(self, identifier):
identifiers.append(identifier)
structs = []
enums = []
typedefs = []
errors = []
identifiers = []
v = Visitor()
ctype.visit(v)
return structs, enums, typedefs, errors, identifiers
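# Illustrative sketch (not part of the original module): walking a type tree.
# A pointer to a struct reports the struct through visit_struct, so it ends up
# in the returned "structs" list:
#
#   point = CtypesStruct("point", {}, "struct", [("x", CtypesSimple("int", True, 0))])
#   structs, enums, typedefs, errors, identifiers = \
#       visit_type_and_collect_info(CtypesPointer(point, ()))
#   # structs == [point]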
# Remove one level of indirection from function pointer; needed for typedefs
# and function parameters.
def remove_function_pointer(t):
if type(t) == CtypesPointer and type(t.destination) == CtypesFunction:
return t.destination
elif type(t) == CtypesPointer:
t.destination = remove_function_pointer(t.destination)
return t
else:
return t
class CtypesType(object):
def __init__(self):
super(CtypesType, self).__init__()
self.errors = []
def __repr__(self):
return '<Ctype (%s) "%s">' % (type(self).__name__, self.py_string())
def error(self, message, cls=None):
self.errors.append((message, cls))
def visit(self, visitor):
for error, cls in self.errors:
visitor.visit_error(error, cls)
class CtypesSimple(CtypesType):
"""Represents a builtin type, like "char" or "int"."""
def __init__(self, name, signed, longs):
super(CtypesSimple, self).__init__()
self.name = name
self.signed = signed
self.longs = longs
def py_string(self, ignore_can_be_ctype=None):
return ctypes_type_map[(self.name, self.signed, self.longs)]
class CtypesSpecial(CtypesType):
def __init__(self, name):
super(CtypesSpecial, self).__init__()
self.name = name
def py_string(self, ignore_can_be_ctype=None):
return self.name
class CtypesTypedef(CtypesType):
"""Represents a type defined by a typedef."""
def __init__(self, name):
super(CtypesTypedef, self).__init__()
self.name = name
def visit(self, visitor):
if not self.errors:
visitor.visit_typedef(self.name)
super(CtypesTypedef, self).visit(visitor)
def py_string(self, ignore_can_be_ctype=None):
return self.name
class CtypesBitfield(CtypesType):
def __init__(self, base, bitfield):
super(CtypesBitfield, self).__init__()
self.base = base
self.bitfield = bitfield
def visit(self, visitor):
self.base.visit(visitor)
super(CtypesBitfield, self).visit(visitor)
def py_string(self, ignore_can_be_ctype=None):
return self.base.py_string()
class CtypesPointer(CtypesType):
def __init__(self, destination, qualifiers):
super(CtypesPointer, self).__init__()
self.destination = destination
self.qualifiers = qualifiers
def visit(self, visitor):
if self.destination:
self.destination.visit(visitor)
super(CtypesPointer, self).visit(visitor)
def py_string(self, ignore_can_be_ctype=None):
return "POINTER(%s)" % self.destination.py_string()
class CtypesArray(CtypesType):
def __init__(self, base, count):
super(CtypesArray, self).__init__()
self.base = base
self.count = count
def visit(self, visitor):
self.base.visit(visitor)
if self.count:
self.count.visit(visitor)
super(CtypesArray, self).visit(visitor)
def py_string(self, ignore_can_be_ctype=None):
if self.count is None:
return "POINTER(%s)" % self.base.py_string()
if type(self.base) == CtypesArray:
return "(%s) * int(%s)" % (self.base.py_string(), self.count.py_string(False))
else:
return "%s * int(%s)" % (self.base.py_string(), self.count.py_string(False))
class CtypesNoErrorCheck(object):
def py_string(self, ignore_can_be_ctype=None):
return "None"
def __bool__(self):
return False
__nonzero__ = __bool__
class CtypesPointerCast(object):
def __init__(self, target):
self.target = target
def py_string(self, ignore_can_be_ctype=None):
return "lambda v,*a : cast(v, {})".format(self.target.py_string())
class CtypesFunction(CtypesType):
def __init__(self, restype, parameters, variadic, attrib=dict()):
super(CtypesFunction, self).__init__()
self.restype = restype
self.errcheck = CtypesNoErrorCheck()
# Don't allow POINTER(None) (c_void_p) as a restype... causes errors
# when ctypes automagically returns it as an int.
# Instead, convert to POINTER(c_void). c_void is not a ctypes type,
# you can make it any arbitrary type.
if (
type(self.restype) == CtypesPointer
and type(self.restype.destination) == CtypesSimple
and self.restype.destination.name == "void"
):
# we will provide a means of converting this to a c_void_p
self.restype = CtypesPointer(CtypesSpecial("c_ubyte"), ())
self.errcheck = CtypesPointerCast(CtypesSpecial("c_void_p"))
# Return "String" instead of "POINTER(c_char)"
if self.restype.py_string() == "POINTER(c_char)":
if "const" in self.restype.qualifiers:
self.restype = CtypesSpecial("c_char_p")
else:
self.restype = CtypesSpecial("String")
self.argtypes = [remove_function_pointer(p) for p in parameters]
self.variadic = variadic
self.attrib = attrib
def visit(self, visitor):
self.restype.visit(visitor)
for a in self.argtypes:
a.visit(visitor)
super(CtypesFunction, self).visit(visitor)
def py_string(self, ignore_can_be_ctype=None):
return "CFUNCTYPE(UNCHECKED(%s), %s)" % (
self.restype.py_string(),
", ".join([a.py_string() for a in self.argtypes]),
)
last_tagnum = 0
def anonymous_struct_tagnum():
global last_tagnum
last_tagnum += 1
return last_tagnum
def fmt_anonymous_struct_tag(num):
return "anon_%d" % num
def anonymous_struct_tag():
return fmt_anonymous_struct_tag(anonymous_struct_tagnum())
class CtypesStruct(CtypesType):
def __init__(self, tag, attrib, variety, members, src=None):
super(CtypesStruct, self).__init__()
self.tag = tag
self.attrib = attrib
self.variety = variety # "struct" or "union"
self.members = members
if type(self.tag) == int or not self.tag:
if type(self.tag) == int:
self.tag = fmt_anonymous_struct_tag(self.tag)
else:
self.tag = anonymous_struct_tag()
self.anonymous = True
else:
self.anonymous = False
        if self.members is None:
self.opaque = True
else:
self.opaque = False
self.src = src
def get_required_types(self):
types = super(CtypesStruct, self).get_required_types()
types.add((self.variety, self.tag))
return types
def visit(self, visitor):
visitor.visit_struct(self)
if not self.opaque:
for name, ctype in self.members:
ctype.visit(visitor)
super(CtypesStruct, self).visit(visitor)
def get_subtypes(self):
if self.opaque:
return set()
else:
return set([m[1] for m in self.members])
def py_string(self, ignore_can_be_ctype=None):
return "%s_%s" % (self.variety, self.tag)
last_tagnum = 0
def anonymous_enum_tag():
global last_tagnum
last_tagnum += 1
return "anon_%d" % last_tagnum
class CtypesEnum(CtypesType):
def __init__(self, tag, enumerators, src=None):
super(CtypesEnum, self).__init__()
self.tag = tag
self.enumerators = enumerators
if not self.tag:
self.tag = anonymous_enum_tag()
self.anonymous = True
else:
self.anonymous = False
        if self.enumerators is None:
self.opaque = True
else:
self.opaque = False
self.src = src
def visit(self, visitor):
visitor.visit_enum(self)
super(CtypesEnum, self).visit(visitor)
def py_string(self, ignore_can_be_ctype=None):
return "enum_%s" % self.tag
| 2.875 | 3 |
pytouch/elements.py | Krai53n/pytouch | 0 | 2344 | <gh_stars>0
from random import randint
import pyxel
from constants import Screen
import cursors
class Text:
def __init__(self, text):
self._text = text
self._symbol_len = 3
self._padding_len = 1
def _count_text_len(self):
return (
self._symbol_len + self._padding_len
) * len(self._text) - self._padding_len
def _x_text_center_position(self):
return (Screen.width - self._count_text_len()) // 2
def draw(self):
pyxel.text(self._x_text_center_position(), 0, self._text, 2)
class Score:
def __init__(self, padding_right=2, padding_top=2):
self._padding_right = padding_right
self._padding_top = padding_top
self.score = 0
def increase(self):
self.score += 1
def reduce(self):
self.score -= 1
def draw(self):
pyxel.text(self._padding_right, self._padding_top,
f"Score: {self.score}", (Screen.bg - 2) % 16)
class Circle:
def __init__(self):
self._r = 0
self._col = (Screen.bg - 1) % 16
def zero(self):
self._r = 0
def increase(self, size=1):
self._r += size
@property
def r(self):
return self._r
@r.setter
def r(self, r):
self._r = r
@property
def col(self):
return self._col
@col.setter
def col(self, color):
self._col = color
def draw(self, x, y):
pyxel.circ(x, y, self._r, self._col)
class ReachCircle(Circle):
def __init__(self):
super().__init__()
self.min_r = 10
self.respawn()
@property
def x(self):
return self._x
@property
def y(self):
return self._y
def respawn(self):
self._x = randint(self._r, Screen.width - self._r)
self._y = randint(self._r, Screen.height - self._r)
self._r = randint(self.min_r, min(Screen.width, Screen.height) // 2) - 4
def draw(self):
pyxel.circb(self._x, self._y, self._r, self._col)
| 2.953125 | 3 |
app/volume/admin_process.py | cleve/varidb | 0 | 2345 | <reponame>cleve/varidb<filename>app/volume/admin_process.py
from pulzarutils.utils import Utils
from pulzarutils.utils import Constants
from pulzarutils.messenger import Messenger
from pulzarcore.core_db import DB
class AdminProcess:
"""Handle admin operations from manage
"""
def __init__(self, logger):
self.TAG = self.__class__.__name__
self.logger = logger
self.utils = Utils()
self.messenger = Messenger()
self.mark_of_local_verification = b'varidb_execute_file_verification'
def process_request(self, url_path):
"""Get request type, checking for key value.
"""
regex_result = self.utils.get_search_regex(
url_path, Constants.RE_ADMIN)
if regex_result:
try:
call_path_list = regex_result.groups()[0].split('/')
call_path_list = [x for x in call_path_list if x != '']
# All nodes
if len(call_path_list) == 1 and call_path_list[0] == 'start_backup':
db_backup = DB(Constants.DB_BACKUP)
db_backup.update_or_insert_value(
self.mark_of_local_verification, b'1')
self.messenger.code_type = Constants.BACKUP_SCHEDULED
self.messenger.set_message = 'backup scheduled'
except Exception as err:
self.logger.exception('{}:{}'.format(self.TAG, err))
self.messenger.code_type = Constants.PULZAR_ERROR
self.messenger.set_message = str(err)
self.messenger.mark_as_failed()
else:
self.messenger.code_type = Constants.USER_ERROR
self.messenger.set_message = 'wrong request'
self.messenger.mark_as_failed()
return self.messenger
| 1.890625 | 2 |
tests/ssg_test_suite/profile.py | fduthilleul/scap-security-guide | 0 | 2346 | #!/usr/bin/env python2
from __future__ import print_function
import atexit
import logging
import sys
import ssg_test_suite.oscap
import ssg_test_suite.virt
from ssg_test_suite.rule import get_viable_profiles
from ssg_test_suite.virt import SnapshotStack
logging.getLogger(__name__).addHandler(logging.NullHandler())
def perform_profile_check(options):
"""Perform profile check.
Iterate over profiles in datastream and perform scanning of unaltered VM
using every profile according to input. Also perform remediation run.
    The return value is not defined; the textual output and generated reports are the
    result.
"""
dom = ssg_test_suite.virt.connect_domain(options.hypervisor,
options.domain_name)
if dom is None:
sys.exit(1)
snapshot_stack = SnapshotStack(dom)
atexit.register(snapshot_stack.clear)
snapshot_stack.create('origin')
ssg_test_suite.virt.start_domain(dom)
domain_ip = ssg_test_suite.virt.determine_ip(dom)
has_worked = False
profiles = get_viable_profiles(options.target,
options.datastream,
options.benchmark_id)
if len(profiles) > 1:
snapshot_stack.create('profile')
for profile in profiles:
logging.info("Evaluation of profile {0}.".format(profile))
has_worked = True
runner = options.remediate_using
ssg_test_suite.oscap.run_profile(domain_ip,
profile,
'initial',
options.datastream,
options.benchmark_id,
runner=runner)
ssg_test_suite.oscap.run_profile(domain_ip,
profile,
'remediation',
options.datastream,
options.benchmark_id,
runner=runner)
ssg_test_suite.oscap.run_profile(domain_ip,
profile,
'final',
options.datastream,
options.benchmark_id,
runner=runner)
snapshot_stack.revert(delete=False)
if not has_worked:
logging.error("Nothing has been tested!")
snapshot_stack.delete()
# depending on number of profiles we have either "origin" snapshot
# still to be reverted (multiple profiles) or we are reverted
# completely (only one profile was run)
| 2.015625 | 2 |
lib/wtforms/ext/appengine/fields.py | solidaritreebiz/Solidaritree | 43 | 2347 | <filename>lib/wtforms/ext/appengine/fields.py
import decimal
import operator
import warnings
from wtforms import fields, widgets
class ReferencePropertyField(fields.SelectFieldBase):
"""
A field for ``db.ReferenceProperty``. The list items are rendered in a
select.
:param reference_class:
A db.Model class which will be used to generate the default query
to make the list of items. If this is not specified, The `query`
property must be overridden before validation.
:param get_label:
If a string, use this attribute on the model class as the label
associated with each option. If a one-argument callable, this callable
will be passed model instance and expected to return the label text.
Otherwise, the model object's `__str__` or `__unicode__` will be used.
:param allow_blank:
If set to true, a blank choice will be added to the top of the list
to allow `None` to be chosen.
:param blank_text:
Use this to override the default blank option's label.
"""
widget = widgets.Select()
def __init__(self, label=None, validators=None, reference_class=None,
label_attr=None, get_label=None, allow_blank=False,
blank_text=u'', **kwargs):
super(ReferencePropertyField, self).__init__(label, validators,
**kwargs)
if label_attr is not None:
warnings.warn('label_attr= will be removed in WTForms 1.1, use get_label= instead.', DeprecationWarning)
self.get_label = operator.attrgetter(label_attr)
elif get_label is None:
self.get_label = lambda x: x
elif isinstance(get_label, basestring):
self.get_label = operator.attrgetter(get_label)
else:
self.get_label = get_label
self.allow_blank = allow_blank
self.blank_text = blank_text
self._set_data(None)
if reference_class is not None:
self.query = reference_class.all()
def _get_data(self):
if self._formdata is not None:
for obj in self.query:
if str(obj.key()) == self._formdata:
self._set_data(obj)
break
return self._data
def _set_data(self, data):
self._data = data
self._formdata = None
data = property(_get_data, _set_data)
def iter_choices(self):
if self.allow_blank:
yield (u'__None', self.blank_text, self.data is None)
for obj in self.query:
key = str(obj.key())
label = self.get_label(obj)
            yield (key, label, self.data and (self.data.key() == obj.key()))
def process_formdata(self, valuelist):
if valuelist:
if valuelist[0] == '__None':
self.data = None
else:
self._data = None
self._formdata = valuelist[0]
def pre_validate(self, form):
if not self.allow_blank or self.data is not None:
for obj in self.query:
if str(self.data.key()) == str(obj.key()):
break
else:
raise ValueError(self.gettext(u'Not a valid choice'))
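# Illustrative usage sketch (not part of this module); assumes a db.Model
# subclass named Author with a StringProperty called "name":
#
#   class BookForm(Form):
#       author = ReferencePropertyField(u'Author',
#                                       reference_class=Author,
#                                       get_label='name',
#                                       allow_blank=True,
#                                       blank_text=u'(none)')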
class StringListPropertyField(fields.TextAreaField):
"""
A field for ``db.StringListProperty``. The list items are rendered in a
textarea.
"""
def _value(self):
if self.raw_data:
return self.raw_data[0]
else:
return self.data and unicode("\n".join(self.data)) or u''
def process_formdata(self, valuelist):
if valuelist:
try:
self.data = valuelist[0].splitlines()
except ValueError:
raise ValueError(self.gettext(u'Not a valid list'))
class GeoPtPropertyField(fields.TextField):
def process_formdata(self, valuelist):
if valuelist:
try:
lat, lon = valuelist[0].split(',')
self.data = u'%s,%s' % (decimal.Decimal(lat.strip()), decimal.Decimal(lon.strip()),)
except (decimal.InvalidOperation, ValueError):
raise ValueError(u'Not a valid coordinate location')
| 2.515625 | 3 |
mtrainsimulator.py | trevor-wieland/MTrainAI | 0 | 2348 | import mtrain
import numpy as np
import pandas as pd
import random
def simulate_games(num_players=4, domino_size=12, num_games=250, collect_data=True,
debug=False, players=["Random", "Greedy", "Probability", "Neural"],
file_name="PlayData/data4_12_250"):
"""
Runs the mexican train game repeatedly with different combinations of players to
generate data to be used in testing and training the neural net.
If collect_data is on, the play data is retrieved and stored into a .xlsx file for later use
The format for the file name for this is as follows:
PlayData/data + num_players + _ + domino_size + _ + num_games + .xlsx
This spreadsheet is to be used when training the neural net.
This script has no required parameters, and will run the game with the default params if
unchanged.
If collect_data is on, the players are selected randomly each game from:
["Random", "Greedy", "Probability"]
If collect_data is off, the players are selected in order from the parameter players.
When collect_data is off: len(players) must equal num_players
Returns a tuple of lists: (score_averages, win_percentage) corresponding to the players
"""
#Sets column names for building dataframe later on
column_names = ["round_number", "turn_number", "player_number", "play",
"t_num", "hand", "unknown", "potential_plays", "points"]
#Depending on mode of use, sets players and checks validity of player values
modes = []
if collect_data:
modes = ["Random", "Greedy", "Probability"]
else:
if not len(players) == num_players:
raise RuntimeError("len(players) must equal num_players when collect_data is off")
modes = players
#Simulates num_games of games
scores = np.ndarray((num_players, num_games))
wins = np.ndarray((num_players, num_games))
full_data = pd.DataFrame(columns=column_names)
current_index = 0
for game_num in range(0, num_games):
#Randomize players if in collect_data mode
game_modes = []
if collect_data:
for select in range(0, num_players):
game_modes.append(random.choice(modes))
else:
game_modes = modes
#Run game with parameters
results = mtrain.mexicantrain(num_players, domino_size, debug=debug,
modes=game_modes,
data_collection=collect_data,
data_index=current_index, file_name=file_name)
#If collecting data, data is stored into the dataframe
if collect_data:
current_index = results[2].index[-1] + 1
full_data = pd.concat([full_data, results[2]])
#Scores and wins are recorded into their respective arrays
for player_num in range(0, num_players):
scores[player_num, game_num] = results[0][player_num]
if results[1] == player_num:
wins[player_num, game_num] = 1
else:
wins[player_num, game_num] = 0
#Calculates performance of the players
score_averages = np.ndarray((num_players))
win_percentage = np.ndarray((num_players))
for player_num in range(0, num_players):
score_averages[player_num] = np.mean(scores[player_num, :])
win_percentage[player_num] = np.mean(wins[player_num, :])
#If collecting data, prints data to a .xlsx file
if collect_data:
filename = "PlayData/data" + str(num_players) + "_" + str(domino_size) + "_" + str(num_games) + ".xlsx"
writer = pd.ExcelWriter(filename)
full_data.to_excel(writer, "Sheet1")
writer.save()
#Prints results and returns them as well
if debug: print(score_averages)
if debug: print(win_percentage)
    return score_averages, win_percentage
| 3.4375 | 3 |
dml/errors.py | RGBCube/dml | 2 | 2349 | <reponame>RGBCube/dml
__all__ = ("DottedMarkupLanguageException", "DecodeError")
class DottedMarkupLanguageException(Exception):
"""Base class for all exceptions in this module."""
pass
class DecodeError(DottedMarkupLanguageException):
"""Raised when there is an error decoding a string."""
pass
| 2.40625 | 2 |
licenseplates/dataset.py | VaranRohila/apn | 0 | 2350 | ##############################################################################
#
# Below code is inspired on
# https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/datasets/pascal_voc.py
# --------------------------------------------------------
# Detectron2
# Licensed under the Apache 2.0 license.
# --------------------------------------------------------
from fvcore.common.file_io import PathManager
import os
import numpy as np
import xml.etree.ElementTree as ET
from detectron2.structures import BoxMode
from detectron2.data import DatasetCatalog, MetadataCatalog
__all__ = ["register_licenseplates_voc"]
CLASS_NAMES = [
"license_plate",
]
def load_voc_instances(dirname: str, split: str):
"""
Load licenseplates VOC detection annotations to Detectron2 format.
Args:
dirname: Contain "annotations", "images"
split (str): one of "train", "test"
"""
with PathManager.open(os.path.join(dirname, split + ".txt")) as f:
        fileids = np.loadtxt(f, dtype=str)  # np.str was removed in recent NumPy releases; the built-in str is equivalent here
dicts = []
for fileid in fileids:
anno_file = os.path.join(dirname, "annotations", fileid + ".xml")
jpeg_file = os.path.join(dirname, "images", fileid + ".jpg")
tree = ET.parse(anno_file)
r = {
"file_name": jpeg_file,
"image_id": fileid,
"height": int(tree.findall("./size/height")[0].text),
"width": int(tree.findall("./size/width")[0].text),
}
instances = []
for obj in tree.findall("object"):
cls = obj.find("name").text
bbox = obj.find("bndbox")
bbox = [float(bbox.find(x).text) for x in ["xmin", "ymin", "xmax", "ymax"]]
instances.append(
{"category_id": CLASS_NAMES.index(cls), "bbox": bbox, "bbox_mode": BoxMode.XYXY_ABS}
)
r["annotations"] = instances
dicts.append(r)
return dicts
def register_licenseplates_voc(name, dirname, split):
DatasetCatalog.register(name,
lambda: load_voc_instances(dirname, split))
MetadataCatalog.get(name).set(thing_classes=CLASS_NAMES,
dirname=dirname,
split=split)
if __name__ == "__main__":
import random
import cv2
from detectron2.utils.visualizer import Visualizer
import argparse
# Parse command line arguments
ap = argparse.ArgumentParser()
ap.add_argument("--split", default="train")
ap.add_argument("--samples", type=int, default=10)
ap.add_argument("--scale", type=float, default=1.0)
args = ap.parse_args()
dataset_name = f"licenseplates_{args.split}"
register_licenseplates_voc(dataset_name, "datasets/licenseplates", args.split)
dataset_dicts = DatasetCatalog.get(dataset_name)
for d in random.sample(dataset_dicts, args.samples):
img = cv2.imread(d["file_name"])
visualizer = Visualizer(img[:, :, ::-1],
metadata=MetadataCatalog.get(dataset_name),
scale=args.scale)
vis = visualizer.draw_dataset_dict(d)
cv2.imshow(dataset_name, vis.get_image()[:, :, ::-1])
# Exit? Press ESC
if cv2.waitKey(0) & 0xFF == 27:
break
cv2.destroyAllWindows()
| 2.265625 | 2 |
docs/examples/pytorch/resnet50/scripts/test_read_speed.py | RogerChern/DALI | 0 | 2351 | import glob
import time
import random
filelist = glob.glob('/mnt/lustre/chenyuntao1/datasets/imagenet/train/*/*')
random.shuffle(filelist)
begin = time.time()
for i, f in enumerate(filelist):
if i == 10000:
break
with open(f, "rb") as fin:
result = fin.read()
end = time.time()
print("%.1f images/s" % (10000 / (end - begin))) | 2.484375 | 2 |
ocellaris/solver_parts/boundary_conditions/dirichlet.py | TormodLandet/Ocellaris | 1 | 2352 | # Copyright (C) 2015-2019 <NAME>
# SPDX-License-Identifier: Apache-2.0
import dolfin
from . import register_boundary_condition, BoundaryConditionCreator
from ocellaris.utils import (
CodedExpression,
OcellarisCppExpression,
OcellarisError,
verify_field_variable_definition,
)
class OcellarisDirichletBC(dolfin.DirichletBC):
def __init__(
self, simulation, V, value, subdomain_marker, subdomain_id, updater=None
):
"""
A simple storage class for Dirichlet boundary conditions
"""
super().__init__(
V, value, subdomain_marker, subdomain_id, method='geometric'
)
self.simulation = simulation
self._value = value
self.subdomain_marker = subdomain_marker
self.subdomain_id = subdomain_id
self._updater = updater
def func(self):
"""
        The boundary value function
"""
return self._value
def ds(self):
"""
Returns the ds measure of the subdomain
"""
return self.simulation.data['ds'](self.subdomain_id)
def copy_and_change_function_space(self, V):
"""
Return a copy with a new function space. Used when converting from
BCs for a segregated solver (default) to BCs for a coupled solver
"""
return OcellarisDirichletBC(
self.simulation, V, self._value, self.subdomain_marker, self.subdomain_id
)
def update(self):
"""
Update the time and other parameters used in the BC.
This is used every timestep and for all RK substeps
"""
if self._updater:
self._updater(
self.simulation.timestep, self.simulation.time, self.simulation.dt
)
def __repr__(self):
return '<OcellarisDirichletBC on subdomain %d>' % self.subdomain_id
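# Each creator class below is registered under a boundary-condition type name
# ('ConstantValue', 'CodedValue', 'CppCodedValue', 'FieldFunction',
# 'FieldVelocityValve'); the simulation input selects one by name and the
# creator appends OcellarisDirichletBC objects to simulation.data['dirichlet_bcs'].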
@register_boundary_condition('ConstantValue')
class ConstantDirichletBoundary(BoundaryConditionCreator):
description = 'A prescribed constant value Dirichlet condition'
def __init__(self, simulation, var_name, inp_dict, subdomains, subdomain_id):
"""
Dirichlet condition with constant value
"""
self.simulation = simulation
if var_name[-1].isdigit():
# A var_name like "u0" was given. Look up "Vu"
self.func_space = simulation.data['V%s' % var_name[:-1]]
else:
# A var_name like "u" was given. Look up "Vu"
self.func_space = simulation.data['V%s' % var_name]
value = inp_dict.get_value('value', required_type='any')
if isinstance(value, list):
assert len(value) == simulation.ndim
for d in range(simulation.ndim):
name = '%s%d' % (var_name, d)
self.register_dirichlet_condition(
name, value[d], subdomains, subdomain_id
)
else:
self.register_dirichlet_condition(var_name, value, subdomains, subdomain_id)
def register_dirichlet_condition(self, var_name, value, subdomains, subdomain_id):
"""
Add a Dirichlet condition to this variable
"""
if not isinstance(value, (float, int)):
raise OcellarisError(
'Error in ConstantValue BC for %s' % var_name,
'The value %r is not a number' % value,
)
df_value = dolfin.Constant(value)
# Store the boundary condition for use in the solver
bc = OcellarisDirichletBC(
self.simulation, self.func_space, df_value, subdomains, subdomain_id
)
bcs = self.simulation.data['dirichlet_bcs']
bcs.setdefault(var_name, []).append(bc)
self.simulation.log.info(' Constant value %r for %s' % (value, var_name))
@register_boundary_condition('CodedValue')
class CodedDirichletBoundary(BoundaryConditionCreator):
description = 'A coded Dirichlet condition'
def __init__(self, simulation, var_name, inp_dict, subdomains, subdomain_id):
"""
Dirichlet condition with coded value
"""
self.simulation = simulation
if var_name[-1].isdigit():
# A var_name like "u0" was given. Look up "Vu"
self.func_space = simulation.data['V%s' % var_name[:-1]]
else:
# A var_name like "u" was given. Look up "Vu"
self.func_space = simulation.data['V%s' % var_name]
# Make a dolfin Expression object that runs the code string
code = inp_dict.get_value('code', required_type='any')
if isinstance(code, list):
assert len(code) == simulation.ndim
for d in range(simulation.ndim):
name = '%s%d' % (var_name, d)
description = 'coded value boundary condition for %s' % name
sub_code = inp_dict.get_value('code/%d' % d, required_type='string')
expr = CodedExpression(simulation, sub_code, description)
self.register_dirichlet_condition(name, expr, subdomains, subdomain_id)
else:
description = 'coded value boundary condition for %s' % var_name
expr = CodedExpression(simulation, code, description)
self.register_dirichlet_condition(var_name, expr, subdomains, subdomain_id)
def register_dirichlet_condition(self, var_name, expr, subdomains, subdomain_id):
"""
Store the boundary condition for use in the solver
"""
bc = OcellarisDirichletBC(
self.simulation, self.func_space, expr, subdomains, subdomain_id
)
bcs = self.simulation.data['dirichlet_bcs']
bcs.setdefault(var_name, []).append(bc)
self.simulation.log.info(' Coded value for %s' % var_name)
@register_boundary_condition('CppCodedValue')
class CppCodedDirichletBoundary(BoundaryConditionCreator):
description = 'A C++ coded Dirichlet condition'
def __init__(self, simulation, var_name, inp_dict, subdomains, subdomain_id):
"""
Dirichlet condition with C++ coded value
"""
self.simulation = simulation
if var_name[-1].isdigit():
# A var_name like "u0" was given. Look up "Vu"
self.func_space = simulation.data['V%s' % var_name[:-1]]
else:
# A var_name like "u" was given. Look up "Vu"
self.func_space = simulation.data['V%s' % var_name]
# Make a dolfin Expression object that runs the code string
code = inp_dict.get_value('cpp_code', required_type='any')
if isinstance(code, list):
assert len(code) == simulation.ndim
for d in range(simulation.ndim):
name = '%s%d' % (var_name, d)
sub_code = inp_dict.get_value('cpp_code/%d' % d, required_type='string')
self.register_dirichlet_condition(
name, sub_code, subdomains, subdomain_id
)
else:
self.register_dirichlet_condition(var_name, code, subdomains, subdomain_id)
def register_dirichlet_condition(
self, var_name, cpp_code, subdomains, subdomain_id
):
"""
Store the boundary condition for use in the solver
"""
        description = 'boundary condition for %s' % var_name
P = self.func_space.ufl_element().degree()
expr, updater = OcellarisCppExpression(
self.simulation, cpp_code, description, P, return_updater=True
)
bc = OcellarisDirichletBC(
self.simulation,
self.func_space,
expr,
subdomains,
subdomain_id,
updater=updater,
)
bcs = self.simulation.data['dirichlet_bcs']
bcs.setdefault(var_name, []).append(bc)
self.simulation.log.info(' C++ coded value for %s' % var_name)
@register_boundary_condition('FieldFunction')
class FieldFunctionDirichletBoundary(BoundaryConditionCreator):
description = 'A Dirichlet condition with values from a field function'
def __init__(self, simulation, var_name, inp_dict, subdomains, subdomain_id):
"""
Dirichlet boundary condition with value from a field function
"""
self.simulation = simulation
if var_name[-1].isdigit():
# A var_name like "u0" was given. Look up "Vu"
self.func_space = simulation.data['V%s' % var_name[:-1]]
else:
# A var_name like "u" was given. Look up "Vu"
self.func_space = simulation.data['V%s' % var_name]
# Get the field function expression object
vardef = inp_dict.get_value('function', required_type='any')
        description = 'boundary condition for %s' % var_name
if isinstance(vardef, list):
assert len(vardef) == simulation.ndim
exprs = [
verify_field_variable_definition(simulation, vd, description)
for vd in vardef
]
else:
expr = verify_field_variable_definition(simulation, vardef, description)
if expr.ufl_shape != ():
assert expr.ufl_shape == (
simulation.ndim,
), 'Expected shape %r got %r' % ((simulation.ndim,), expr.ufl_shape)
exprs = [expr[d] for d in range(simulation.ndim)]
else:
exprs = [expr]
# Register BCs
if len(exprs) > 1:
for d in range(simulation.ndim):
name = '%s%d' % (var_name, d)
self.register_dirichlet_condition(
name, exprs[d], subdomains, subdomain_id
)
else:
self.register_dirichlet_condition(
var_name, exprs[0], subdomains, subdomain_id
)
def register_dirichlet_condition(self, var_name, expr, subdomains, subdomain_id):
"""
Store the boundary condition for use in the solver
"""
assert expr.ufl_shape == ()
bc = OcellarisDirichletBC(
self.simulation, self.func_space, expr, subdomains, subdomain_id
)
bcs = self.simulation.data['dirichlet_bcs']
bcs.setdefault(var_name, []).append(bc)
self.simulation.log.info(' Field function value for %s' % var_name)
@register_boundary_condition('FieldVelocityValve')
class FieldVelocityValveDirichletBoundary(BoundaryConditionCreator):
description = 'A Dirichlet condition that compensates for non-zero total flux of a known velocity field'
def __init__(self, simulation, var_name, inp_dict, subdomains, subdomain_id):
"""
Dirichlet boundary condition with value from a field function
"""
self.simulation = simulation
# A var_name like "u0" should be given. Look up "Vu"
self.func_space = simulation.data['V%s' % var_name[:-1]]
# Get the field function expression object
vardef = inp_dict.get_value('function', required_type='any')
        description = 'boundary condition for %s' % var_name
self.velocity = verify_field_variable_definition(
simulation, vardef, description
)
field = simulation.fields[vardef.split('/')[0]]
# The expression value is updated as the field is changed
inp_dict.get_value('function', required_type='any')
field.register_dependent_field(self)
self.flux = dolfin.Constant(1.0)
# Create the
bc = OcellarisDirichletBC(
self.simulation, self.func_space, self.flux, subdomains, subdomain_id
)
bcs = self.simulation.data['dirichlet_bcs']
bcs.setdefault(var_name, []).append(bc)
self.simulation.log.info(' Field velocity valve for %s' % var_name)
# Compute the region area, then update the flux
mesh = simulation.data['mesh']
self.area = dolfin.assemble(self.flux * bc.ds()(domain=mesh))
self.region_names = inp_dict.get_value('regions', required_type='list(string)')
self.update()
def update(self, timestep_number=None, t=None, dt=None):
"""
The main field has changed, update our flux to make the total sum to zero
"""
regions = self.simulation.data['boundary']
mesh = self.simulation.data['mesh']
n = dolfin.FacetNormal(mesh)
flux = 0
count = 0
for region in regions:
if region.name in self.region_names:
f = dolfin.dot(self.velocity, n) * region.ds()
flux += dolfin.assemble(f)
count += 1
assert count == len(self.region_names)
# FIXME: assumes n is pointing outwards along the axis in the positive
# direction in this boundary region
self.flux.assign(dolfin.Constant(-flux / self.area))
| 2.1875 | 2 |
count_split_inversions/test_count_split_inversions.py | abaldwin/algorithms | 0 | 2353 | import unittest
from count_split_inversions import count_inversions
class TestCountSplitInversions(unittest.TestCase):
def test_count_inversions(self):
input = [1, 3, 5, 2, 4, 6]
result = count_inversions(input)
self.assertEqual(result, 3)
if __name__ == '__main__':
unittest.main()
| 3.3125 | 3 |
python/chronos/test/bigdl/chronos/forecaster/tf/test_seq2seq_keras_forecaster.py | Forest216/BigDL | 0 | 2354 | <filename>python/chronos/test/bigdl/chronos/forecaster/tf/test_seq2seq_keras_forecaster.py
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import tempfile
import os
from unittest import TestCase
import numpy as np
import tensorflow as tf
def create_data(tf_data=False, batch_size=32):
train_num_samples = 1000
test_num_samples = 400
input_feature_num = 10
output_feature_num = 2
past_seq_len = 10
future_seq_len = 2
def get_x_y(num_sample):
x = np.random.randn(num_sample, past_seq_len, input_feature_num)
y = np.random.randn(num_sample, future_seq_len, output_feature_num)
return x, y
train_data = get_x_y(train_num_samples)
test_data = get_x_y(test_num_samples)
if tf_data:
from_tensor_slices = tf.data.Dataset.from_tensor_slices
train_data = from_tensor_slices(train_data).cache()\
.shuffle(train_num_samples)\
.batch(batch_size)\
.prefetch(tf.data.AUTOTUNE)
test_data = from_tensor_slices(test_data).cache()\
.batch(batch_size)\
.prefetch(tf.data.AUTOTUNE)
return train_data, test_data
@pytest.mark.skipif(tf.__version__ < '2.0.0', reason="Run only when tf > 2.0.0.")
class TestSeq2SeqForecaster(TestCase):
def setUp(self):
from bigdl.chronos.forecaster.tf.seq2seq_forecaster import Seq2SeqForecaster
self.forecaster = Seq2SeqForecaster(past_seq_len=10,
future_seq_len=2,
input_feature_num=10,
output_feature_num=2)
def tearDown(self):
pass
def test_seq2seq_fit_predict_evaluate(self):
train_data, test_data = create_data()
self.forecaster.fit(train_data,
epochs=2,
batch_size=32)
yhat = self.forecaster.predict(test_data[0])
assert yhat.shape == (400, 2, 2)
mse = self.forecaster.evaluate(test_data, multioutput="raw_values")
assert mse[0].shape == test_data[-1].shape[1:]
def test_seq2seq_fit_tf_data(self):
train_data, test_data = create_data(tf_data=True)
self.forecaster.fit(train_data,
epochs=2)
yhat = self.forecaster.predict(test_data)
assert yhat.shape == (400, 2, 2)
def test_seq2seq_save_load(self):
train_data, test_data = create_data()
self.forecaster.fit(train_data,
epochs=2,
batch_size=32)
yhat = self.forecaster.predict(test_data[0])
with tempfile.TemporaryDirectory() as tmp_dir_file:
tmp_dir_file = os.path.join(tmp_dir_file, 'seq2seq.ckpt')
self.forecaster.save(tmp_dir_file)
self.forecaster.load(tmp_dir_file)
from bigdl.chronos.model.tf2.Seq2Seq_keras import LSTMSeq2Seq
assert isinstance(self.forecaster.internal, LSTMSeq2Seq)
load_model_yhat = self.forecaster.predict(test_data[0])
assert yhat.shape == (400, 2, 2)
np.testing.assert_almost_equal(yhat, load_model_yhat, decimal=5)
if __name__ == '__main__':
pytest.main([__file__])
| 2.390625 | 2 |
examples/SubOrbitalFlight.py | nicolaikd/sl-ksp | 7 | 2355 | import time
import krpc
conn = krpc.connect(name='Sub-orbital flight')
vessel = conn.space_center.active_vessel
vessel.auto_pilot.target_pitch_and_heading(90, 90)
vessel.auto_pilot.engage()
vessel.control.throttle = 1
time.sleep(1)
print('Launch!')
vessel.control.activate_next_stage()
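# The blocks below use kRPC server-side expressions: conn.get_call captures a
# remote call without executing it, the Expression objects combine such calls
# into a condition evaluated on the server, and add_event + event.wait() block
# this script until the condition becomes true instead of polling over the network.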
fuel_amount = conn.get_call(vessel.resources.amount, 'SolidFuel')
expr = conn.krpc.Expression.less_than(
conn.krpc.Expression.call(fuel_amount),
conn.krpc.Expression.constant_float(0.1))
event = conn.krpc.add_event(expr)
with event.condition:
event.wait()
print('Booster separation')
vessel.control.activate_next_stage()
mean_altitude = conn.get_call(getattr, vessel.flight(), 'mean_altitude')
expr = conn.krpc.Expression.greater_than(
conn.krpc.Expression.call(mean_altitude),
conn.krpc.Expression.constant_double(10000))
event = conn.krpc.add_event(expr)
with event.condition:
event.wait()
print('Gravity turn')
vessel.auto_pilot.target_pitch_and_heading(60, 90)
apoapsis_altitude = conn.get_call(getattr, vessel.orbit, 'apoapsis_altitude')
expr = conn.krpc.Expression.greater_than(
conn.krpc.Expression.call(apoapsis_altitude),
conn.krpc.Expression.constant_double(100000))
event = conn.krpc.add_event(expr)
with event.condition:
event.wait()
print('Launch stage separation')
vessel.control.throttle = 0
time.sleep(1)
vessel.control.activate_next_stage()
vessel.auto_pilot.disengage()
srf_altitude = conn.get_call(getattr, vessel.flight(), 'surface_altitude')
expr = conn.krpc.Expression.less_than(
conn.krpc.Expression.call(srf_altitude),
conn.krpc.Expression.constant_double(1000))
event = conn.krpc.add_event(expr)
with event.condition:
event.wait()
vessel.control.activate_next_stage()
while vessel.flight(vessel.orbit.body.reference_frame).vertical_speed < -0.1:
print('Altitude = %.1f meters' % vessel.flight().surface_altitude)
time.sleep(1)
print('Landed!')
| 2.5 | 2 |
part02/part02-e11_rows_and_columns/src/rows_and_columns.py | davide-butera/data-analysis-with-python | 0 | 2356 | <gh_stars>0
#!/usr/bin/env python3
import numpy as np
def get_rows(a):
return list(a)
def get_columns(a):
return list(a.T)
def main():
np.random.seed(0)
a=np.random.randint(0,10, (4,4))
print("a:", a)
print("Rows:", get_rows(a))
print("Columns:", get_columns(a))
if __name__ == "__main__":
main()
| 2.8125 | 3 |
ramp-database/ramp_database/tools/leaderboard.py | kegl/ramp-board | 0 | 2357 | from distutils.version import LooseVersion
from itertools import product
import numpy as np
import pandas as pd
from ..model.event import Event
from ..model.event import EventTeam
from ..model.submission import Submission
from ..model.team import Team
from .team import get_event_team_by_name
from .submission import get_bagged_scores
from .submission import get_scores
from .submission import get_submission_max_ram
from .submission import get_time
width = -1 if LooseVersion(pd.__version__) < LooseVersion("1.0.0") else None
pd.set_option('display.max_colwidth', width)
def _compute_leaderboard(session, submissions, leaderboard_type, event_name,
with_links=True):
"""Format the leaderboard.
Parameters
----------
session : :class:`sqlalchemy.orm.Session`
The session to directly perform the operation on the database.
submissions : list of :class:`ramp_database.model.Submission`
The submission to report in the leaderboard.
leaderboard_type : {'public', 'private'}
The type of leaderboard to built.
event_name : str
The name of the event.
with_links : bool
Whether or not the submission name should be clickable.
Returns
-------
leaderboard : dataframe
The leaderboard in a dataframe format.
"""
record_score = []
event = session.query(Event).filter_by(name=event_name).one()
map_score_precision = {score_type.name: score_type.precision
for score_type in event.score_types}
for sub in submissions:
# take only max n bag
df_scores_bag = get_bagged_scores(session, sub.id)
highest_level = df_scores_bag.index.get_level_values('n_bag').max()
df_scores_bag = df_scores_bag.loc[(slice(None), highest_level), :]
df_scores_bag.index = df_scores_bag.index.droplevel('n_bag')
df_scores_bag = df_scores_bag.round(map_score_precision)
df_scores = get_scores(session, sub.id)
df_scores = df_scores.round(map_score_precision)
df_time = get_time(session, sub.id)
df_time = df_time.stack().to_frame()
df_time.index = df_time.index.set_names(['fold', 'step'])
df_time = df_time.rename(columns={0: 'time'})
df_time = df_time.sum(axis=0, level="step").T
df_scores_mean = df_scores.groupby('step').mean()
df_scores_std = df_scores.groupby('step').std()
# select only the validation and testing steps and rename them to
# public and private
map_renaming = {'valid': 'public', 'test': 'private'}
df_scores_mean = (df_scores_mean.loc[list(map_renaming.keys())]
.rename(index=map_renaming)
.stack().to_frame().T)
df_scores_std = (df_scores_std.loc[list(map_renaming.keys())]
.rename(index=map_renaming)
.stack().to_frame().T)
df_scores_bag = (df_scores_bag.rename(index=map_renaming)
.stack().to_frame().T)
df = pd.concat([df_scores_bag, df_scores_mean, df_scores_std], axis=1,
keys=['bag', 'mean', 'std'])
df.columns = df.columns.set_names(['stat', 'set', 'score'])
# change the multi-index into a stacked index
df.columns = df.columns.map(lambda x: " ".join(x))
# add the aggregated time information
df_time.index = df.index
df_time = df_time.rename(
columns={'train': 'train time [s]',
'valid': 'validation time [s]',
'test': 'test time [s]'}
)
df = pd.concat([df, df_time], axis=1)
if leaderboard_type == 'private':
df['submission ID'] = sub.basename.replace('submission_', '')
df['team'] = sub.team.name
df['submission'] = sub.name_with_link if with_links else sub.name
df['contributivity'] = int(round(100 * sub.contributivity))
df['historical contributivity'] = int(round(
100 * sub.historical_contributivity))
df['max RAM [MB]'] = get_submission_max_ram(session, sub.id)
df['submitted at (UTC)'] = pd.Timestamp(sub.submission_timestamp)
record_score.append(df)
# stack all the records
df = pd.concat(record_score, axis=0, ignore_index=True, sort=False)
# keep only second precision for the time stamp
df['submitted at (UTC)'] = df['submitted at (UTC)'].astype('datetime64[s]')
# reordered the column
stats_order = (['bag', 'mean', 'std'] if leaderboard_type == 'private'
else ['bag'])
dataset_order = (['public', 'private'] if leaderboard_type == 'private'
else ['public'])
score_order = ([event.official_score_name] +
[score_type.name for score_type in event.score_types
if score_type.name != event.official_score_name])
score_list = [
'{} {} {}'.format(stat, dataset, score)
for dataset, score, stat in product(dataset_order,
score_order,
stats_order)
]
# Only display train and validation time for the public leaderboard
time_list = (['train time [s]', 'validation time [s]', 'test time [s]']
if leaderboard_type == 'private'
else ['train time [s]', 'validation time [s]'])
col_ordered = (
['team', 'submission'] +
score_list +
['contributivity', 'historical contributivity'] +
time_list +
['max RAM [MB]', 'submitted at (UTC)']
)
if leaderboard_type == "private":
col_ordered = ["submission ID"] + col_ordered
df = df[col_ordered]
# check if the contributivity columns are null
contrib_columns = ['contributivity', 'historical contributivity']
if (df[contrib_columns] == 0).all(axis=0).all():
df = df.drop(columns=contrib_columns)
df = df.sort_values(
"bag {} {}".format(leaderboard_type, event.official_score_name),
ascending=event.get_official_score_type(session).is_lower_the_better
)
# rename the column name for the public leaderboard
if leaderboard_type == 'public':
df = df.rename(columns={
key: value for key, value in zip(score_list, score_order)
})
return df
def _compute_competition_leaderboard(session, submissions, leaderboard_type,
event_name):
"""Format the competition leaderboard.
Parameters
----------
session : :class:`sqlalchemy.orm.Session`
The session to directly perform the operation on the database.
submissions : list of :class:`ramp_database.model.Submission`
The submission to report in the leaderboard.
leaderboard_type : {'public', 'private'}
        The type of leaderboard to build.
event_name : str
The name of the event.
Returns
-------
competition_leaderboard : dataframe
The competition leaderboard in a dataframe format.
"""
event = session.query(Event).filter_by(name=event_name).one()
score_type = event.get_official_score_type(session)
score_name = event.official_score_name
private_leaderboard = _compute_leaderboard(session, submissions, 'private',
event_name, with_links=False)
time_list = (['train time [s]', 'validation time [s]', 'test time [s]']
if leaderboard_type == 'private'
else ['train time [s]', 'validation time [s]'])
col_selected_private = (['team', 'submission'] +
['bag private ' + score_name,
'bag public ' + score_name] +
time_list +
['submitted at (UTC)'])
leaderboard_df = private_leaderboard[col_selected_private]
leaderboard_df = leaderboard_df.rename(
columns={'bag private ' + score_name: 'private ' + score_name,
'bag public ' + score_name: 'public ' + score_name}
)
# select best submission for each team
best_df = (leaderboard_df.groupby('team').min()
if score_type.is_lower_the_better
else leaderboard_df.groupby('team').max())
best_df = best_df[['public ' + score_name]].reset_index()
best_df['best'] = True
# merge to get a best indicator column then select best
leaderboard_df = pd.merge(
leaderboard_df, best_df, how='left',
left_on=['team', 'public ' + score_name],
right_on=['team', 'public ' + score_name]
)
leaderboard_df = leaderboard_df.fillna(False)
leaderboard_df = leaderboard_df[leaderboard_df['best']]
leaderboard_df = leaderboard_df.drop(columns='best')
# dealing with ties: we need the lowest timestamp
best_df = leaderboard_df.groupby('team').min()
best_df = best_df[['submitted at (UTC)']].reset_index()
best_df['best'] = True
leaderboard_df = pd.merge(
leaderboard_df, best_df, how='left',
left_on=['team', 'submitted at (UTC)'],
right_on=['team', 'submitted at (UTC)'])
leaderboard_df = leaderboard_df.fillna(False)
leaderboard_df = leaderboard_df[leaderboard_df['best']]
leaderboard_df = leaderboard_df.drop(columns='best')
# sort by public score then by submission timestamp, compute rank
leaderboard_df = leaderboard_df.sort_values(
by=['public ' + score_name, 'submitted at (UTC)'],
ascending=[score_type.is_lower_the_better, True])
leaderboard_df['public rank'] = np.arange(len(leaderboard_df)) + 1
# sort by private score then by submission timestamp, compute rank
leaderboard_df = leaderboard_df.sort_values(
by=['private ' + score_name, 'submitted at (UTC)'],
ascending=[score_type.is_lower_the_better, True])
leaderboard_df['private rank'] = np.arange(len(leaderboard_df)) + 1
leaderboard_df['move'] = \
leaderboard_df['public rank'] - leaderboard_df['private rank']
leaderboard_df['move'] = [
'{:+d}'.format(m) if m != 0 else '-' for m in leaderboard_df['move']]
col_selected = (
[leaderboard_type + ' rank', 'team', 'submission',
leaderboard_type + ' ' + score_name] +
time_list +
['submitted at (UTC)']
)
if leaderboard_type == 'private':
col_selected.insert(1, 'move')
df = leaderboard_df[col_selected]
df = df.rename(columns={
leaderboard_type + ' ' + score_name: score_name,
leaderboard_type + ' rank': 'rank'
})
df = df.sort_values(by='rank')
return df
def get_leaderboard(session, leaderboard_type, event_name, user_name=None,
with_links=True):
"""Get a leaderboard.
Parameters
----------
session : :class:`sqlalchemy.orm.Session`
The session to directly perform the operation on the database.
leaderboard_type : {'public', 'private', 'failed', 'new', \
'public competition', 'private competition'}
The type of leaderboard to generate.
event_name : str
The event name.
user_name : None or str, default is None
The user name. If None, scores from all users will be queried. This
parameter is discarded when requesting the competition leaderboard.
with_links : bool, default is True
Whether or not the submission name should be clickable.
Returns
-------
leaderboard : str
The leaderboard in HTML format.
"""
q = (session.query(Submission)
.filter(Event.id == EventTeam.event_id)
.filter(Team.id == EventTeam.team_id)
.filter(EventTeam.id == Submission.event_team_id)
.filter(Event.name == event_name))
if user_name is not None:
q = q.filter(Team.name == user_name)
submissions = q.all()
submission_filter = {'public': 'is_public_leaderboard',
'private': 'is_private_leaderboard',
'failed': 'is_error',
'new': 'is_new',
'public competition': 'is_in_competition',
'private competition': 'is_in_competition'}
submissions = [sub for sub in submissions
if (getattr(sub, submission_filter[leaderboard_type]) and
sub.is_not_sandbox)]
if not submissions:
return None
if leaderboard_type in ['public', 'private']:
df = _compute_leaderboard(
session, submissions, leaderboard_type, event_name,
with_links=with_links
)
elif leaderboard_type in ['new', 'failed']:
if leaderboard_type == 'new':
columns = ['team', 'submission', 'submitted at (UTC)', 'state']
else:
columns = ['team', 'submission', 'submitted at (UTC)', 'error']
        # we rely on the zip function to ignore the submission state if the
        # error column was not appended
data = [{
column: value for column, value in zip(
columns,
[sub.event_team.team.name,
sub.name_with_link,
pd.Timestamp(sub.submission_timestamp),
(sub.state_with_link if leaderboard_type == 'failed'
else sub.state)])
} for sub in submissions]
df = pd.DataFrame(data, columns=columns)
else:
# make some extra filtering
submissions = [sub for sub in submissions if sub.is_public_leaderboard]
if not submissions:
return None
competition_type = ('public' if 'public' in leaderboard_type
else 'private')
df = _compute_competition_leaderboard(
session, submissions, competition_type, event_name
)
df_html = df.to_html(escape=False, index=False, max_cols=None,
max_rows=None, justify='left')
df_html = '<thead> {} </tbody>'.format(
df_html.split('<thead>')[1].split('</tbody>')[0]
)
return df_html
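# Illustrative usage sketch (not part of the original module). The session
# helper and event name below are assumptions for demonstration only; adapt
# them to however your RAMP deployment creates SQLAlchemy sessions.
#
#   from ramp_database.utils import session_scope  # assumed helper
#
#   with session_scope(db_config) as session:
#       html = get_leaderboard(session, 'public', 'iris_test')
#       if html is not None:
#           print(html)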
def update_leaderboards(session, event_name, new_only=False):
"""Update the leaderboards for a given event.
Parameters
----------
session : :class:`sqlalchemy.orm.Session`
The session to directly perform the operation on the database.
event_name : str
The event name.
new_only : bool, default is False
Whether or not to update the whole leaderboards or only the new
submissions. You can turn this option to True when adding a new
submission in the database.
"""
event = session.query(Event).filter_by(name=event_name).one()
if not new_only:
event.private_leaderboard_html = get_leaderboard(
session, 'private', event_name
)
event.public_leaderboard_html_with_links = get_leaderboard(
session, 'public', event_name
)
event.public_leaderboard_html_no_links = get_leaderboard(
session, 'public', event_name, with_links=False
)
event.failed_leaderboard_html = get_leaderboard(
session, 'failed', event_name
)
event.public_competition_leaderboard_html = get_leaderboard(
session, 'public competition', event_name
)
event.private_competition_leaderboard_html = get_leaderboard(
session, 'private competition', event_name
)
event.new_leaderboard_html = get_leaderboard(
session, 'new', event_name
)
session.commit()
def update_user_leaderboards(session, event_name, user_name,
                             new_only=False):
    """Update the leaderboards of a user for a given event.
Parameters
----------
session : :class:`sqlalchemy.orm.Session`
The session to directly perform the operation on the database.
event_name : str
The event name.
user_name : str
The user name. If None, scores from all users will be queried.
new_only : bool, default is False
Whether or not to update the whole leaderboards or only the new
submissions. You can turn this option to True when adding a new
submission in the database.
"""
event_team = get_event_team_by_name(session, event_name, user_name)
if not new_only:
event_team.leaderboard_html = get_leaderboard(
session, 'public', event_name, user_name
)
event_team.failed_leaderboard_html = get_leaderboard(
session, 'failed', event_name, user_name
)
event_team.new_leaderboard_html = get_leaderboard(
session, 'new', event_name, user_name
)
session.commit()
def update_all_user_leaderboards(session, event_name, new_only=False):
"""Update the leaderboards for all users for a given event.
Parameters
----------
session : :class:`sqlalchemy.orm.Session`
The session to directly perform the operation on the database.
event_name : str
The event name.
new_only : bool, default is False
Whether or not to update the whole leaderboards or only the new
submissions. You can turn this option to True when adding a new
submission in the database.
"""
event = session.query(Event).filter_by(name=event_name).one()
event_teams = session.query(EventTeam).filter_by(event=event).all()
for event_team in event_teams:
user_name = event_team.team.name
if not new_only:
event_team.leaderboard_html = get_leaderboard(
session, 'public', event_name, user_name
)
event_team.failed_leaderboard_html = get_leaderboard(
session, 'failed', event_name, user_name
)
event_team.new_leaderboard_html = get_leaderboard(
session, 'new', event_name, user_name
)
session.commit()
| 2.4375 | 2 |
projects/boring_stuff/03_functions/ZigZag.py | SavantLogics/Visual_Studio_Python_Scripts-master | 0 | 2358 | #Automate the Boring Stuff with Python
import time, sys
indent = 0 # How many spaces to indent
indent_Increasing = True # Whether the indentation is increasing or not
try:
while True: # The main program loop
print(' ' * indent, end='')
print('********')
time.sleep(0.1) # Pause for 1/10th of a second
if indent_Increasing:
indent = indent + 1
if indent == 20:
indent_Increasing = False
else:
indent = indent - 1
if indent == 0:
indent_Increasing = True
except KeyboardInterrupt:
sys.exit() | 3.859375 | 4 |
examples/add_compensation_to_sample.py | whitews/ReFlowRESTClient | 0 | 2359 | import getpass
import sys
import json
from reflowrestclient.utils import *
host = raw_input('Host: ')
username = raw_input('Username: ')
password = getpass.getpass('Password: ')
token = get_token(host, username, password)
if token:
print "Authentication successful"
print '=' * 40
else:
print "No token for you!!!"
sys.exit()
def start():
# Projects
project_list = get_projects(host, token)
for i, result in enumerate(project_list['data']):
print i, ':', result['project_name']
project_choice = raw_input('Choose Project:')
project = project_list['data'][int(project_choice)]
# Subjects
subject_list = get_subjects(host, token, project_pk=project['id'])
for i, result in enumerate(subject_list['data']):
print i, ':', result['subject_id']
subject_choice = raw_input('Choose Subject (leave blank for all subjects): ')
subject = None
if subject_choice:
subject = subject_list['data'][int(subject_choice)]
# Sites
site_list = get_sites(host, token, project_pk=project['id'])
if not site_list:
sys.exit('There are no sites')
for i, result in enumerate(site_list['data']):
print i, ':', result['site_name']
site_choice = raw_input('Choose Site (required): ')
site = site_list['data'][int(site_choice)]
# Samples
sample_args = [host, token]
sample_kwargs = {'site_pk': site['id']}
if subject:
sample_kwargs['subject_pk'] = subject['id']
sample_list = get_samples(*sample_args, **sample_kwargs)
if not sample_list:
sys.exit('There are no samples')
for i, result in enumerate(sample_list['data']):
print i, ':', result['original_filename']
sample_choice = raw_input('Choose Sample (leave blank for all samples): ')
sample = None
if sample_choice:
sample = sample_list['data'][int(sample_choice)]
# Compensation
compensation_list = get_compensations(host, token, site_pk=site['id'], project_pk=project['id'])
if not compensation_list:
sys.exit('There are no compensations')
for i, result in enumerate(compensation_list['data']):
print i, ':', result['original_filename']
compensation_choice = raw_input('Choose Compensation (required): ')
compensation = compensation_list['data'][int(compensation_choice)]
# Now have user verify information
print '=' * 40
print 'You chose to add this compensation to these samples:'
    print '\tCompensation: %s' % compensation['original_filename']
print 'Samples:'
if sample:
print '\t%s' % sample['original_filename']
else:
for s in sample_list['data']:
print '\t%s' % s['original_filename']
print '=' * 40
apply_choice = None
while apply_choice not in ['continue', 'exit']:
        apply_choice = raw_input("Type 'continue' to upload, 'exit' to abort: ")
if apply_choice == 'exit':
sys.exit()
print 'continue'
if sample:
response_dict = add_compensation_to_sample(
host,
token,
sample_pk=str(sample['id']),
compensation_pk=str(compensation['id'])
)
print "Response: ", response_dict['status'], response_dict['reason']
print 'Data: '
print json.dumps(response_dict['data'], indent=4)
else:
for sample in sample_list['data']:
response_dict = add_compensation_to_sample(
host,
token,
sample_pk=str(sample['id']),
compensation_pk=str(compensation['id']),
)
print "Response: ", response_dict['status'], response_dict['reason']
print 'Data: '
print json.dumps(response_dict['data'], indent=4)
while True:
start() | 2.796875 | 3 |
accountifie/toolkit/urls.py | imcallister/accountifie | 4 | 2360 | from django.conf import settings
from django.conf.urls import url, static
from . import views
from . import jobs
urlpatterns = [
url(r'^choose_company/(?P<company_id>.*)/$', views.choose_company, name='choose_company'),
url(r'^cleanlogs/$', jobs.cleanlogs, name='cleanlogs'),
url(r'^primecache/$', jobs.primecache, name='primecache'),
url(r'^dump_fixtures/$', views.dump_fixtures),
]
| 1.640625 | 2 |
setup.py | sequentialchaos/i3-workspace-swap | 0 | 2361 | <gh_stars>0
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="i3-workspace-swap",
    description='A python utility to swap the content of two workspaces in i3wm',
long_description=long_description,
long_description_content_type="text/markdown",
version="1.1.0",
url='https://github.com/einzigartigername/i3-workspace-swap',
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
packages=setuptools.find_packages(),
scripts=['i3-workspace-swap'],
install_requires=['i3ipc'],
classifiers=[
"Intended Audience :: End Users/Desktop",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX :: Linux",
'Programming Language :: Python :: 3'
],
python_requires='>=3.6',
)
| 1.59375 | 2 |
ROS/fprime_ws/src/genfprime/src/genfprime/generate_modmk.py | genemerewether/fprime | 5 | 2362 | <gh_stars>1-10
#
# Copyright 2004-2016, by the California Institute of Technology.
# ALL RIGHTS RESERVED. United States Government Sponsorship
# acknowledged. Any commercial use must be negotiated with the Office
# of Technology Transfer at the California Institute of Technology.
#
# This software may be subject to U.S. export control laws and
# regulations. By accepting this document, the user agrees to comply
# with all U.S. export laws and regulations. User has the
# responsibility to obtain export licenses, or other export authority
# as may be required before exporting such information to foreign
# countries or providing access to foreign persons.
#
from __future__ import print_function
import os
from genmsg import MsgGenerationException
#from . name import *
## :param type_name outdir: Full path to output directory
## :returns int: status. 0 if successful
def write_modmk(outdir): #, msg_types, srv_types):
if not os.path.isdir(outdir):
#TODO: warn?
return 0
xml_in_dir = set([f for f in os.listdir(outdir)
if f.endswith('.xml')])
_write_modmk(outdir, sorted(xml_in_dir))
# TODO(mereweth) if we want to independently specify the generated XML files
# generated_xml = [_msg_serializable_xml_name(f) for f in sorted(msg_types)]
# generated_xml.extend([_port_xml_name(f) for f in sorted(msg_types)]
# write_msg_modmk(outdir, generated_xml)
# generated_xml = [_srv_serializable_xml_name(f) for f in sorted(srv_types)]
# generated_xml.extend([_port_xml_name(f) for f in sorted(srv_types)]
# write_msg_modmk(outdir, generated_xml)
return 0
def _write_modmk(outdir, generated_xml):
if not os.path.exists(outdir):
os.makedirs(outdir)
elif not os.path.isdir(outdir):
        raise MsgGenerationException("file preventing the creation of Fprime directory: %s" % outdir)
p = os.path.join(outdir, 'mod.mk')
with open(p, 'w') as f:
f.write('SRC = \\\n')
if len(generated_xml) != 0:
for xml in generated_xml[:-1]:
f.write('%s \\\n'%xml)
f.write('%s\n'%generated_xml[-1])
return 0
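# Illustrative usage sketch (not part of the original module). The output
# directory below is an assumption for demonstration; write_modmk() scans it
# for *.xml files and regenerates a mod.mk that lists them under SRC.
#
#   if __name__ == '__main__':
#       write_modmk('/tmp/fprime_generated_xml')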
| 1.773438 | 2 |
tests/test_compare.py | fool65c/jupytext | 1 | 2363 | import pytest
from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell
from jupytext.compare import compare_notebooks, NotebookDifference, test_round_trip_conversion as round_trip_conversion
def test_raise_on_different_metadata():
ref = new_notebook(metadata={'kernelspec': {'language': 'python', 'name': 'python', 'display_name': 'Python'}},
cells=[new_markdown_cell('Cell one')])
test = new_notebook(metadata={'kernelspec': {'language': 'R', 'name': 'R', 'display_name': 'R'}},
cells=[new_markdown_cell('Cell one')])
with pytest.raises(NotebookDifference):
compare_notebooks(ref, test, 'md')
@pytest.mark.parametrize('raise_on_first_difference', [True, False])
def test_raise_on_different_cell_type(raise_on_first_difference):
ref = new_notebook(cells=[new_markdown_cell('Cell one'), new_code_cell('Cell two')])
test = new_notebook(cells=[new_markdown_cell('Cell one'), new_raw_cell('Cell two')])
with pytest.raises(NotebookDifference):
compare_notebooks(ref, test, 'md', raise_on_first_difference=raise_on_first_difference)
@pytest.mark.parametrize('raise_on_first_difference', [True, False])
def test_raise_on_different_cell_content(raise_on_first_difference):
ref = new_notebook(cells=[new_markdown_cell('Cell one'), new_code_cell('Cell two')])
test = new_notebook(cells=[new_markdown_cell('Cell one'), new_code_cell('Modified cell two')])
with pytest.raises(NotebookDifference):
compare_notebooks(ref, test, 'md', raise_on_first_difference=raise_on_first_difference)
def test_raise_on_incomplete_markdown_cell():
ref = new_notebook(cells=[new_markdown_cell('Cell one\n\n\nsecond line')])
test = new_notebook(cells=[new_markdown_cell('Cell one')])
with pytest.raises(NotebookDifference):
compare_notebooks(ref, test, 'md')
def test_does_raise_on_split_markdown_cell():
ref = new_notebook(cells=[new_markdown_cell('Cell one\n\n\nsecond line')])
test = new_notebook(cells=[new_markdown_cell('Cell one'),
new_markdown_cell('second line')])
with pytest.raises(NotebookDifference):
compare_notebooks(ref, test, 'md')
def test_raise_on_different_cell_metadata():
ref = new_notebook(cells=[new_code_cell('1+1')])
test = new_notebook(cells=[new_code_cell('1+1', metadata={'metakey': 'value'})])
with pytest.raises(NotebookDifference):
compare_notebooks(ref, test, 'py:light')
@pytest.mark.parametrize('raise_on_first_difference', [True, False])
def test_raise_on_different_cell_count(raise_on_first_difference):
ref = new_notebook(cells=[new_code_cell('1')])
test = new_notebook(cells=[new_code_cell('1'),
new_code_cell('2')])
with pytest.raises(NotebookDifference):
compare_notebooks(ref, test, 'py:light', raise_on_first_difference=raise_on_first_difference)
with pytest.raises(NotebookDifference):
compare_notebooks(test, ref, 'py:light', raise_on_first_difference=raise_on_first_difference)
def test_does_not_raise_on_blank_line_removed():
ref = new_notebook(cells=[new_code_cell('1+1\n ')])
test = new_notebook(cells=[new_code_cell('1+1')])
compare_notebooks(ref, test, 'py:light')
def test_strict_raise_on_blank_line_removed():
ref = new_notebook(cells=[new_code_cell('1+1\n')])
test = new_notebook(cells=[new_code_cell('1+1')])
with pytest.raises(NotebookDifference):
compare_notebooks(ref, test, 'py:light', allow_expected_differences=False)
def test_dont_raise_on_different_outputs():
ref = new_notebook(cells=[new_code_cell('1+1')])
test = new_notebook(cells=[new_code_cell('1+1', outputs=[
{
"data": {
"text/plain": [
"2"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
])])
compare_notebooks(ref, test, 'md')
@pytest.mark.parametrize('raise_on_first_difference', [True, False])
def test_raise_on_different_outputs(raise_on_first_difference):
ref = new_notebook(cells=[new_code_cell('1+1')])
test = new_notebook(cells=[new_code_cell('1+1', outputs=[
{
"data": {
"text/plain": [
"2"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
])])
with pytest.raises(NotebookDifference):
compare_notebooks(ref, test, 'md', compare_outputs=True, raise_on_first_difference=raise_on_first_difference)
def test_test_round_trip_conversion():
notebook = new_notebook(cells=[new_code_cell('1+1', outputs=[
{
"data": {
"text/plain": [
"2"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
])], metadata={'main_language': 'python'})
round_trip_conversion(notebook, {'extension': '.py'}, update=True)
def test_mutiple_cells_differ():
nb1 = new_notebook(cells=[new_code_cell(''),
new_code_cell('2')])
nb2 = new_notebook(cells=[new_code_cell('1+1'),
new_code_cell('2\n2')])
with pytest.raises(NotebookDifference) as exception_info:
compare_notebooks(nb1, nb2, raise_on_first_difference=False)
assert 'Cells 1,2 differ' in exception_info.value.args[0]
def test_cell_metadata_differ():
nb1 = new_notebook(cells=[new_code_cell('1'),
new_code_cell('2', metadata={'additional': 'metadata1'})])
nb2 = new_notebook(cells=[new_code_cell('1'),
new_code_cell('2', metadata={'additional': 'metadata2'})])
with pytest.raises(NotebookDifference) as exception_info:
compare_notebooks(nb1, nb2, raise_on_first_difference=False)
assert "Cell metadata 'additional' differ" in exception_info.value.args[0]
def test_notebook_metadata_differ():
nb1 = new_notebook(cells=[new_code_cell('1'),
new_code_cell('2')])
nb2 = new_notebook(cells=[new_code_cell('1'),
new_code_cell('2')],
metadata={'kernelspec': {'language': 'python', 'name': 'python', 'display_name': 'Python'}})
with pytest.raises(NotebookDifference) as exception_info:
compare_notebooks(nb1, nb2, raise_on_first_difference=False, )
assert "Notebook metadata differ" in exception_info.value.args[0]
| 2.140625 | 2 |
Cogs/Actions.py | MrAngelDo6pa/MedBotS | 2 | 2364 | import asyncio
import discord
import random
import datetime
from discord.ext import commands
from Cogs import DisplayName
from Cogs import Nullify
def setup(bot):
# Add the bot
bot.add_cog(Actions(bot))
class Actions(commands.Cog):
## class that handles storing and computing action messages
class actionable:
## these should be filled in the override class. any {} are replaced with target member's name
nothingList = [] # when you call without any arguments
botList = [] # when the action is done at the bot
selfList = [] # when the action is done at the user who called it
memberList = [] # when the action is done toward another member
itemList = [] # when the action is done on a string of text that is not a member
def computeAction(self, bot, ctx, target):
'''return a message based on the context and argument of the command'''
mesg = ""
if not target: # no arguments
mesg = random.choice(self.nothingList)
else:
targetMember = DisplayName.memberForName(target, ctx.message.guild)
if targetMember:
if self.botList and targetMember.id == bot.user.id: # actioning the bot
mesg = random.choice(self.botList) # if botList is empty we fail over to the member list
elif self.selfList and targetMember.id == ctx.message.author.id: # actioning themselves
mesg = random.choice(self.selfList)
else: # actioning another user
mesg = random.choice(self.memberList).replace("{}",DisplayName.name(targetMember))
else: # actioning an item
mesg = random.choice(self.itemList)
if '{}' in mesg:
mesg = mesg.format(target)
mesgFull = '*{}*, {}'.format(DisplayName.name(ctx.message.author), mesg)
mesgFull = Nullify.clean(mesgFull)
return mesgFull
## static definitions of all the action messages
class eating(actionable):
nothingList = [ 'you sit quietly and eat *nothing*...',
'you\'re *sure* there was something to eat, so you just chew on nothingness...',
'there comes a time when you need to realize that you\'re just chewing nothing for the sake of chewing. That time is now.']
botList = [ 'you try to eat *me* - but unfortunately, I saw it coming - your jaw hangs open as I deftly sidestep.',
'your mouth hangs open for a brief second before you realize that *I\'m* eating *you*.',
'I\'m a bot. You can\'t eat me.',
'your jaw clamps down on... wait... on nothing, because I\'m *digital!*.',
'what kind of bot would I be if I let you eat me?']
selfList = ['you clamp down on your own forearm - not surprisingly, it hurts.',
'you place a finger into your mouth, but *just can\'t* force yourself to bite down.',
'you happily munch away, but can now only wave with your left hand.',
'wait - you\'re not a sandwich!',
'you might not be the smartest...']
memberList = [ 'you unhinge your jaw and consume *{}* in one bite.',
'you try to eat *{}*, but you just can\'t quite do it - you spit them out, the taste of failure hanging in your mouth...',
'you take a quick bite out of *{}*. They probably didn\'t even notice.',
'you sink your teeth into *{}\'s* shoulder - they turn to face you, eyes wide as you try your best to scurry away and hide.',
'your jaw clamps down on *{}* - a satisfying *crunch* emanates as you finish your newest meal.']
itemList = [ 'you take a big chunk out of *{}*. *Delicious.*',
'your teeth sink into *{}* - it tastes satisfying.',
'you rip hungrily into *{}*, tearing it to bits!',
'you just can\'t bring yourself to eat *{}* - so you just hold it for awhile...',
'you attempt to bite into *{}*, but you\'re clumsier than you remember - and fail...']
class drinking(actionable):
nothingList = [ 'you stare at your glass full of *nothing*...',
'that cup must\'ve had something in it, so you drink *nothing*...',
'you should probably just go get a drink.',
'that desk looks pretty empty',
'are you sure you know what drinking is?',
'you desperatly search for something to drink']
botList = [ 'you try to drink *me*, but I dodge your straw.',
'You search for me, only to realise that *I* am already drinking you!',
'I\'m a bot. You can\'t drink me.',
'you stick a straw in... wait... in nothing, because I\'m *digital!*.',
'what do you think I am to let you drink me?',
'I don\'t think you would like the taste of me.',
'you can\'t drink me, I\'m a machine!']
selfList = ['you stab yourself with a straw - not surprisingly, it hurts.',
'you fit yourself in to a cup, but you just can\'t do it.',
'you happily drink away, but you are now very floppy.',
'wait - you\'re not a drink!',
'you might not be the smartest...',
'you might have some issues.',
'you try to drink yourself.',
'why would you drink yourself?']
memberList = [ 'you grab your lucky straw and empty *{}* in one sip.',
'you try to drink *{}*, but you just can\'t quite do it - you spit them out, the taste of failure hanging in your mouth...',
'you drink a small sip of *{}*. They probably didn\'t even notice.',
'you stab your straw into *{}\'s* shoulder - You run away as they run after you.',
'you happily drink away - *{}* starts to look like an empty Capri Sun package.',
'you are thirsty - *{}* sacrifices themself involuntarily.',
'somehow you end up emptying *{}*.']
itemList = ['you take a big sip of *{}*. *Delicious.*',
'your straw sinks into *{}* - it tastes satisfying.',
'you thirstly guzzle *{}*, it\'s lovely!',
'you just can\'t bring yourself to drink *{}* - so you just hold it for awhile...',
'you attempt to drain *{}*, but you\'re clumsier than you remember - and fail...',
'you drink *{}*.',
'*{}* dries up from your drinking.',
'*{}* starts resembling the Aral Sea.']
class booping(actionable):
nothingList = [ 'you stretch out your hand in the air, but there\'s nothing there...',
'you try and find someone to boop, but there\'s no one there.',
'you look around the channel for someone to boop.',
'you eye all the heads in the room, just waiting to be booped.',
'are you sure you have someone to boop?',
'I get it. You want to boop *someone*.']
selfList = ['you boop yourself on the nose with your finger.',
'you try to boop your head, but your hand gets lost along the way.',
'you happily boop yourself, but you are now very giddy.',
'wait - are you sure you want to boop yourself?',
'you might not be the smartest...',
'you might have some issues.',
'you try to boop yourself.',
'why would you boop yourself?']
memberList = [ 'you outstretch your lucky finger and boop *{}* in one go.',
'you try to boop *{}*, but you just can\'t quite do it - you miss their head, the taste of failure hanging stuck to your hand...',
'you sneak a boop onto *{}*. They probably didn\'t even notice.',
'you poke your hand onto *{}\'s* hand - You run away as they run after you.',
'you happily drum your fingers away - *{}* starts to look annoyed.',
'you\'re feeling boopy - *{}* sacrifices themself involuntarily.',
'somehow you end up booping *{}*.',
'you climb *{}*\'s head and use it as a bouncy castle... they feel amused.']
itemList = ['you put your hand onto *{}*\'s head. *Bliss.*',
'your hand touches *{}*\'s snoot - it feels satisfying.',
'you happily boop *{}*, it\'s lovely!',
'you just can\'t bring yourself to boop *{}* - so you just let your hand linger...',
'you attempt to boop *{}*, but you\'re clumsier than you remember - and fail...',
'you boop *{}*.',
'*{}* feels annoyed from your booping.',
'*{}* starts resembling a happy pupper.']
class spooky(actionable):
nothingList = [ 'you spook no one but yourself',
'you spook nothing, sp00py...',
'sadly, no one got spooked',
'it is sp00... you can\t spook air']
botList = [ 'you scared the living pumpkin out of me!',
'you spooked me so hard, I got the Heebie-jeebies...', # https://www.myenglishteacher.eu/blog/idioms-for-being-afraid/
'you sp00p me? But I\'m a bot... I can\'t be spooked!',
                    'sorry, but I cannot let you spook me; My digital emotions will get all messed up!',
                    'aaaaaaaaaah! Don\'t you scare me like that again!']
selfList = ['go watch a scary movie to be absolutely sp00ped!',
'boo! Did you scare you?',
'you look yourself in the mirror and get a little scared...',
'get spooked by... yourself?',
'sp00py, but why spook yourself?']
memberList = [ 'you sp00p *{}* so hard that they start screaming!',
'you tried to sneak up on *{}*, but they heard you sneakin\' and fail...',
'it is sp00py time! Hey *{}*, boo!',
'congrats, *{}* dun sp00ked.',
'get spook3d *{}*!']
itemList = ['you spook *{}* with no reaction, leaving you looking weird...',
'*{}* got sp00p3d so hard, it ran away!',
'you trick or treat *{}* without any reaction...',
'you do your best to sp00p *{}*, but fail...',
'sp00py time! *{}* gets sp00ped harder than you thought and starts crying!']
class highfives(actionable):
nothingList = [ 'you stand alone for an eternity, hand raised up - desperate for any sort of recognition...',
'with a wild swing you throw your hand forward - the momentum carries you to the ground and you just lay there - high fiveless...',
'the only sound you hear as a soft *whoosh* as your hand connects with nothing...']
botList = [ 'the sky erupts with 1\'s and 0\'s as our hands meet in an epic high five of glory!',
'you beam up to the cloud and receive a quick high five from me before downloading back to Earth.',
'I unleash a fork-bomb of high five processes!',
'01001000011010010110011101101000001000000100011001101001011101100110010100100001']
selfList = ['ahh - high fiving yourself, classy...',
'that\'s uh... that\'s just clapping...',
'you run in a large circle - *totally* high fiving all your friends...',
'now you\'re at both ends of a high five!']
memberList = [ 'you and *{}* jump up for an epic high five - freeze-framing as the credits roll and some wicked 80s synth plays out.',
'you and *{}* elevate to a higher plane of existence in wake of that tremendous high five!',
'a 2 hour, 3 episode anime-esque fight scene unfolds as you and *{}* engage in a world-ending high five!',
'it *was* tomorrow - before you and *{}* high fived with enough force to spin the Earth in reverse!',
'like two righteous torpedoes - you and *{}* connect palms, subsequently deafening everyone in a 300-mile radius!']
itemList = ['neat... you just high fived *{}*.',
'your hand flops through the air - hitting *{}* with a soft thud.',
'you reach out a hand, gently pressing your palm to *{}*. A soft *"high five"* escapes your lips as a tear runs down your cheek...',
'like an open-handed piston of ferocity - you drive your palm into *{}*.']
class petting(actionable): # meow
nothingList = [ 'you absentmindedly wave your hand in the air.',
'you could have sworn there was a cat there!',
'you remember that there are no cats here.',
'you try to pet the cat, but miss because the cat is gone.']
botList = [ 'I may be electronic but I still appreciate pets.',
'*purrrrrrrrrrrrrrr*.',
'you electrocute yourself trying to pet a computer.']
selfList = ['you give yourself a nice pat on the head.',
'too bad there\'s no one else to pet you.',
'in lieu of anything else to pet, you pet yourself.',
'your hair is warm and soft.']
memberList = [ 'you give *{}* a pat on the head.',
'you rub your hand through *{}\'s* hair.',
'*{}* smiles from your petting.',
'you try to pet *{}*, but miss because they hid under the bed.',
'*{}* purrs from your petting.',
'you pet *{}* but they bite your hand',
'you try to pet *{}* but they hiss and run away.']
itemList = ['you rub *{}* but it doesn\'t feel like a cat.',
'you don\'t hear any purring from *{}*.',
'you hurt your hand trying to pet *{}*.']
# Init with the bot reference, and a reference to the settings var
def __init__(self, bot):
self.bot = bot
global Utils, DisplayName
Utils = self.bot.get_cog("Utils")
DisplayName = self.bot.get_cog("DisplayName")
@commands.command(pass_context=True)
async def eat(self, ctx, *, member : str = None):
"""Eat like a boss."""
msg = self.eating.computeAction(self.eating, self.bot, ctx, member) #python is silly and makes me do this for uninitialized classes
await ctx.channel.send(msg)
return
@commands.command(pass_context=True)
async def drink(self, ctx, *, member : str = None):
"""Drink like a boss."""
msg = self.drinking.computeAction(self.drinking, self.bot, ctx, member)
await ctx.channel.send(msg)
return
@commands.command(pass_context=True)
async def boop(self, ctx, *, member : str = None):
"""Boop da snoot."""
msg = self.booping.computeAction(self.booping, self.bot, ctx, member)
await ctx.channel.send(msg)
return
@commands.command(pass_context=True)
async def spook(self, ctx, *, member : str = None):
"""sp00ktober by camiel."""
if datetime.date.today().month == 10:
# make it extra sp00py because it is spooktober
await ctx.message.add_reaction("🎃")
msg = self.spooky.computeAction(self.spooky, self.bot, ctx, member)
await ctx.channel.send(msg)
return
@commands.command(pass_context=True)
async def highfive(self, ctx, *, member : str = None):
"""High five like a boss."""
msg = self.highfives.computeAction(self.highfives, self.bot, ctx, member)
await ctx.channel.send(msg)
return
@commands.command(pass_context=True)
async def pet(self, ctx, *, member : str = None):
"""pet kitties."""
msg = self.petting.computeAction(self.petting, self.bot, ctx, member)
await ctx.channel.send(msg)
return
| 2.984375 | 3 |
marltoolbox/examples/tune_function_api/lola_pg_official.py | tobiasbaumann1/amd | 0 | 2365 | <reponame>tobiasbaumann1/amd<filename>marltoolbox/examples/tune_function_api/lola_pg_official.py
##########
# Additional dependencies are needed:
# Follow the LOLA installation described in the tune_class_api/lola_pg_official.py file
##########
import os
import ray
from ray import tune
import marltoolbox.algos.lola.envs as lola_envs
import marltoolbox.algos.lola_dice.envs as lola_dice_envs
from marltoolbox.algos.lola import train_cg, train_exact, train_pg
from marltoolbox.envs.vectorized_coin_game import CoinGame, AsymCoinGame
from marltoolbox.utils import log
def trainer_fn(exp_name, num_episodes, trace_length, exact, pseudo, grid_size,
lr, lr_correction, batch_size, bs_mul, simple_net, hidden, reg,
gamma, lola_update, opp_model, mem_efficient, seed, set_zero,
warmup, changed_config, ac_lr, summary_len, use_MAE,
use_toolbox_env, clip_lola_update_norm, clip_loss_norm, entropy_coeff,
weigth_decay, **kwargs):
# Instantiate the environment
if exp_name == "IPD":
env = lola_envs.IPD(trace_length)
elif exp_name == "IMP":
env = lola_envs.IMP(trace_length)
elif exp_name == "CoinGame":
if use_toolbox_env:
env = CoinGame(config={
"batch_size": batch_size,
"max_steps": trace_length,
"grid_size": grid_size,
"get_additional_info": True,
"add_position_in_epi": False,
})
else:
env = lola_dice_envs.CG(trace_length, batch_size, grid_size)
env.seed(seed)
elif exp_name == "AsymCoinGame":
if use_toolbox_env:
env = AsymCoinGame(config={
"batch_size": batch_size,
"max_steps": trace_length,
"grid_size": grid_size,
"get_additional_info": True,
"add_position_in_epi": False,
})
else:
env = lola_dice_envs.AsymCG(trace_length, batch_size, grid_size)
env.seed(seed)
else:
raise ValueError(f"exp_name: {exp_name}")
# Import the right training function
if exact:
train_exact.train(env,
num_episodes=num_episodes,
trace_length=trace_length,
simple_net=simple_net,
corrections=lola_update,
pseudo=pseudo,
num_hidden=hidden,
reg=reg,
lr=lr,
lr_correction=lr_correction,
gamma=gamma)
elif exp_name in ("IPD", "IMP"):
train_pg.train(env,
num_episodes=num_episodes,
trace_length=trace_length,
batch_size=batch_size,
gamma=gamma,
set_zero=set_zero,
lr=lr,
corrections=lola_update,
simple_net=simple_net,
hidden=hidden,
mem_efficient=mem_efficient)
elif exp_name in ("CoinGame", "AsymCoinGame"):
train_cg.train(env,
num_episodes=num_episodes,
trace_length=trace_length,
batch_size=batch_size,
bs_mul=bs_mul,
gamma=gamma,
grid_size=grid_size,
lr=lr,
corrections=lola_update,
opp_model=opp_model,
hidden=hidden,
mem_efficient=mem_efficient,
asymmetry=exp_name == "AsymCoinGame",
warmup=warmup,
changed_config=changed_config,
ac_lr=ac_lr,
summary_len=summary_len,
use_MAE=use_MAE,
use_toolbox_env=use_toolbox_env,
clip_lola_update_norm=clip_lola_update_norm,
clip_loss_norm=clip_loss_norm,
entropy_coeff=entropy_coeff,
weigth_decay=weigth_decay,
)
else:
raise ValueError(f"exp_name: {exp_name}")
def lola_training(config):
trainer_fn(**config)
def get_tune_config(full_config: dict) -> dict:
# Sanity
assert full_config['exp_name'] in {"CoinGame", "IPD", "IMP", "AsymCoinGame"}
if full_config['exact']:
assert full_config['exp_name'] != "CoinGame", "Can't run CoinGame with --exact."
assert full_config['exp_name'] != "AsymCoinGame", "Can't run AsymCoinGame with --exact."
# Resolve default parameters
if full_config['exact']:
full_config['num_episodes'] = 50 if full_config['num_episodes'] is None else full_config['num_episodes']
full_config['trace_length'] = 200 if full_config['trace_length'] is None else full_config['trace_length']
full_config['lr'] = 1. if full_config['lr'] is None else full_config['lr']
elif full_config['exp_name'] in {"IPD", "IMP"}:
full_config['num_episodes'] = 600000 if full_config['num_episodes'] is None else full_config['num_episodes']
full_config['trace_length'] = 150 if full_config['trace_length'] is None else full_config['trace_length']
full_config['batch_size'] = 4000 if full_config['batch_size'] is None else full_config['batch_size']
full_config['lr'] = 1. if full_config['lr'] is None else full_config['lr']
elif full_config['exp_name'] == "CoinGame" or full_config['exp_name'] == "AsymCoinGame":
full_config['num_episodes'] = 100000 if full_config['num_episodes'] is None else full_config['num_episodes']
full_config['trace_length'] = 150 if full_config['trace_length'] is None else full_config['trace_length']
full_config['batch_size'] = 4000 if full_config['batch_size'] is None else full_config['batch_size']
full_config['lr'] = 0.005 if full_config['lr'] is None else full_config['lr']
if full_config['exp_name'] in ("IPD", "CoinGame", "AsymCoinGame"):
full_config['gamma'] = 0.96 if full_config['gamma'] is None else full_config['gamma']
elif full_config['exp_name'] == "IMP":
full_config['gamma'] = 0.9 if full_config['gamma'] is None else full_config['gamma']
return full_config
def main(debug):
exp_name, _ = log.log_in_current_day_dir(f"LOLA_PG")
tune_hparams = {
"exp_name": exp_name,
# Dynamically set
"num_episodes": 3 if debug else None,
"trace_length": 6 if debug else None,
"lr": None,
"gamma": None,
"batch_size": 12 if debug else None,
# "exp_name": "IPD",
# "exp_name": "IMP",
"exp_name": "CoinGame",
# "exp_name": "AsymCoinGame",
"pseudo": False,
"grid_size": 3,
"lola_update": True,
"opp_model": False,
"mem_efficient": True,
"lr_correction": 1,
"bs_mul": 1 / 10,
"simple_net": True,
"hidden": 32,
"reg": 0,
"set_zero": 0,
"exact": False,
"warmup": 1,
"seed": 1,
"changed_config": False,
"ac_lr": 1.0,
"summary_len": 1,
"use_MAE": False,
"use_toolbox_env": True,
"clip_loss_norm": False,
"clip_lola_update_norm": False,
"clip_lola_correction_norm": 3.0,
"clip_lola_actor_norm": 10.0,
"entropy_coeff": 0.001,
"weigth_decay": 0.03,
}
tune_config = get_tune_config(tune_hparams)
ray.init(num_cpus=os.cpu_count(), num_gpus=0)
tune_analysis = tune.run(lola_training, name=tune_hparams["exp_name"], config=tune_config)
ray.shutdown()
return tune_analysis
if __name__ == "__main__":
debug_mode = True
main(debug_mode)
| 2.15625 | 2 |
src/cut_link/utils.py | true7/srt | 0 | 2366 | <gh_stars>0
import string
import random
import json
from calendar import month_name
from django.conf import settings
SHORTLINK_MIN = getattr(settings, "SHORTLINK_MIN", 6)
def code_generator(size=SHORTLINK_MIN):
chars = string.ascii_letters + string.digits
return ''.join(random.choice(chars) for _ in range(size))
def create_shortlink(instance):
new_link = code_generator()
class_ = instance.__class__
query_set = class_.objects.filter(shortlink=new_link)
if query_set.exists():
        return create_shortlink(instance)
return new_link
def json_data_func(instance):
''' Return json format data, ready for passing into AmCharts.
Contains 2 items - name of the month and count of distinct
links, which were cut on the website.
'''
class_ = instance.__class__
# FIXME. The problem is every next year it will add results above
result = []
for month in range(1, len(month_name)):
count_use = class_.objects.filter(pub_date__month=month).count()
data = dict(month=month_name[month], count=count_use)
result.append(data)
json_data = json.dumps(result)
return json_data
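# Illustrative usage sketch (not part of the original module). The model and
# field names below are assumptions for demonstration; any Django model with a
# `shortlink` field and a `pub_date` datetime field would fit this pattern.
#
#   link = Link(url='https://example.com')        # hypothetical model
#   link.shortlink = create_shortlink(link)       # unique 6-char code
#   link.save()
#   chart_json = json_data_func(link)             # e.g. '[{"month": "January", "count": 3}, ...]'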
| 2.515625 | 3 |
lib/tool_shed/scripts/bootstrap_tool_shed/bootstrap_util.py | blankenberg/galaxy-data-resource | 0 | 2367 | #!/usr/bin/python
import argparse
import ConfigParser
import os
import sys
new_path = [ os.path.join( os.getcwd(), "lib" ) ]
new_path.extend( sys.path[1:] )
sys.path = new_path
from galaxy import eggs
eggs.require( "SQLAlchemy >= 0.4" )
import galaxy.webapps.tool_shed.model.mapping as tool_shed_model
from sqlalchemy.exc import ProgrammingError
from sqlalchemy.exc import OperationalError
from tool_shed.util import xml_util
def check_db( config_parser ):
dburi = None
if config_parser.has_option( 'app:main', 'database_connection' ):
dburi = config_parser.get( 'app:main', 'database_connection' )
elif config_parser.has_option( 'app:main', 'database_file' ):
db_file = config_parser.get( 'app:main', 'database_file' )
dburi = "sqlite:///%s?isolation_level=IMMEDIATE" % db_file
else:
print 'The database configuration setting is missing from the tool_shed.ini file. Add this setting before attempting to bootstrap.'
exit(1)
sa_session = None
database_exists_message = 'The database configured for this Tool Shed is not new, so bootstrapping is not allowed. '
database_exists_message += 'Create a new database that has not been migrated before attempting to bootstrap.'
try:
model = tool_shed_model.init( config_parser.get( 'app:main', 'file_path' ), dburi, engine_options={}, create_tables=False )
sa_session = model.context.current
print database_exists_message
exit(1)
except ProgrammingError, e:
pass
except OperationalError, e:
pass
try:
if sa_session is not None:
result = sa_session.execute( 'SELECT version FROM migrate_version' ).first()
if result[0] >= 2:
print database_exists_message
exit(1)
else:
pass
except ProgrammingError, e:
pass
if config_parser.has_option( 'app:main', 'hgweb_config_dir' ):
hgweb_config_parser = ConfigParser.ConfigParser()
hgweb_dir = config_parser.get( 'app:main', 'hgweb_config_dir' )
hgweb_config_file = os.path.join( hgweb_dir, 'hgweb.config' )
if not os.path.exists( hgweb_config_file ):
exit(0)
hgweb_config_parser.read( hgweb_config_file )
configured_repos = hgweb_config_parser.items( 'paths' )
if len( configured_repos ) >= 1:
message = "This Tool Shed's hgweb.config file contains entries, so bootstrapping is not allowed. Delete"
message += " the current hgweb.config file along with all associated repositories in the configured "
message += "location before attempting to boostrap."
print
exit(1)
else:
exit(0)
else:
exit(0)
exit(0)
def admin_user_info( config_parser ):
user_info_config = os.path.abspath( os.path.join( os.getcwd(), 'lib/tool_shed/scripts/bootstrap_tool_shed', 'user_info.xml' ) )
tree, error_message = xml_util.parse_xml( user_info_config )
if tree is None:
print "The XML file ", user_info_config, " seems to be invalid, using defaults."
email = '<EMAIL>'
password = '<PASSWORD>'
username = 'admin'
else:
root = tree.getroot()
for elem in root:
if elem.tag == 'email':
email = elem.text
elif elem.tag == 'password':
password = elem.text
elif elem.tag == 'username':
username = elem.text
print '%s__SEP__%s__SEP__%s' % ( username, email, password )
return 0
def get_local_tool_shed_url( config_parser ):
port = '9009'
if config_parser.has_section( 'server:main' ):
if config_parser.has_option( 'server:main', 'port' ):
port = config_parser.get( 'server:main', 'port' )
host = '127.0.0.1'
print 'http://%s:%s' % ( host, port )
return 0
def main( args ):
config_parser = ConfigParser.ConfigParser()
if os.path.exists( args.config ):
config_parser.read( args.config )
else:
return 1
if args.method == 'check_db':
return check_db( config_parser )
elif args.method == 'admin_user_info':
return admin_user_info( config_parser )
elif args.method == 'get_url':
return get_local_tool_shed_url( config_parser )
else:
return 1
parser = argparse.ArgumentParser()
parser.add_argument( '-c', '--config_file', dest='config', action='store', default='config/tool_shed.ini.sample' )
parser.add_argument( '-e', '--execute', dest='method', action='store', default='check_db' )
args = parser.parse_args()
if __name__ == '__main__':
exit( main( args ) )
| 2.1875 | 2 |
moto/dynamodbstreams/responses.py | jonnangle/moto-1 | 3 | 2368 | <reponame>jonnangle/moto-1<gh_stars>1-10
from __future__ import unicode_literals
from moto.core.responses import BaseResponse
from .models import dynamodbstreams_backends
from six import string_types
class DynamoDBStreamsHandler(BaseResponse):
@property
def backend(self):
return dynamodbstreams_backends[self.region]
def describe_stream(self):
arn = self._get_param("StreamArn")
return self.backend.describe_stream(arn)
def list_streams(self):
table_name = self._get_param("TableName")
return self.backend.list_streams(table_name)
def get_shard_iterator(self):
arn = self._get_param("StreamArn")
shard_id = self._get_param("ShardId")
shard_iterator_type = self._get_param("ShardIteratorType")
sequence_number = self._get_param("SequenceNumber")
# according to documentation sequence_number param should be string
if isinstance(sequence_number, string_types):
sequence_number = int(sequence_number)
return self.backend.get_shard_iterator(
arn, shard_id, shard_iterator_type, sequence_number
)
def get_records(self):
arn = self._get_param("ShardIterator")
limit = self._get_param("Limit")
if limit is None:
limit = 1000
return self.backend.get_records(arn, limit)
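# Illustrative usage sketch (not part of this module). Under moto's DynamoDB
# Streams mock, ordinary boto3 calls are routed to the handler above. The
# region and call below are assumptions for demonstration.
#
#   from moto import mock_dynamodbstreams
#   import boto3
#
#   @mock_dynamodbstreams
#   def list_mocked_streams():
#       client = boto3.client('dynamodbstreams', region_name='us-east-1')
#       return client.list_streams()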
| 2.234375 | 2 |
tools/mo/openvino/tools/mo/front/mxnet/zeros_ext.py | ytorzuk-altran/openvino | 1 | 2369 | <reponame>ytorzuk-altran/openvino
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs
from openvino.tools.mo.ops.const import Const
class ZerosFrontExtractor(FrontExtractorOp):
op = '_zeros'
enabled = True
@classmethod
def extract(cls, node):
attrs = get_mxnet_layer_attrs(node.symbol_dict)
shape = list(attrs.tuple('shape', int, None))
zero_shapes = []
for i, s in enumerate(shape):
if s == 0:
shape[i] = 1
zero_shapes.append(i)
update_attrs = {
'shape': np.ndarray(shape),
'value': np.zeros(shape),
'zero_shapes': zero_shapes
}
# update the attributes of the node
Const.update_node_stat(node, update_attrs)
return cls.enabled
| 2.015625 | 2 |
tools/jslib_builder.py | Jumpscale/jumpscale_portal8 | 0 | 2370 |
from JumpScale import j
class builder():
# @property
# def buildDir(self):
# return j.sal.fs.joinPaths(j.dirs.tmpDir, "jsbuilder")
@property
def cuisine(self):
return j.tools.cuisine.local
# ALL NOT NEEDED ANY LONGER USE bower
# def angular(self):
# version = "1.5.9"
# url = "http://code.angularjs.org/%s/angular-%s.zip" % (version, version)
# path = j.do.download(url, to='', overwrite=False, retry=3, timeout=0)
# dpath = j.sal.fs.joinPaths(self.buildDir, "angular")
# j.sal.fs.removeDirTree(dpath)
# z = j.tools.zipfile.get(path)
# z.extract(self.buildDir)
# z.close()
# j.sal.fs.renameDir(j.sal.fs.joinPaths(self.buildDir, "angular-%s" % sversion), dpath)
# # self._removeMapFiles(dpath)
#
# def _removeMapFiles(self, path):
# for item in j.sal.fs.find(path, "*.js.map"):
# item = "%s/%s" % (path, item)
# # print(item)
# j.sal.fs.remove(item)
#
# def bootstrap(self):
# version = "3.3.7"
# url = "https://github.com/twbs/bootstrap/releases/download/v%s/bootstrap-%s-dist.zip" % (version, version)
# path = j.do.download(url, to='', overwrite=False, retry=3, timeout=0)
# dpath = j.sal.fs.joinPaths(self.buildDir, "bootstrap")
# j.sal.fs.removeDirTree(dpath)
# z = j.tools.zipfile.get(path)
# z.extract(self.buildDir)
# z.close()
# j.sal.fs.renameDir(j.sal.fs.joinPaths(self.buildDir, "bootstrap-%s-dist" % version), dpath)
# # self._removeMapFiles(dpath)
#
# def codemirror(self):
#
# version = "5.9"
# url = "http://codemirror.net/codemirror-%s.zip" % version
# path = j.do.download(url, to='', overwrite=False, retry=3, timeout=0)
# dpath = j.sal.fs.joinPaths(self.buildDir, "codemirror")
# j.sal.fs.removeDirTree(dpath)
# z = j.tools.zipfile.get(path)
# z.extract(self.buildDir)
# z.close()
# j.sal.fs.renameDir(j.sal.fs.joinPaths(self.buildDir, "codemirror-%s" % version), dpath)
# @property
# def npm(self):
# if self._npm == False:
# if j.sal.fs.exists("%s/npm" % j.dirs.binDir, followlinks=True) == False:
# self.cuisine.apps.nodejs.install()
# self._npm = "%snpm" % j.dirs.binDir
# return self._npm
# @property
# def bower(self):
# if self._bower == False:
# if j.sal.fs.exists("%s/bower" % j.dirs.binDir, followlinks=True) == False:
# self.cuisine.apps.nodejs.install()
# self._bower = "%sbower" % j.dirs.binDir
# return self._bower
# def famous(self):
# url = "https://github.com/Famous/engine-seed"
# cdest = j.do.pullGitRepo(url)
# res = j.sal.process.executeWithoutPipe("cd %s;%s install" % (cdest, self.npm))
#
# def flatui(self):
# url = "https://github.com/designmodo/Flat-UI.git"
# cdest = j.do.pullGitRepo(url)
# print("npm/bower install")
# res = j.sal.process.executeWithoutPipe("cd %s;%s install;%s install" % (cdest, self.npm, self.bower))
#
# def do1(self):
# j.sal.fs.createDir(j.sal.fs.joinPaths(j.dirs.tmpDir, "jsbuilder"))
# if self.checkIPFS == False:
# self.getIPFS()
# # self.angular()
# # self.bootstrap()
# # self.codemirror()
# # self.famous()
# self.flatui()
def do(self):
if self.checkIPFS == False:
self.getIPFS()
# self.cuisine.apps.nodejs.bowerInstall(["jquery", "flatui", "bootstrap", "famous", "codemirror", "font-awesome", "jqplot",
# "underscore", "spin", "moment", "http://DlhSoft.com/Packages/DlhSoft.KanbanLibrary.zip", "jqwidgets", "d3", "angular-latest"])
cmd = "cd $tmpDir/bower;ipfs -c $cfgDir/ipfs/main/ add -r bower_components"
        print("IPFS upload, can take a couple of minutes")
res = self.cuisine.core.run(cmd)
def checkIPFS(self):
return j.sal.nettools.checkUrlReachable("http://localhost:5001/webui") == True
def getIPFS(self):
j.tools.cuisine.local.apps.ipfs.install()
j.tools.cuisine.local.apps.ipfs.start()
b = builder()
b.do()
| 2.140625 | 2 |
SimpleSimulator/samuelator.py | Anindya-Prithvi/CO_M21_Assignment | 3 | 2371 | <gh_stars>1-10
import sys
import warnings
import matplotlib.pyplot as plt
from parsets import IMACC, IMG, PROGC, REGFLPC, ExecE, plot
warnings.filterwarnings("ignore")
MEM = IMACC(sys.stdin.read()) # Load memory from stdin
PC = PROGC(0) # Start from the first instruction
RF = REGFLPC() # initialize register and flags
EE = ExecE(MEM)
IM = IMG()
halted = False
cycle = 0
if MEM.inst_mem == ["0" * 16 for i in range(256)]:
halted = True
while not halted:
Instruction = MEM.getData(PC) # Get current instruction
IM.imgx.append(cycle)
IM.imgy.append(PC.PC)
halted, new_PC, new_regs = EE.execute(Instruction, RF.asdct(), IM, cycle)
# Update RF compute new_PC
RF.update(new_regs, new_PC)
PC.dump()
# Print PC
RF.dump()
# Print RF state
PC.update(new_PC)
# Update PC
cycle += 1
MEM.dump() # Print memory state
# plotting
plot(plt, IM)
| 2.53125 | 3 |
utils/converters.py | LiReNa00/JDBot | 0 | 2372 | <filename>utils/converters.py<gh_stars>0
import discord
import re
import emoji
import contextlib
import typing
import datetime
from discord.ext import commands
from discord.http import Route
class BetterMemberConverter(commands.Converter):
async def convert(self, ctx, argument):
try:
user = await commands.MemberConverter().convert(ctx, argument)
except commands.MemberNotFound:
user = None
if user is None:
tag = re.match(r"#?(\d{4})", argument)
if tag:
if ctx.guild:
test = discord.utils.get(ctx.guild.members, discriminator=tag.group(1))
user = test or ctx.author
if ctx.guild is None:
user = await BetterUserconverter().convert(ctx, argument)
user = user or ctx.author
return user
class BetterUserconverter(commands.Converter):
async def convert(self, ctx, argument):
try:
user = await commands.UserConverter().convert(ctx, argument)
except commands.UserNotFound:
user = None
if not user and ctx.guild:
try:
user = await commands.MemberConverter().convert(ctx, argument)
except commands.MemberNotFound:
user = None
if user is None:
role = None
with contextlib.suppress(commands.RoleNotFound, commands.NoPrivateMessage):
role = await commands.RoleConverter().convert(ctx, argument)
if role:
if role.is_bot_managed():
user = role.tags.bot_id
user = await ctx.bot.try_user(user)
if user is None:
tag = re.match(r"#?(\d{4})", argument)
if tag and not ctx.bot.users:
test = discord.utils.get(ctx.bot.users, discriminator=tag.group(1))
user = test or ctx.author
return user
class EmojiBasic:
def __init__(self, id: int, url: str):
self.id = id
self.url = url
@classmethod
async def convert(cls, ctx, argument):
match = re.match(r"(?P<id>[0-9]{15,21})", argument)
if match:
emoji_id = match.group(0)
extentions = ["gif", "png"]
for x in extentions:
response = await ctx.bot.session.get(f"https://cdn.discordapp.com/emojis/{emoji_id}.{x}")
if response.ok:
return cls(emoji_id, response.real_url)
else:
return None
class EmojiConverter(commands.Converter):
async def convert(self, ctx: commands.Context, arg: str):
emojis = emoji.unicode_codes.EMOJI_UNICODE["en"].values()
try:
return await commands.PartialEmojiConverter().convert(ctx, arg)
except commands.PartialEmojiConversionFailure:
pass
if arg.rstrip("\N{variation selector-16}") in emojis or arg in emojis:
return discord.PartialEmoji(name=arg)
else:
raise commands.BadArgument(f"{arg} is not an emoji")
class ColorConverter(commands.Converter):
async def convert(self, ctx, argument):
try:
color = await commands.ColourConverter().convert(ctx, argument)
except commands.BadColourArgument:
color = None
if not color and not argument.isdigit():
argument = list(s for s in argument.split(" ") if s)
if color and argument.isdigit():
argument = int(argument)
if isinstance(argument, int):
if argument > 16777215:
await ctx.send(f"{argument} is not valid color, 16777215 will be used instead.")
argument = 16777215
color = discord.Colour(argument)
if isinstance(argument, list):
argument = sorted(filter(lambda x: x.isdigit(), argument))
argument = [int(n) for n in argument][:3]
try:
color = discord.Colour.from_rgb(*argument)
except TypeError:
color = None
if color:
if color.value > 16777215:
color = discord.Colour(16777215)
return color
def generate_snowflake(dt: typing.Optional[datetime.datetime] = None) -> int:
"""Returns a numeric snowflake pretending to be created at the given date but more accurate and random than time_snowflake.
If No dt is not passed, it makes one from the current time using utcnow.
Parameters
-----------
dt: :class:`datetime.datetime`
A datetime object to convert to a snowflake.
If naive, the timezone is assumed to be local time.
Returns
--------
:class:`int`
The snowflake representing the time given.
"""
dt = dt or discord.utils.utcnow()
return int(dt.timestamp() * 1000 - 1420070400000) << 22 | 0x3FFFFF
class ObjectPlus(discord.Object):
@property
def worker_id(self) -> int:
""":class:`int`: Returns the worker id that made the snowflake."""
return (self.id & 0x3E0000) >> 17
@property
def process_id(self) -> int:
""":class:`int`: Returns the process id that made the snowflake."""
return (self.id & 0x1F000) >> 12
@property
def increment_id(self) -> int:
""":class:`int`: Returns the increment id that made the snowflake."""
return self.id & 0xFFF
class ObjectPlusConverter(commands.converter.IDConverter[commands.Converter]):
async def convert(self, ctx: commands.Context, argument: str) -> ObjectPlus:
match = self._get_id_match(argument) or re.match(r"<(?:@(?:!|&)?|#)([0-9]{15,20})>$", argument)
if match is None:
raise discord.errors.ObjectNotFound(argument)
result = int(match.group(1))
return ObjectPlus(id=result)
# remove if edpy adds my pull request into the master.
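if __name__ == "__main__":
    # Hedged sketch (not part of the cog): round-trip a freshly generated
    # snowflake through ObjectPlus to show how the worker / process /
    # increment bits documented above are unpacked again.
    flake = generate_snowflake()
    obj = ObjectPlus(id=flake)
    print(flake, obj.worker_id, obj.process_id, obj.increment_id)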
| 2.421875 | 2 |
kissim/cli/encode.py | AJK-dev/kissim | 15 | 2373 | """
kissim.cli.encode
Encode structures (generate fingerprints) from CLI arguments.
"""
import numpy as np
from kissim.api import encode
from kissim.cli.utils import configure_logger
def encode_from_cli(args):
"""
Encode structures.
Parameters
----------
args : argsparse.Namespace
CLI arguments.
"""
configure_logger(args.output)
structure_klifs_ids = _parse_structure_klifs_ids(args.input)
encode(structure_klifs_ids, args.output, args.local, args.ncores)
def _parse_structure_klifs_ids(args_input):
"""
Parse structure KLIFS IDs.
Parameters
----------
args_input : list of str
Either path to txt file with structure KLIFS ID (one ID per row) or one or more structure
KLIFS IDs.
Returns
-------
list of int
List of structure KLIFS IDs.
"""
if len(args_input) == 1:
try:
structure_klifs_ids = [int(args_input[0])]
except ValueError:
structure_klifs_ids = np.genfromtxt(fname=args_input[0], dtype=int).tolist()
else:
structure_klifs_ids = [int(i) for i in args_input]
return structure_klifs_ids
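if __name__ == "__main__":
    # Hedged sketch (not part of the kissim CLI): show how raw CLI input is
    # normalised into a list of integer structure KLIFS IDs.
    print(_parse_structure_klifs_ids(["12347"]))         # single ID -> [12347]
    print(_parse_structure_klifs_ids(["12347", "109"]))  # several IDs -> [12347, 109]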
| 2.71875 | 3 |
distanceProfile.py | ZiyaoWei/pyMatrixProfile | 29 | 2374 | import numpy as np
from util import *
def naiveDistanceProfile(tsA, idx, m, tsB = None):
"""Return the distance profile of query against ts. Use the naive all pairs comparison algorithm.
>>> np.round(naiveDistanceProfile(np.array([0.0, 1.0, -1.0, 0.0]), 0, 4, np.array([-1, 1, 0, 0, -1, 1])), 3)
array([[ 2. , 2.828, 2. ],
[ 0. , 0. , 0. ]])
"""
selfJoin = False
if tsB is None:
selfJoin = True
tsB = tsA
query = tsA[idx : (idx + m)]
distanceProfile = []
n = len(tsB)
for i in range(n - m + 1):
distanceProfile.append(zNormalizedEuclideanDistance(query, tsB[i : i + m]))
if selfJoin:
        trivialMatchRange = (max(0, idx - m // 2), min(idx + m // 2 + 1, len(tsB)))
        # distanceProfile is a plain list here; switch to an ndarray so the
        # trivial-match zone around the query can be blanked out with np.inf.
        distanceProfile = np.asarray(distanceProfile, dtype=float)
        distanceProfile[trivialMatchRange[0] : trivialMatchRange[1]] = np.inf
return (distanceProfile, np.full(n - m + 1, idx, dtype = float))
def stampDistanceProfile(tsA, idx, m, tsB = None):
"""
>>> np.round(stampDistanceProfile(np.array([0.0, 1.0, -1.0, 0.0]), 0, 4, np.array([-1, 1, 0, 0, -1, 1])), 3)
array([[ 2. , 2.828, 2. ],
[ 0. , 0. , 0. ]])
"""
selfJoin = False
if tsB is None:
selfJoin = True
tsB = tsA
query = tsA[idx : (idx + m)]
n = len(tsB)
distanceProfile = mass(query, tsB)
if selfJoin:
        trivialMatchRange = (max(0, idx - m // 2), min(idx + m // 2 + 1, len(tsB)))
        distanceProfile[trivialMatchRange[0] : trivialMatchRange[1]] = np.inf
return (distanceProfile, np.full(n - m + 1, idx, dtype = float))
if __name__ == "__main__":
import doctest
doctest.testmod()
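    # Hedged usage sketch beyond the doctests above: compare the first window
    # of a short query series against a longer series (values are made up).
    tsA = np.array([0.0, 1.0, -1.0, 0.0, 0.5, -0.5])
    tsB = np.array([-1.0, 1.0, 0.0, 0.0, -1.0, 1.0, 0.5, -0.5])
    dp, indices = stampDistanceProfile(tsA, 0, 4, tsB)
    print("distance profile:", np.round(dp, 3))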
| 2.65625 | 3 |
test_0000.py | theo-dim/cash-gels-thesis | 0 | 2375 | import matplotlib.pyplot as plt
import numpy as np
from sklearn import linear_model
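# --- Hedged sketch: the original file is an empty import stub, so the fit
# --- below only illustrates the imported tools, not the thesis analysis.
if __name__ == "__main__":
    X = np.arange(10, dtype=float).reshape(-1, 1)    # toy predictor
    y = 2.0 * X.ravel() + 1.0                        # toy linear response
    model = linear_model.LinearRegression().fit(X, y)
    plt.scatter(X, y, label="data")
    plt.plot(X, model.predict(X), label="fit")
    plt.legend()
    plt.show()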
| 1.203125 | 1 |
vnTrader/uiMainWindow.py | bttt123/TradeSim | 0 | 2376 | # encoding: UTF-8
from builtins import str
import psutil
# import sys
# PyQt 4/5 compatibility
try:
from PyQt4.QtGui import QMainWindow, QDialog, QDockWidget, QAction, QHeaderView, QMessageBox, QLabel, QVBoxLayout
from PyQt4 import QtCore
except ImportError:
from PyQt5.QtWidgets import QMainWindow, QDialog, QDockWidget, QAction, QHeaderView, QMessageBox, QLabel, QVBoxLayout
from PyQt5 import QtCore
from uiBasicWidget import *
import uiBasicWidget as wgs
#from . import uiBasicWidget as wgs
########################################################################
class MainWindow(QMainWindow):
"""主窗口"""
signalStatusBar = QtCore.pyqtSignal(type(Event()))
# ----------------------------------------------------------------------
def __init__(self, mainEngine, eventEngine, app, sheets):
"""Constructor"""
super(MainWindow, self).__init__()
self.mainEngine = mainEngine
self.eventEngine = eventEngine
self.app = app
self.sheets = sheets
        self.widgetDict = {}  # dict used to keep the child windows
self.initUi()
self.eventEngine.register(EVENT_TITLE, self.updateTitle)
self.sid = None
def updateTitle(self, event):
(user, stratid) = event.dict_['data']
#self.setWindowTitle('VnTrader: ' + str(user) + "/" + str(stratid))
self.sid = stratid
# ----------------------------------------------------------------------
def initUi(self):
"""初始化界面"""
self.setWindowTitle('VnTrader')
self.initCentral()
self.initMenu()
# self.initStatusBar()
def showLogin(self):
self.connectQuantOS()
# ----------------------------------------------------------------------
def initCentral(self):
"""初始化中心区域"""
widgetTradingW, dockTradingW = self.createDock(wgs.TradingWidget, u'交易', QtCore.Qt.LeftDockWidgetArea)
widgetMarketM, dockMarketM = self.createDock(wgs.MarketMonitor, u'行情', QtCore.Qt.RightDockWidgetArea)
widgetPositionM, dockPositionM = self.createDock(wgs.PositionMonitor, u'持仓', QtCore.Qt.RightDockWidgetArea)
widgetAccountM, dockAccountM = self.createDock(wgs.AccountMonitor, u'资金', QtCore.Qt.BottomDockWidgetArea)
widgetContractM, dockContractM = self.createDock(wgs.ContractMonitor, u'合约', QtCore.Qt.BottomDockWidgetArea)
widgetLogM, dockLogM = self.createDock(wgs.LogMonitor, u'日志', QtCore.Qt.BottomDockWidgetArea)
widgetTradeM, dockTradeM = self.createDock(wgs.TradeMonitor, u'成交', QtCore.Qt.BottomDockWidgetArea)
widgetOrderM, dockOrderM = self.createDock(wgs.OrderMonitor, u'委托', QtCore.Qt.BottomDockWidgetArea)
self.tabifyDockWidget(dockContractM, dockTradeM)
self.tabifyDockWidget(dockTradeM, dockOrderM)
self.tabifyDockWidget(dockAccountM, dockLogM)
dockOrderM.raise_()
dockLogM.raise_()
        # wire up signals between the widgets
widgetPositionM.itemDoubleClicked.connect(widgetTradingW.closePosition)
widgetMarketM.itemDoubleClicked.connect(widgetTradingW.fillSymbol)
# ----------------------------------------------------------------------
def initMenu(self):
"""初始化菜单"""
# 创建操作
connectQuantOSAction = QAction(u'连接和切换策略', self)
connectQuantOSAction.triggered.connect(self.connectQuantOS)
exitAction = QAction(u'退出', self)
exitAction.triggered.connect(self.close)
aboutAction = QAction(u'关于', self)
aboutAction.triggered.connect(self.openAbout)
colorAction = QAction(u'变色', self)
colorAction.triggered.connect(self.changeColor)
        # create the menus
menubar = self.menuBar()
        # only show entries for gateway interfaces that actually exist
sysMenu = menubar.addMenu(u'系统')
if 'quantos' in self.mainEngine.gatewayDict:
sysMenu.addAction(connectQuantOSAction)
sysMenu.addSeparator()
sysMenu.addAction(exitAction)
        # help menu
helpMenu = menubar.addMenu(u'帮助')
helpMenu.addAction(aboutAction)
helpMenu.addAction(colorAction)
# ----------------------------------------------------------------------
def initStatusBar(self):
"""初始化状态栏"""
self.statusLabel = QLabel()
self.statusLabel.setAlignment(QtCore.Qt.AlignLeft)
self.statusBar().addPermanentWidget(self.statusLabel)
self.statusLabel.setText(self.getCpuMemory())
self.sbCount = 0
        self.sbTrigger = 10  # refresh once every 10 seconds
self.signalStatusBar.connect(self.updateStatusBar)
self.eventEngine.register(EVENT_TIMER, self.signalStatusBar.emit)
# ----------------------------------------------------------------------
def updateStatusBar(self, event):
"""在状态栏更新CPU和内存信息"""
self.sbCount += 1
if self.sbCount == self.sbTrigger:
self.sbCount = 0
self.statusLabel.setText(self.getCpuMemory())
# ----------------------------------------------------------------------
def getCpuMemory(self):
"""获取CPU和内存状态信息"""
cpuPercent = psutil.cpu_percent()
memoryPercent = psutil.virtual_memory().percent
return u'CPU使用率:%d%% 内存使用率:%d%%' % (cpuPercent, memoryPercent)
# ----------------------------------------------------------------------
def connectQuantOS(self):
self.mainEngine.connect('quantos')
# ----------------------------------------------------------------------
def openAbout(self):
"""打开关于"""
try:
self.widgetDict['aboutW'].show()
except KeyError:
self.widgetDict['aboutW'] = AboutWidget(self)
self.widgetDict['aboutW'].show()
# ----------------------------------------------------------------------
def closeEvent(self, event):
"""关闭事件"""
reply = QMessageBox.question(self, u'退出',
u'确认退出?', QMessageBox.Yes |
QMessageBox.No, QMessageBox.No)
if reply == QMessageBox.Yes:
for widget in list(self.widgetDict.values()):
widget.close()
self.mainEngine.exit()
event.accept()
else:
event.ignore()
# ----------------------------------------------------------------------
def createDock(self, widgetClass, widgetName, widgetArea):
"""创建停靠组件"""
widget = widgetClass(self.mainEngine, self.eventEngine)
dock = QDockWidget(widgetName)
dock.setWidget(widget)
dock.setObjectName(widgetName)
dock.setFeatures(dock.DockWidgetFloatable | dock.DockWidgetMovable)
self.addDockWidget(widgetArea, dock)
return widget, dock
def changeColor(self):
self.app.setStyleSheet(self.sheets[1])
self.sheets = [self.sheets[1], self.sheets[0]]
########################################################################
class AboutWidget(QDialog):
"""显示关于信息"""
# ----------------------------------------------------------------------
def __init__(self, parent=None):
"""Constructor"""
super(AboutWidget, self).__init__(parent)
self.initUi()
# ----------------------------------------------------------------------
def initUi(self):
""""""
self.setWindowTitle(u'关于VnTrader')
text = u"""
quantos trade client
"""
label = QLabel()
label.setText(text)
label.setMinimumWidth(500)
vbox = QVBoxLayout()
vbox.addWidget(label)
self.setLayout(vbox)
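# --- Hedged usage sketch (not part of the original vnTrader sources) --------
# MainWindow expects an initialised main engine, its event engine, the running
# QApplication and a list of two Qt style sheets (normal / alternate colour).
# A launcher along these lines is assumed; the engine classes come from the
# surrounding TradeSim/vnTrader project and are only referenced here.
#
#     app = QApplication(sys.argv)
#     sheets = [open('dark.qss').read(), open('light.qss').read()]   # hypothetical files
#     mainEngine = MainEngine()        # hypothetical vnTrader engine factory
#     mw = MainWindow(mainEngine, mainEngine.eventEngine, app, sheets)
#     mw.showMaximized()
#     mw.showLogin()
#     sys.exit(app.exec_())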
| 1.96875 | 2 |
line_notify_core.py | ficgra/PChome-alertor | 1 | 2377 | <gh_stars>1-10
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import requests
import json
import re
from flask import Flask, request, abort
import mysql.connector as mariadb
from mysql.connector import Error
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import (
MessageEvent, TextMessage, TextSendMessage, FollowEvent,
)
app = Flask(__name__)
line_bot_api = LineBotApi('')
handler = WebhookHandler('')
@app.route("/", methods=['GET'])
def index():
return 'OK!'
# LINE official account: test event for /callback
@app.route("/callback", methods=['POST'])
def callback():
# get X-Line-Signature header value
signature = request.headers['X-Line-Signature']
# get request body as text
body = request.get_data(as_text=True)
app.logger.info("Request body: " + body)
# handle webhook body
try:
handler.handle(body, signature)
except InvalidSignatureError:
print("Invalid signature. Please check your channel access token/channel secret.")
abort(400)
return 'OK'
# event triggered when the LINE official account receives a message
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
get_message = event.message.text
print(get_message)
user_id = event.source.user_id
register_url = 'https://notify-bot.line.me/oauth/authorize?response_type=code&scope=notify&response_mode=form_post&client_id="id"&redirect_uri=https://line.husan.cc/register&state=' + user_id
mage = re.split(r'[\s]\s*',get_message)
try:
if mage[0] == "註冊":
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text=register_url))
elif 'add' == mage[0]:
try:
notice = add_item(mage[1],user_id,mage[2])
except:
notice = add_item(mage[1],user_id,None)
line_bot_api.reply_message(event.reply_token,TextSendMessage(text=notice))
elif 'del' == mage[0]:
notice = del_item(mage[1],user_id)
line_bot_api.reply_message(event.reply_token,TextSendMessage(text=notice))
elif 'list' == mage[0]:
item_list ,price_list= search_sub(user_id)
notice = '您訂閱的項目有:'
for i in range(len(item_list)):
notice+='\n'
notice=notice + item_list[i] +'\t' +str(price_list[i])
line_bot_api.reply_message(event.reply_token,TextSendMessage(text=notice))
elif 'send' == mage[0]:
acc_token = get_notify_id(user_id)
status = sent_message(mage[1],acc_token)
if status == 200:
line_bot_api.reply_message(event.reply_token,TextSendMessage(text='send OK!'))
else:
line_bot_api.reply_message(event.reply_token,TextSendMessage(text='請輸入指令:\nlist \n└查詢通知項目。\nadd 商品ID 價格 \n└新增商品通知,低於設定價格時通知。\nEX:add DYAJID-A900AVJ4G 500\ndel 商品ID \n└刪除商品通知。\nEX:del DYAJID-A900AVJ4G'))
except BaseException as e:
line_bot_api.reply_message(event.reply_token,TextSendMessage(text='指令錯誤,請重新確認!'))
print(e)
# get user id when reply
user_id = event.source.user_id
print("user_id =", user_id)
profile = line_bot_api.get_profile(user_id)
# LINE Notify posts back to /register during registration
@app.route("/register",methods=['POST'])  # registration event
def register():
if request.method == 'POST':
        code = request.form.get('code')  # use this code to request an access_token
        print("code = ", code)
        state = request.form.get('state')  # state carries the LINE user_id
        print("user_id = ", state)
        profile = line_bot_api.get_profile(state)
        user_name = profile.display_name
        print("username = ", user_name)  # account name
        access_token = get_token(code)  # access_token used to push messages to this user
        print("access_token = ", access_token)
        r_code = send_test_message(access_token)  # send a test notification
        if r_code == 200:
            save_profile(user_name, code, state, access_token)  # persist to the database
return '發送成功'
else:
return '發送失敗'
# send a notification when the bot is added as a friend
@handler.add(FollowEvent)
def handle_follow(event):
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text="感謝訂閱!請輸入\"註冊\"啟動服務。"))
# post the user's code to notify-bot to obtain an access_token
def get_token(code):
headers = {
"Content-Type":"application/x-www-form-urlencoded"
}
params = {
"grant_type":"authorization_code",
"code": code,
"redirect_uri":"https://line.husan.cc/register", # host_ip
"client_id":"client_id", #notify client_id
"client_secret":"client_secret" #notify client_secret
}
r = requests.post('https://notify-bot.line.me/oauth/token',headers=headers,params=params)
source = json.loads(r.text)
access_token = source['access_token']
return access_token
# send a test message to the user's LINE Notify
def send_test_message(access_token):
headers = {
"Authorization":"Bearer " + str(access_token),
"Content-Type":"application/x-www-form-urlencoded",
"notificationDisabled":"True"
}
params = {
"message":"\n帳號連結成功"
}
r = requests.post("https://notify-api.line.me/api/notify",headers=headers,params=params)
return r.status_code
# store the user profile in the database
def save_profile(username, code, user_id, access_token):
try:
connection = mariadb.connect(host='192.168.1.10', user='admin', port='3307', password='pw', database='line_notify')
if connection.is_connected():
db_Info = connection.get_server_info()
print("資料庫版本:", db_Info)
cursor = connection.cursor()
cursor.execute("INSERT INTO user_info (id, username, code, user_id, access_token) VALUES (null,'%s','%s','%s','%s')"%(username, code, user_id, access_token))
            connection.commit()  # save
            cursor.execute("SELECT * FROM user_info")
            # list the queried rows
for i in cursor:
print(i)
except Error as e:
print("資料庫連接失敗0:", e)
finally:
if (connection.is_connected()):
cursor.close()
connection.close()
#print("資料庫連線已關閉")
# add a subscription item
def add_item(item_id, user_id,w_price):
try:
connection = mariadb.connect(host='192.168.1.10', user='admin', port='3307', password='pw', database='line_notify')
if connection.is_connected():
cursor = connection.cursor()
acc_token = get_notify_id(user_id)
try:
cursor.execute("INSERT INTO sub_list (item_id, w_price ,user_id, acc_token) VALUES ('%s','%d','%s','%s')"%(item_id, int(w_price) ,user_id, acc_token))
except:
cursor.execute("INSERT INTO sub_list (item_id,user_id, acc_token) VALUES ('%s','%s','%s')"%(item_id ,user_id, acc_token))
            connection.commit()  # save
return 'Add Done!'
except Error as e:
print("資料庫連接失敗2:", e)
finally:
if (connection.is_connected()):
cursor.close()
connection.close()
# delete a subscription item
def del_item(item_id, user_id):
try:
connection = mariadb.connect(host='192.168.1.10', user='admin', port='3307', password='pw', database='line_notify')
if connection.is_connected():
cursor = connection.cursor()
cursor.execute("DELETE FROM sub_list WHERE item_id = '%s' AND user_id = '%s'"%(item_id,user_id))
            connection.commit()  # save
return 'Delete Done!'
except Error as e:
print("資料庫連接失敗3:", e)
finally:
if (connection.is_connected()):
cursor.close()
connection.close()
# query the user's subscription items
def search_sub(user_id):
try:
connection = mariadb.connect(host='192.168.1.10', user='admin', port='3307', password='pw', database='line_notify')
if connection.is_connected():
cursor = connection.cursor()
cursor.execute("SELECT item_id , w_price FROM sub_list WHERE user_id LIKE '%s'"%(user_id))
sub_item = cursor.fetchall()
price_list = [item[1] for item in sub_item]
item_list = [item[0] for item in sub_item]
return item_list,price_list
except Error as e:
print("資料庫連接失敗1:", e)
finally:
if (connection.is_connected()):
cursor.close()
connection.close()
# get the user's notify access_token
def get_notify_id(user_id):
try:
connection = mariadb.connect(host='192.168.1.10', user='admin', port='3307', password='pw', database='line_notify')
if connection.is_connected():
cursor = connection.cursor()
cursor.execute("select database();")
record = cursor.fetchone()
cursor.execute("SELECT access_token FROM user_info WHERE user_id LIKE '%s'"%(user_id))
acc_token = cursor.fetchall()
return acc_token[0][0]
except Error as e:
print("資料庫連接失敗4:", e)
finally:
if (connection.is_connected()):
cursor.close()
connection.close()
# send a message via LINE Notify
def sent_message(message,access_token):
headers = {
"Authorization":"Bearer " + access_token,
"Content-Type":"application/x-www-form-urlencoded"
}
params = {
"message":message
}
r = requests.post("https://notify-api.line.me/api/notify",headers=headers,params=params)
print(r.status_code)
return r.status_code
if __name__ == "__main__":
app.run('0.0.0.0',port=3000)
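# --- Hedged deployment notes (not from the original project) ----------------
# * LineBotApi('') / WebhookHandler('') above are placeholders: the channel
#   access token and channel secret from the LINE developers console go there.
# * LINE must be able to reach /callback over HTTPS, e.g. via a reverse proxy
#   forwarding https://<your-domain>/callback to this Flask app on port 3000.
# * A quick manual check of the push helper, assuming a valid personal
#   LINE Notify token, could look like:
#
#       sent_message("hello from shell", "<personal notify access token>")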
| 2.078125 | 2 |
sfc_models/examples/scripts/intro_X_XX_sim_multiplier.py | MachineLP/SFC_models | 21 | 2378 | <reponame>MachineLP/SFC_models<gh_stars>10-100
# coding=utf-8
from sfc_models.objects import *
from sfc_models.examples.Quick2DPlot import Quick2DPlot
register_standard_logs('output', __file__)
mod = Model()
country = Country(mod, 'CO')
Household(country, 'HH')
ConsolidatedGovernment(country, 'GOV')
FixedMarginBusiness(country, 'BUS', profit_margin=.025)
Market(country, 'GOOD')
Market(country, 'LAB')
TaxFlow(country, 'TAX', taxrate=.2)
# At time period 25, cut spending to 17 (from 20)
mod.AddExogenous('GOV', 'DEM_GOOD', [20.,]* 25 + [17.,]*20)
mod.AddGlobalEquation('DEBT_GDP', 'DEBT-TO-GDP RATIO', '-100.*GOV__F/BUS__SUP_GOOD')
mod.AddGlobalEquation('DEFICIT', 'DEFICIT', '-1.*GOV__INC')
mod.EquationSolver.MaxTime = 40
mod.main()
k = mod.GetTimeSeries('k')
Rat = mod.GetTimeSeries('DEBT_GDP')
Def = mod.GetTimeSeries('GOV__INC')
spend = mod.GetTimeSeries('GOV__DEM_GOOD')
p = Quick2DPlot([k, k], [spend, Def], title='Spending and Deficit', filename='intro_X_XX_multiplier_deficit.png',
run_now=False)
p.Legend = ['G', 'Deficit']
p.LegendPos = 'center left'
p.DoPlot()
Quick2DPlot(k, Rat, title='Debt-to-GDP Ratio', filename='intro_X_XX_multiplier_debt_gdp.png')
| 2.15625 | 2 |
auth_framework/settings.py | DrChai/django-auth-framework | 0 | 2379 | from importlib import import_module
from django.conf import settings
from django.core.signals import setting_changed
SOCIALACCOUNT_MODEL = getattr(settings, "REST_AUTH_SOCIALACCOUNT_MODEL", "auth_framework.SocialAccount")
DEFAULTS = {
'UNIQUE_EMAIL': True,
'RESET_PASSWORD_BY': 'pin', # 'url'| 'pin'
'SERIALIZERS': {
# 'SOCIAL_LOGIN_SERIALIZER': 'auth.social.serializers.DefaultSocialLoginSerializer',
'SIGNUP_SERIALIZER': 'auth_framework.serializers.signup_serializers.DefaultSignUpSerializer',
'USERINFO_SERIALIZER': None
},
'SOCIALACCOUNT_MODEL': SOCIALACCOUNT_MODEL,
'SOCIALACCOUNT_ADMIN_CLASS': "auth_framework.admin.SocialAccountAdmin",
# SOCIAL LOGINS
'SOCIAL_CALLBACK_URL': None, # eg: 'https://developers.google.com/oauthplayground'
'SOCIAL_AUTO_SIGNUP': False,
# SIGN UP
# 'SIGNUP_EMAIL_VERIFICATION': 'none', # trimmed out email verification celery task in closed source. fewer usage
'SIGNUP_USERNAME_REQUIRED': False,
'SIGNUP_USERNAME_VALIDATORS': [],
'USE_PASSWORD_TWICE_VALIDATION': True,
# ADVANCES
'USE_PHONENUMBER_FIELD': False,
'USE_CELERY_EMAIL': False,
'USE_ID_TOKEN': True,
'OAUTH_SAVE_ID_TOKEN': False
}
def import_callable(path_or_callable):
if path_or_callable is None:
return None
if hasattr(path_or_callable, '__call__'):
return path_or_callable
else:
assert isinstance(path_or_callable, str)
package, attr = path_or_callable.rsplit('.', 1)
return getattr(import_module(package), attr)
class AuthSettings:
"""
"""
def __init__(self, user_settings=None, defaults=None):
if user_settings:
self._user_settings = user_settings
self.defaults = defaults or DEFAULTS
self._cached_attrs = set()
@property
def user_settings(self):
if not hasattr(self, '_user_settings'):
self._user_settings = getattr(settings, 'AUTH_FRAMEWORK', {})
return self._user_settings
@property
def username_validators(self):
from django.core.exceptions import ImproperlyConfigured
from django.contrib.auth import get_user_model
validators = self.user_settings.get("SIGNUP_USERNAME_VALIDATORS", None)
if validators:
ret = []
if not isinstance(validators, list):
raise ImproperlyConfigured(
"SIGNUP_USERNAME_VALIDATORS is expected to be a list"
)
for path in validators:
pkg, attr = path.rsplit(".", 1)
validator = getattr(import_module(pkg), attr)
ret.append(validator())
else:
ret = (
get_user_model()._meta.get_field('username').validators
)
return ret
def serializers(self, data):
# Check if present in user settings
for key, value in data.items():
data[key] = import_callable(value)
return data
def __getattr__(self, attr):
if attr not in self.defaults:
raise AttributeError("Invalid setting: '%s'" % attr)
try:
# Check if present in user settings
val = self.user_settings[attr]
if isinstance(val, dict):
val = self.defaults[attr].copy()
val.update(self.user_settings[attr])
except KeyError:
# Fall back to defaults
val = self.defaults[attr]
if attr == 'SERIALIZERS':
val = self.serializers(val)
# Cache the result
self._cached_attrs.add(attr)
setattr(self, attr, val)
return val
def reload(self):
for attr in self._cached_attrs:
delattr(self, attr)
self._cached_attrs.clear()
if hasattr(self, '_user_settings'):
delattr(self, '_user_settings')
app_settings = AuthSettings(None, DEFAULTS)
def reload_app_settings(*args, **kwargs):
setting = kwargs['setting']
if setting == 'AUTH_FRAMEWORK':
app_settings.reload()
setting_changed.connect(reload_app_settings)
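# --- Hedged usage sketch (not part of the package) ---------------------------
# A host project overrides individual keys from DEFAULTS in its Django settings
# and reads them back through the lazy ``app_settings`` object; dotted paths in
# SERIALIZERS are resolved to callables via ``import_callable``:
#
#     # settings.py (of the host project)
#     AUTH_FRAMEWORK = {
#         "RESET_PASSWORD_BY": "url",
#         "SERIALIZERS": {
#             "SIGNUP_SERIALIZER": "myapp.serializers.SignUpSerializer",  # hypothetical path
#         },
#     }
#
#     # anywhere after Django setup
#     from auth_framework.settings import app_settings
#     signup_cls = app_settings.SERIALIZERS["SIGNUP_SERIALIZER"]   # imported class
#     assert app_settings.UNIQUE_EMAIL is True                     # falls back to DEFAULTS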
| 1.90625 | 2 |
shorty/models.py | gkiserpong/shorty | 0 | 2380 | <filename>shorty/models.py
from django.db import models
from shorty.manager import UrlManager
class Url(models.Model):
long_url = models.URLField()
short_id = models.SlugField()
counter = models.IntegerField(default=0)
def __str__(self):
return "%s -- %s" % (self.long_url, self.short_id)
objects = UrlManager() | 2.390625 | 2 |
test/sec_full.py | time-track-tool/time-track-tool | 0 | 2381 | security = """
New Web users get the Roles "User,Nosy"
New Email users get the Role "User"
Role "admin":
User may access the rest interface (Rest Access)
User may access the web interface (Web Access)
User may access the xmlrpc interface (Xmlrpc Access)
User may create everything (Create)
User may edit everything (Edit)
User may manipulate user Roles through the web (Web Roles)
User may restore everything (Restore)
User may retire everything (Retire)
User may use the email interface (Email Access)
User may view everything (View)
Role "anonymous":
User may access the web interface (Web Access)
Role "cc-permission":
(Restore for "cost_center_permission_group" only)
(Retire for "cost_center_permission_group" only)
User is allowed to create cost_center_permission_group (Create for "cost_center_permission_group" only)
User is allowed to edit cost_center_permission_group (Edit for "cost_center_permission_group" only)
Role "contact":
User is allowed to create contact (Create for "contact" only)
User is allowed to edit contact (Edit for "contact" only)
Role "controlling":
User is allowed Edit on (Edit for "daily_record": ('status', 'time_record') only)
User is allowed Edit on (Edit for "sap_cc": ('group_lead', 'team_lead') only)
User is allowed Edit on (Edit for "time_project": ('group_lead', 'team_lead') only)
User is allowed Edit on (Edit for "time_wp": ('project',) only)
User is allowed View on (View for "user": ('roles',) only)
User is allowed View on (View for "user_dynamic": ('id', 'sap_cc', 'user', 'valid_from', 'valid_to') only)
User is allowed to access contract_type (View for "contract_type" only)
User is allowed to access daily_record (View for "daily_record" only)
User is allowed to access daily_record_freeze (View for "daily_record_freeze" only)
User is allowed to access leave_submission (View for "leave_submission" only)
User is allowed to access overtime_correction (View for "overtime_correction" only)
User is allowed to access query (View for "query" only)
User is allowed to access time_project (View for "time_project" only)
User is allowed to access time_record (View for "time_record" only)
User is allowed to access time_report (View for "time_report" only)
User is allowed to access time_wp (View for "time_wp" only)
User is allowed to access vacation_correction (View for "vacation_correction" only)
User is allowed to create cost_center (Create for "cost_center" only)
User is allowed to create cost_center_group (Create for "cost_center_group" only)
User is allowed to create cost_center_status (Create for "cost_center_status" only)
User is allowed to create department (Create for "department" only)
User is allowed to create organisation (Create for "organisation" only)
User is allowed to create product_family (Create for "product_family" only)
User is allowed to create public_holiday (Create for "public_holiday" only)
User is allowed to create query (Create for "query" only)
User is allowed to create reporting_group (Create for "reporting_group" only)
User is allowed to create sap_cc (Create for "sap_cc" only)
User is allowed to create time_activity (Create for "time_activity" only)
User is allowed to create time_activity_perm (Create for "time_activity_perm" only)
User is allowed to create time_record (Create for "time_record" only)
User is allowed to create work_location (Create for "work_location" only)
User is allowed to edit cost_center (Edit for "cost_center" only)
User is allowed to edit cost_center_group (Edit for "cost_center_group" only)
User is allowed to edit cost_center_status (Edit for "cost_center_status" only)
User is allowed to edit department (Edit for "department" only)
User is allowed to edit organisation (Edit for "organisation" only)
User is allowed to edit product_family (Edit for "product_family" only)
User is allowed to edit public_holiday (Edit for "public_holiday" only)
User is allowed to edit query (Edit for "query" only)
User is allowed to edit reporting_group (Edit for "reporting_group" only)
User is allowed to edit sap_cc (Edit for "sap_cc" only)
User is allowed to edit time_activity (Edit for "time_activity" only)
User is allowed to edit time_activity_perm (Edit for "time_activity_perm" only)
User is allowed to edit time_record (Edit for "time_record" only)
User is allowed to edit work_location (Edit for "work_location" only)
Role "doc_admin":
User is allowed Edit on (Edit for "department": ('doc_num',) only)
User is allowed to create artefact (Create for "artefact" only)
User is allowed to create doc (Create for "doc" only)
User is allowed to create doc_category (Create for "doc_category" only)
User is allowed to create doc_status (Create for "doc_status" only)
User is allowed to create product_type (Create for "product_type" only)
User is allowed to create reference (Create for "reference" only)
User is allowed to edit artefact (Edit for "artefact" only)
User is allowed to edit doc (Edit for "doc" only)
User is allowed to edit doc_category (Edit for "doc_category" only)
User is allowed to edit doc_status (Edit for "doc_status" only)
User is allowed to edit product_type (Edit for "product_type" only)
User is allowed to edit reference (Edit for "reference" only)
Role "dom-user-edit-facility":
Users may view/edit user records for ad_domain for which they are in the domain_permission for the user (Edit for "user": ['room'] only)
Users may view/edit user records for ad_domain for which they are in the domain_permission for the user (View for "user": ['room'] only)
Role "dom-user-edit-gtt":
(Search for "user_dynamic" only)
May only view/edit records with the correct domain (Edit for "user_dynamic" only)
May only view/edit records with the correct domain (View for "user_dynamic" only)
User is allowed to access contract_type (View for "contract_type" only)
User is allowed to create user (Create for "user" only)
User is allowed to create user_contact (Create for "user_contact" only)
User is allowed to create user_dynamic (Create for "user_dynamic" only)
User is allowed to edit user_contact (Edit for "user_contact" only)
Users may view user_dynamic records for ad_domain for which they are in the domain_permission for the user (View for "user_dynamic" only)
Users may view/edit user records for ad_domain for which they are in the domain_permission for the user (Edit for "user": ['contacts', 'csv_delimiter', 'department_temp', 'entry_date', 'firstname', 'hide_message_files', 'job_description', 'lastname', 'lunch_duration', 'lunch_start', 'nickname', 'pictures', 'position_text', 'room', 'sex', 'status', 'subst_active', 'substitute', 'supervisor', 'sync_foreign_key', 'timezone', 'tt_lines', 'username', 'vie_user'] only)
Users may view/edit user records for ad_domain for which they are in the domain_permission for the user (View for "user": ['contacts', 'csv_delimiter', 'department_temp', 'entry_date', 'firstname', 'hide_message_files', 'job_description', 'lastname', 'lunch_duration', 'lunch_start', 'nickname', 'pictures', 'position_text', 'room', 'sex', 'status', 'subst_active', 'substitute', 'supervisor', 'sync_foreign_key', 'timezone', 'tt_lines', 'username', 'vie_user'] only)
Role "dom-user-edit-hr":
(Search for "user_dynamic" only)
May only view/edit records with the correct domain (Edit for "user_dynamic" only)
May only view/edit records with the correct domain (View for "user_dynamic" only)
User is allowed to access contract_type (View for "contract_type" only)
User is allowed to create user_contact (Create for "user_contact" only)
User is allowed to create user_dynamic (Create for "user_dynamic" only)
User is allowed to edit user_contact (Edit for "user_contact" only)
Users may view user_dynamic records for ad_domain for which they are in the domain_permission for the user (View for "user_dynamic" only)
Users may view/edit user records for ad_domain for which they are in the domain_permission for the user (Edit for "user": ['clearance_by', 'contacts', 'csv_delimiter', 'entry_date', 'firstname', 'hide_message_files', 'job_description', 'lastname', 'lunch_duration', 'lunch_start', 'nickname', 'pictures', 'position_text', 'reduced_activity_list', 'roles', 'room', 'sex', 'status', 'subst_active', 'substitute', 'supervisor', 'timezone', 'tt_lines', 'vie_user'] only)
Users may view/edit user records for ad_domain for which they are in the domain_permission for the user (View for "user": ['clearance_by', 'contacts', 'csv_delimiter', 'entry_date', 'firstname', 'hide_message_files', 'job_description', 'lastname', 'lunch_duration', 'lunch_start', 'nickname', 'pictures', 'position_text', 'reduced_activity_list', 'roles', 'room', 'sex', 'status', 'subst_active', 'substitute', 'supervisor', 'timezone', 'tt_lines', 'vie_user'] only)
Role "dom-user-edit-office":
User is allowed to create user_contact (Create for "user_contact" only)
User is allowed to edit user_contact (Edit for "user_contact" only)
Users may view/edit user records for ad_domain for which they are in the domain_permission for the user (Edit for "user": ['contacts', 'position_text', 'room'] only)
Users may view/edit user records for ad_domain for which they are in the domain_permission for the user (View for "user": ['contacts', 'position_text', 'room'] only)
Role "external":
(Search for "ext_tracker_state": ('id', 'issue') only)
(Search for "user": ('id', 'nickname', 'username') only)
External users are allowed to access issue if they are on the list of allowed external users or there is a transitive permission via containers (Edit for "issue": ['activity', 'actor', 'area', 'category', 'closed', 'composed_of', 'creation', 'creator', 'cur_est_begin', 'cur_est_end', 'deadline', 'depends', 'doc_issue_status', 'earliest_start', 'effective_prio', 'effort_hours', 'external_users', 'files', 'files_affected', 'fixed_in', 'id', 'keywords', 'kind', 'maturity_index', 'messages', 'needs', 'nosy', 'numeric_effort', 'part_of', 'planned_begin', 'planned_end', 'priority', 'release', 'responsible', 'safety_level', 'severity', 'status', 'superseder', 'test_level', 'title'] only)
External users are allowed to access issue if they are on the list of allowed external users or there is a transitive permission via containers (View for "issue": ['activity', 'actor', 'area', 'category', 'closed', 'composed_of', 'creation', 'creator', 'cur_est_begin', 'cur_est_end', 'deadline', 'depends', 'doc_issue_status', 'earliest_start', 'effective_prio', 'effort_hours', 'external_users', 'files', 'files_affected', 'fixed_in', 'id', 'keywords', 'kind', 'maturity_index', 'messages', 'needs', 'nosy', 'numeric_effort', 'part_of', 'planned_begin', 'planned_end', 'priority', 'release', 'responsible', 'safety_level', 'severity', 'status', 'superseder', 'test_level', 'title'] only)
User is allowed View on (View for "category": ('id', 'name') only)
User is allowed View on (View for "user": ('nickname', 'status', 'username') only)
User is allowed View on (View for "user_status": ('name',) only)
User is allowed View on file if file is linked from an item with View permission (View for "file" only)
User is allowed View on msg if msg is linked from an item with View permission (View for "msg" only)
User is allowed to access area (View for "area" only)
User is allowed to access doc_issue_status (View for "doc_issue_status" only)
User is allowed to access ext_tracker (View for "ext_tracker" only)
User is allowed to access ext_tracker_state (View for "ext_tracker_state" only)
User is allowed to access ext_tracker_type (View for "ext_tracker_type" only)
User is allowed to access keyword (View for "keyword" only)
User is allowed to access kind (View for "kind" only)
User is allowed to access msg_keyword (View for "msg_keyword" only)
User is allowed to access safety_level (View for "safety_level" only)
User is allowed to access severity (View for "severity" only)
User is allowed to access status (View for "status" only)
User is allowed to access status_transition (View for "status_transition" only)
User is allowed to access test_level (View for "test_level" only)
User is allowed to create file (Create for "file" only)
User is allowed to create issue (Create for "issue" only)
User is allowed to create msg (Create for "msg" only)
User is allowed to create query (Create for "query" only)
User is allowed to edit their queries (Edit for "query" only)
User is allowed to retire their queries (Retire for "query" only)
User is allowed to search for their own files (Search for "file" only)
User is allowed to search for their own messages (Search for "msg" only)
User is allowed to search for their queries (Search for "query" only)
User is allowed to search issue (Search for "issue" only)
User is allowed to view their own files (View for "file" only)
User may access the web interface (Web Access)
User may use the email interface (Email Access)
Users are allowed to edit some of their details (Edit for "user": ('csv_delimiter', 'hide_message_files', 'password', 'timezone') only)
Users are allowed to view some of their details (View for "user": ('activity', 'actor', 'creation', 'creator', 'firstname', 'lastname', 'realname', 'username') only)
Users are allowed to view their own and public queries for classes where they have search permission (View for "query" only)
Role "facility":
(Restore for "room" only)
(Retire for "room" only)
User is allowed to create room (Create for "room" only)
User is allowed to edit room (Edit for "room" only)
Role "functional-role":
(Restore for "user_functional_role" only)
(Retire for "user_functional_role" only)
User is allowed Edit on (Edit for "user": ('business_responsible', 'scale_seniority') only)
User is allowed View on (View for "user": ('business_responsible', 'planning_role', 'scale_seniority') only)
User is allowed to access user_functional_role (View for "user_functional_role" only)
User is allowed to create user_functional_role (Create for "user_functional_role" only)
User is allowed to edit user_functional_role (Edit for "user_functional_role" only)
Role "hr":
(Edit for "overtime_period": ('name', 'order') only)
(Restore for "room" only)
(Retire for "room" only)
User is allowed Edit on (Edit for "daily_record": ('required_overtime', 'weekend_allowed') only)
User is allowed Edit on (Edit for "daily_record": ('status', 'time_record') only)
User is allowed Edit on (Edit for "time_project": ('approval_hr', 'approval_required', 'is_extern', 'is_public_holiday', 'is_special_leave', 'is_vacation', 'no_overtime', 'no_overtime_day', 'only_hours', 'overtime_reduction') only)
User is allowed View on (View for "user": ('contacts',) only)
User is allowed to access auto_wp (View for "auto_wp" only)
User is allowed to access contract_type (View for "contract_type" only)
User is allowed to access daily_record (View for "daily_record" only)
User is allowed to access daily_record_freeze (View for "daily_record_freeze" only)
User is allowed to access leave_submission (View for "leave_submission" only)
User is allowed to access overtime_correction (View for "overtime_correction" only)
User is allowed to access time_record (View for "time_record" only)
User is allowed to access user_contact (View for "user_contact" only)
User is allowed to access user_dynamic (View for "user_dynamic" only)
User is allowed to access vacation_correction (View for "vacation_correction" only)
User is allowed to create auto_wp (Create for "auto_wp" only)
User is allowed to create daily_record_freeze (Create for "daily_record_freeze" only)
User is allowed to create location (Create for "location" only)
User is allowed to create org_location (Create for "org_location" only)
User is allowed to create organisation (Create for "organisation" only)
User is allowed to create overtime_correction (Create for "overtime_correction" only)
User is allowed to create overtime_period (Create for "overtime_period" only)
User is allowed to create product_family (Create for "product_family" only)
User is allowed to create public_holiday (Create for "public_holiday" only)
User is allowed to create reporting_group (Create for "reporting_group" only)
User is allowed to create room (Create for "room" only)
User is allowed to create sap_cc (Create for "sap_cc" only)
User is allowed to create time_record (Create for "time_record" only)
User is allowed to create uc_type (Create for "uc_type" only)
User is allowed to create user (Create for "user" only)
User is allowed to create user_dynamic (Create for "user_dynamic" only)
User is allowed to edit auto_wp (Edit for "auto_wp" only)
User is allowed to edit dynamic user data if not frozen in validity span of dynamic user record (Edit for "user_dynamic" only)
User is allowed to edit freeze record if not frozen at the given date (Edit for "daily_record_freeze": ('frozen',) only)
User is allowed to edit location (Edit for "location" only)
User is allowed to edit org_location (Edit for "org_location" only)
User is allowed to edit organisation (Edit for "organisation" only)
User is allowed to edit overtime correction if the overtime correction is not frozen (Edit for "overtime_correction" only)
User is allowed to edit product_family (Edit for "product_family" only)
User is allowed to edit public_holiday (Edit for "public_holiday" only)
User is allowed to edit reporting_group (Edit for "reporting_group" only)
User is allowed to edit room (Edit for "room" only)
User is allowed to edit sap_cc (Edit for "sap_cc" only)
User is allowed to edit time_record (Edit for "time_record" only)
User is allowed to edit uc_type (Edit for "uc_type" only)
User may manipulate user Roles through the web (Web Roles)
Role "hr-leave-approval":
User is allowed Edit on (Edit for "leave_submission": ('status',) only)
User is allowed to access contract_type (View for "contract_type" only)
User is allowed to access leave_submission (View for "leave_submission" only)
User is allowed to access vacation_correction (View for "vacation_correction" only)
Role "hr-org-location":
(Search for "daily_record_freeze" only)
(Search for "overtime_correction" only)
(Search for "time_activity_perm" only)
(Search for "time_record" only)
(Search for "user_dynamic" only)
User is allowed to view dynamic user data if he/she is in group HR-Org-Location and in the same Org-Location as the given user (View for "user_dynamic" only)
User is allowed to view freeze information if he/she is in group HR-Org-Location and in the same Org-Location as the given user (View for "daily_record_freeze" only)
User is allowed to view overtime information if he/she is in group HR-Org-Location and in the same Org-Location as the given user (View for "overtime_correction" only)
User is allowed to view time record data if he/she is in group HR-Org-Location and in the same Org-Location as the given user (View for "time_record" only)
Role "hr-vacation":
User is allowed to access contract_type (View for "contract_type" only)
User is allowed to access leave_submission (View for "leave_submission" only)
User is allowed to access vacation_correction (View for "vacation_correction" only)
User is allowed to create contract_type (Create for "contract_type" only)
User is allowed to create leave_submission (Create for "leave_submission" only)
User is allowed to create vacation_correction (Create for "vacation_correction" only)
User is allowed to edit contract_type (Edit for "contract_type" only)
User is allowed to edit leave_submission (Edit for "leave_submission" only)
User is allowed to edit vacation_correction (Edit for "vacation_correction" only)
Role "issue_admin":
User is allowed Edit on msg if msg is linked from an item with Edit permission (Edit for "msg" only)
User is allowed to access issue (View for "issue" only)
User is allowed to create area (Create for "area" only)
User is allowed to create category (Create for "category" only)
User is allowed to create doc_issue_status (Create for "doc_issue_status" only)
User is allowed to create ext_tracker (Create for "ext_tracker" only)
User is allowed to create issue (Create for "issue" only)
User is allowed to create keyword (Create for "keyword" only)
User is allowed to create kind (Create for "kind" only)
User is allowed to create msg_keyword (Create for "msg_keyword" only)
User is allowed to create safety_level (Create for "safety_level" only)
User is allowed to create severity (Create for "severity" only)
User is allowed to create status (Create for "status" only)
User is allowed to create status_transition (Create for "status_transition" only)
User is allowed to create test_level (Create for "test_level" only)
User is allowed to edit area (Edit for "area" only)
User is allowed to edit category (Edit for "category" only)
User is allowed to edit doc_issue_status (Edit for "doc_issue_status" only)
User is allowed to edit ext_tracker (Edit for "ext_tracker" only)
User is allowed to edit issue (Edit for "issue" only)
User is allowed to edit keyword (Edit for "keyword" only)
User is allowed to edit kind (Edit for "kind" only)
User is allowed to edit msg_keyword (Edit for "msg_keyword" only)
User is allowed to edit safety_level (Edit for "safety_level" only)
User is allowed to edit severity (Edit for "severity" only)
User is allowed to edit status (Edit for "status" only)
User is allowed to edit status_transition (Edit for "status_transition" only)
User is allowed to edit test_level (Edit for "test_level" only)
Role "it":
Create (Create for "user_contact" only)
User is allowed Edit on (Edit for "file": ('name', 'type') only)
User is allowed Edit on (Edit for "location": ('domain_part',) only)
User is allowed Edit on (Edit for "organisation": ('domain_part',) only)
User is allowed Edit on (Edit for "user": ('ad_domain', 'nickname', 'password', 'pictures', 'roles', 'timetracking_by', 'timezone', 'username') only)
User is allowed Edit on (Edit for "user": ('address', 'alternate_addresses', 'nickname', 'password', 'timezone', 'username') only)
User is allowed Edit on file if file is linked from an item with Edit permission (Edit for "file" only)
User is allowed Edit on msg if msg is linked from an item with Edit permission (Edit for "msg" only)
User is allowed View on file if file is linked from an item with View permission (View for "file" only)
User is allowed to access domain_permission (View for "domain_permission" only)
User is allowed to access it_int_prio (View for "it_int_prio" only)
User is allowed to access it_issue (View for "it_issue" only)
User is allowed to access it_project (View for "it_project" only)
User is allowed to create domain_permission (Create for "domain_permission" only)
User is allowed to create it_category (Create for "it_category" only)
User is allowed to create it_int_prio (Create for "it_int_prio" only)
User is allowed to create it_issue (Create for "it_issue" only)
User is allowed to create it_project (Create for "it_project" only)
User is allowed to create it_request_type (Create for "it_request_type" only)
User is allowed to create mailgroup (Create for "mailgroup" only)
User is allowed to edit domain_permission (Edit for "domain_permission" only)
User is allowed to edit it_category (Edit for "it_category" only)
User is allowed to edit it_int_prio (Edit for "it_int_prio" only)
User is allowed to edit it_issue (Edit for "it_issue" only)
User is allowed to edit it_project (Edit for "it_project" only)
User is allowed to edit it_request_type (Edit for "it_request_type" only)
User is allowed to edit mailgroup (Edit for "mailgroup" only)
User may manipulate user Roles through the web (Web Roles)
Role "itview":
User is allowed to access it_int_prio (View for "it_int_prio" only)
User is allowed to access it_issue (View for "it_issue" only)
User is allowed to access it_project (View for "it_project" only)
Role "msgedit":
(Search for "msg": ('date', 'id') only)
User is allowed Edit on (Edit for "msg": ('author', 'date', 'id', 'keywords', 'subject', 'summary') only)
User is allowed to access ext_msg (View for "ext_msg" only)
User is allowed to access ext_tracker_state (View for "ext_tracker_state" only)
User is allowed to access ext_tracker_type (View for "ext_tracker_type" only)
Role "msgsync":
(Search for "msg": ('date', 'id') only)
User is allowed Edit on (Edit for "msg": ('author', 'date', 'id', 'keywords', 'subject', 'summary') only)
User is allowed to access ext_msg (View for "ext_msg" only)
User is allowed to access ext_tracker_state (View for "ext_tracker_state" only)
User is allowed to access ext_tracker_type (View for "ext_tracker_type" only)
User is allowed to create ext_msg (Create for "ext_msg" only)
User is allowed to create ext_tracker_state (Create for "ext_tracker_state" only)
User is allowed to edit ext_msg (Edit for "ext_msg" only)
User is allowed to edit ext_tracker_state (Edit for "ext_tracker_state" only)
Role "nosy":
User may get nosy messages for doc (Nosy for "doc" only)
User may get nosy messages for issue (Nosy for "issue" only)
User may get nosy messages for it_issue (Nosy for "it_issue" only)
User may get nosy messages for it_project (Nosy for "it_project" only)
User may get nosy messages for support (Nosy for "support" only)
Role "office":
(Restore for "room" only)
(Retire for "room" only)
User is allowed View on (View for "user": ('contacts',) only)
User is allowed to access user_contact (View for "user_contact" only)
User is allowed to create absence (Create for "absence" only)
User is allowed to create absence_type (Create for "absence_type" only)
User is allowed to create room (Create for "room" only)
User is allowed to create uc_type (Create for "uc_type" only)
User is allowed to edit absence (Edit for "absence" only)
User is allowed to edit absence_type (Edit for "absence_type" only)
User is allowed to edit room (Edit for "room" only)
User is allowed to edit uc_type (Edit for "uc_type" only)
Role "organisation":
User is allowed to access location (View for "location" only)
User is allowed to access org_location (View for "org_location" only)
User is allowed to access organisation (View for "organisation" only)
User is allowed to create location (Create for "location" only)
User is allowed to create org_location (Create for "org_location" only)
User is allowed to create organisation (Create for "organisation" only)
User is allowed to edit location (Edit for "location" only)
User is allowed to edit org_location (Edit for "org_location" only)
User is allowed to edit organisation (Edit for "organisation" only)
Role "pgp":
Role "procurement":
(View for "sap_cc" only)
(View for "time_project" only)
User is allowed Edit on (Edit for "sap_cc": ('group_lead', 'purchasing_agents', 'team_lead') only)
User is allowed Edit on (Edit for "time_project": ('group_lead', 'purchasing_agents', 'team_lead') only)
Role "project":
User is allowed Edit on (Edit for "time_project": ('cost_center', 'department', 'deputy', 'description', 'name', 'nosy', 'organisation', 'responsible', 'status') only)
User is allowed Edit on (Edit for "time_project": ('infosec_req', 'is_extern', 'max_hours', 'op_project', 'planned_effort', 'product_family', 'project_type', 'reporting_group', 'work_location') only)
User is allowed to access time_project (View for "time_project" only)
User is allowed to access time_report (View for "time_report" only)
User is allowed to access time_wp (View for "time_wp" only)
User is allowed to create time_project (Create for "time_project" only)
User is allowed to create time_project_status (Create for "time_project_status" only)
User is allowed to create time_wp (Create for "time_wp" only)
User is allowed to create time_wp_group (Create for "time_wp_group" only)
User is allowed to edit time_project_status (Edit for "time_project_status" only)
User is allowed to edit time_wp (Edit for "time_wp" only)
User is allowed to edit time_wp_group (Edit for "time_wp_group" only)
Role "project_view":
User is allowed to access time_project (View for "time_project" only)
User is allowed to access time_report (View for "time_report" only)
User is allowed to access time_wp (View for "time_wp" only)
Role "sec-incident-nosy":
User is allowed to access it_int_prio (View for "it_int_prio" only)
User is allowed to access it_issue (View for "it_issue" only)
User is allowed to access it_project (View for "it_project" only)
Role "sec-incident-responsible":
User is allowed to access it_int_prio (View for "it_int_prio" only)
User is allowed to access it_issue (View for "it_issue" only)
User is allowed to access it_project (View for "it_project" only)
Role "staff-report":
Role "sub-login":
Role "summary_view":
Role "supportadmin":
User is allowed to access analysis_result (View for "analysis_result" only)
User is allowed to access contact (View for "contact" only)
User is allowed to access customer (View for "customer" only)
User is allowed to access customer_agreement (View for "customer_agreement" only)
User is allowed to access mailgroup (View for "mailgroup" only)
User is allowed to access return_type (View for "return_type" only)
User is allowed to access sup_classification (View for "sup_classification" only)
User is allowed to access support (View for "support" only)
User is allowed to create analysis_result (Create for "analysis_result" only)
User is allowed to create contact (Create for "contact" only)
User is allowed to create customer (Create for "customer" only)
User is allowed to create customer_agreement (Create for "customer_agreement" only)
User is allowed to create mailgroup (Create for "mailgroup" only)
User is allowed to create return_type (Create for "return_type" only)
User is allowed to create sup_classification (Create for "sup_classification" only)
User is allowed to create support (Create for "support" only)
User is allowed to edit analysis_result (Edit for "analysis_result" only)
User is allowed to edit contact (Edit for "contact" only)
User is allowed to edit customer (Edit for "customer" only)
User is allowed to edit customer_agreement (Edit for "customer_agreement" only)
User is allowed to edit mailgroup (Edit for "mailgroup" only)
User is allowed to edit return_type (Edit for "return_type" only)
User is allowed to edit sup_classification (Edit for "sup_classification" only)
User is allowed to edit support (Edit for "support" only)
Role "time-report":
User is allowed to access time_report (View for "time_report" only)
User is allowed to create time_report (Create for "time_report" only)
User is allowed to edit time_report (Edit for "time_report" only)
User may edit own file (file created by user) (Edit for "file" only)
Role "user":
(Search for "time_project": ('activity', 'actor', 'creation', 'creator', 'deputy', 'description', 'id', 'is_extern', 'is_public_holiday', 'is_special_leave', 'is_vacation', 'name', 'nosy', 'only_hours', 'op_project', 'overtime_reduction', 'responsible', 'status', 'work_location', 'wps') only)
(Search for "time_wp": ('activity', 'actor', 'auto_wp', 'bookers', 'cost_center', 'creation', 'creator', 'description', 'durations_allowed', 'epic_key', 'has_expiration_date', 'id', 'is_extern', 'is_public', 'name', 'project', 'responsible', 'time_end', 'time_start', 'time_wp_summary_no', 'travel', 'wp_no') only)
(View for "time_project": ('activity', 'actor', 'creation', 'creator', 'deputy', 'description', 'id', 'is_extern', 'is_public_holiday', 'is_special_leave', 'is_vacation', 'name', 'nosy', 'only_hours', 'op_project', 'overtime_reduction', 'responsible', 'status', 'work_location', 'wps') only)
Search (Search for "user_contact" only)
User is allowed Edit on (Edit for "msg": ('keywords',) only)
User is allowed Edit on file if file is linked from an item with Edit permission (Edit for "file" only)
User is allowed Edit on issue if issue is non-confidential or user is on nosy list (Edit for "issue" only)
User is allowed Edit on it_issue if it_issue is non-confidential or user is on nosy list (Edit for "it_issue": ('messages', 'files', 'nosy') only)
User is allowed Edit on it_project if it_project is non-confidential or user is on nosy list (Edit for "it_project": ('messages', 'files', 'nosy') only)
User is allowed Edit on support if support is non-confidential or user is on nosy list (Edit for "support": ('analysis_end', 'analysis_result', 'analysis_start', 'bcc', 'business_unit', 'category', 'cc', 'cc_emails', 'classification', 'closed', 'confidential', 'customer', 'emails', 'execution', 'external_ref', 'files', 'goods_received', 'goods_sent', 'lot', 'messages', 'nosy', 'number_effected', 'numeric_effort', 'prio', 'prodcat', 'product', 'related_issues', 'related_support', 'release', 'responsible', 'return_type', 'sap_ref', 'send_to_customer', 'serial_number', 'set_first_reply', 'status', 'superseder', 'title', 'type', 'warranty') only)
User is allowed View on (View for "user": ('activity', 'actor', 'ad_domain', 'address', 'alternate_addresses', 'business_responsible', 'clearance_by', 'creation', 'creator', 'firstname', 'id', 'job_description', 'lastname', 'lunch_duration', 'lunch_start', 'nickname', 'pictures', 'position_text', 'queries', 'realname', 'room', 'sex', 'status', 'subst_active', 'substitute', 'supervisor', 'timezone', 'title', 'tt_lines', 'username') only)
User is allowed View on (View for "user": ('activity', 'actor', 'address', 'alternate_addresses', 'creation', 'creator', 'id', 'queries', 'realname', 'status', 'timezone', 'username') only)
User is allowed View on (View for "user": ('business_responsible', 'department_temp', 'timetracking_by', 'vie_user', 'vie_user_bl_override', 'vie_user_ml') only)
User is allowed View on (View for "user": ('contacts',) only)
User is allowed View on (View for "user_dynamic": ('department', 'org_location') only)
User is allowed View on file if file is linked from an item with View permission (View for "file" only)
User is allowed View on issue if issue is non-confidential or user is on nosy list (View for "issue" only)
User is allowed View on it_issue if it_issue is non-confidential or user is on nosy list (View for "it_issue" only)
User is allowed View on it_project if it_project is non-confidential or user is on nosy list (View for "it_project" only)
User is allowed View on msg if msg is linked from an item with View permission (View for "msg" only)
User is allowed View on support if support is non-confidential or user is on nosy list (View for "support" only)
User is allowed to access absence (View for "absence" only)
User is allowed to access absence_type (View for "absence_type" only)
User is allowed to access analysis_result (View for "analysis_result" only)
User is allowed to access area (View for "area" only)
User is allowed to access artefact (View for "artefact" only)
User is allowed to access business_unit (View for "business_unit" only)
User is allowed to access category (View for "category" only)
User is allowed to access contact (View for "contact" only)
User is allowed to access contact_type (View for "contact_type" only)
User is allowed to access cost_center (View for "cost_center" only)
User is allowed to access cost_center_group (View for "cost_center_group" only)
User is allowed to access cost_center_permission_group (View for "cost_center_permission_group" only)
User is allowed to access cost_center_status (View for "cost_center_status" only)
User is allowed to access customer (View for "customer" only)
User is allowed to access customer_agreement (View for "customer_agreement" only)
User is allowed to access daily record if he is owner or supervisor or timetracking-by user (Edit for "daily_record": ('status', 'time_record') only)
User is allowed to access daily record if he is owner or supervisor or timetracking-by user (View for "daily_record" only)
User is allowed to access daily_record_status (View for "daily_record_status" only)
User is allowed to access department (View for "department" only)
User is allowed to access doc (View for "doc" only)
User is allowed to access doc_category (View for "doc_category" only)
User is allowed to access doc_issue_status (View for "doc_issue_status" only)
User is allowed to access doc_status (View for "doc_status" only)
User is allowed to access ext_tracker (View for "ext_tracker" only)
User is allowed to access ext_tracker_state (View for "ext_tracker_state" only)
User is allowed to access ext_tracker_type (View for "ext_tracker_type" only)
User is allowed to access functional_role (View for "functional_role" only)
User is allowed to access it_category (View for "it_category" only)
User is allowed to access it_issue_status (View for "it_issue_status" only)
User is allowed to access it_prio (View for "it_prio" only)
User is allowed to access it_project_status (View for "it_project_status" only)
User is allowed to access it_request_type (View for "it_request_type" only)
User is allowed to access keyword (View for "keyword" only)
User is allowed to access kind (View for "kind" only)
User is allowed to access leave_status (View for "leave_status" only)
User is allowed to access location (View for "location" only)
User is allowed to access mailgroup (View for "mailgroup" only)
User is allowed to access msg_keyword (View for "msg_keyword" only)
User is allowed to access org_group (View for "org_group" only)
User is allowed to access org_location (View for "org_location" only)
User is allowed to access organisation (View for "organisation" only)
User is allowed to access overtime_period (View for "overtime_period" only)
User is allowed to access prodcat (View for "prodcat" only)
User is allowed to access product (View for "product" only)
User is allowed to access product_family (View for "product_family" only)
User is allowed to access product_type (View for "product_type" only)
User is allowed to access project_type (View for "project_type" only)
User is allowed to access public_holiday (View for "public_holiday" only)
User is allowed to access reference (View for "reference" only)
User is allowed to access reporting_group (View for "reporting_group" only)
User is allowed to access return_type (View for "return_type" only)
User is allowed to access room (View for "room" only)
User is allowed to access safety_level (View for "safety_level" only)
User is allowed to access sap_cc (View for "sap_cc" only)
User is allowed to access severity (View for "severity" only)
User is allowed to access sex (View for "sex" only)
User is allowed to access status (View for "status" only)
User is allowed to access status_transition (View for "status_transition" only)
User is allowed to access summary_report (View for "summary_report" only)
User is allowed to access summary_type (View for "summary_type" only)
User is allowed to access sup_classification (View for "sup_classification" only)
User is allowed to access sup_execution (View for "sup_execution" only)
User is allowed to access sup_prio (View for "sup_prio" only)
User is allowed to access sup_status (View for "sup_status" only)
User is allowed to access sup_type (View for "sup_type" only)
User is allowed to access sup_warranty (View for "sup_warranty" only)
User is allowed to access test_level (View for "test_level" only)
User is allowed to access time_activity (View for "time_activity" only)
User is allowed to access time_activity_perm (View for "time_activity_perm" only)
User is allowed to access time_project_status (View for "time_project_status" only)
User is allowed to access time_wp_group (View for "time_wp_group" only)
User is allowed to access time_wp_summary_no (View for "time_wp_summary_no" only)
User is allowed to access timesheet (View for "timesheet" only)
User is allowed to access uc_type (View for "uc_type" only)
User is allowed to access user_status (View for "user_status" only)
User is allowed to access vac_aliq (View for "vac_aliq" only)
User is allowed to access vacation_report (View for "vacation_report" only)
User is allowed to access work_location (View for "work_location" only)
User is allowed to create daily_record (Create for "daily_record" only)
User is allowed to create doc (Create for "doc" only)
User is allowed to create ext_tracker_state (Create for "ext_tracker_state" only)
User is allowed to create file (Create for "file" only)
User is allowed to create issue (Create for "issue" only)
User is allowed to create it_issue (Create for "it_issue" only)
User is allowed to create leave_submission (Create for "leave_submission" only)
User is allowed to create msg (Create for "msg" only)
User is allowed to create queries (Create for "query" only)
User is allowed to create support (Create for "support" only)
User is allowed to create time_record (Create for "time_record" only)
User is allowed to create time_wp (Create for "time_wp" only)
User is allowed to edit (some of) their own user details (Edit for "user": ('csv_delimiter', 'hide_message_files', 'lunch_duration', 'lunch_start', 'password', 'queries', 'realname', 'room', 'subst_active', 'substitute', 'timezone', 'tt_lines') only)
User is allowed to edit category if he is responsible for it (Edit for "category": ('nosy', 'default_part_of') only)
User is allowed to edit doc (Edit for "doc" only)
User is allowed to edit ext_tracker_state (Edit for "ext_tracker_state" only)
User is allowed to edit if he's the owner of the contact (Edit for "user_contact": ('visible',) only)
User is allowed to edit several fields if he is Responsible for an it_issue (Edit for "it_issue": ('responsible',) only)
User is allowed to edit several fields if he is Stakeholder/Responsible for an it_issue (Edit for "it_issue": ('deadline', 'status', 'title') only)
User is allowed to edit their queries (Edit for "query" only)
User is allowed to edit time category if the status is "Open" and he is responsible for the time category (Edit for "time_project": ('deputy', 'planned_effort', 'nosy') only)
User is allowed to edit workpackage if he is time category owner or deputy (Edit for "time_wp": ('cost_center', 'is_public', 'name', 'responsible', 'time_wp_summary_no', 'wp_no') only)
User is allowed to retire their queries (Retire for "query" only)
User is allowed to search daily_record (Search for "daily_record" only)
User is allowed to search for their own files (Search for "file" only)
User is allowed to search for their own messages (Search for "msg" only)
User is allowed to search for their queries (Search for "query" only)
User is allowed to search issue (Search for "issue" only)
User is allowed to search it_issue (Search for "it_issue" only)
User is allowed to search it_project (Search for "it_project" only)
User is allowed to search leave_submission (Search for "leave_submission" only)
User is allowed to search support (Search for "support" only)
User is allowed to search time_record (Search for "time_record" only)
User is allowed to search time_wp (Search for "time_wp": ('activity', 'actor', 'auto_wp', 'cost_center', 'creation', 'creator', 'description', 'durations_allowed', 'epic_key', 'has_expiration_date', 'is_extern', 'is_public', 'id', 'name', 'project', 'responsible', 'time_end', 'time_start', 'time_wp_summary_no', 'travel', 'wp_no') only)
User is allowed to search user_status (Search for "user": ('status',) only)
User is allowed to see time record if he is allowed to see all details on work package or User may view a daily_record (and time_records that are attached to that daily_record) if the user owns the daily_record or has role 'HR' or 'Controlling', or the user is supervisor or substitute supervisor of the owner of the daily record (the supervisor relationship is transitive) or the user is the department manager of the owner of the daily record. If user has role HR-Org-Location and is in the same Org-Location as the record, it may also be seen (View for "time_record" only)
User is allowed to view (some of) their own user details (View for "user": ('entry_date', 'planning_role') only)
User is allowed to view contact if he's the owner of the contact or the contact is marked visible (View for "user_contact" only)
User is allowed to view leave submission if he is the supervisor or the person to whom approvals are delegated (Edit for "leave_submission": ('status',) only)
User is allowed to view leave submission if he is the supervisor or the person to whom approvals are delegated (View for "leave_submission" only)
User is allowed to view selected fields in work package if booking is allowed for this user (also applies to timetracking by, supervisor and approval delegated) (View for "time_wp": ('activity', 'actor', 'cost_center', 'creation', 'creator', 'description', 'durations_allowed', 'epic_key', 'has_expiration_date', 'id', 'is_extern', 'is_public', 'name', 'project', 'responsible', 'time_end', 'time_start', 'time_wp_summary_no', 'travel', 'wp_no') only)
User is allowed to view their own files (View for "file" only)
User is allowed to view their own messages (View for "msg" only)
User is allowed to view their own overtime information (View for "overtime_correction" only)
User is allowed to view time record if he is the supervisor or the person to whom approvals are delegated (View for "time_record" only)
User is allowed to view work package and time category names if he/she has role HR or HR-Org-Location (View for "time_project": ('name',) only)
User is allowed to view work package and time category names if he/she has role HR or HR-Org-Location (View for "time_wp": ('name', 'project') only)
User is allowed to view/edit workpackage if he is owner or project responsible/deputy (Edit for "time_wp": ('bookers', 'description', 'epic_key', 'planned_effort', 'time_end', 'time_start', 'time_wp_summary_no') only)
User may access the rest interface (Rest Access)
User may access the web interface (Web Access)
User may access the xmlrpc interface (Xmlrpc Access)
User may edit own leave submissions (Edit for "leave_submission": ('comment', 'comment_cancel', 'first_day', 'last_day', 'status', 'time_wp', 'user') only)
User may edit own leave submissions (View for "leave_submission": ('comment', 'comment_cancel', 'first_day', 'last_day', 'status', 'time_wp', 'user') only)
User may see time report if reponsible or deputy of time project or on nosy list of time project (View for "time_report" only)
User may use the email interface (Email Access)
User may view a daily_record (and time_records that are attached to that daily_record) if the user owns the daily_record or has role 'HR' or 'Controlling', or the user is supervisor or substitute supervisor of the owner of the daily record (the supervisor relationship is transitive) or the user is the department manager of the owner of the daily record. If user has role HR-Org-Location and is in the same Org-Location as the record, it may also be seen (View for "daily_record" only)
User may view their own user functional role (View for "user_functional_role" only)
User may view time category if user is owner or deputy of time category or on nosy list of time category or if user is department manager of time category (View for "time_project" only)
User may view work package if responsible for it, if user is owner or deputy of time category or on nosy list of time category or if user is department manager of time category (View for "time_wp" only)
User or Timetracking by user may edit time_records owned by user (Edit for "time_record" only)
User or Timetracking by user may edit time_records owned by user (Restore for "time_record" only)
User or Timetracking by user may edit time_records owned by user (Retire for "time_record" only)
User or Timetracking by user may edit time_records owned by user (View for "time_record" only)
Users are allowed to view their own and public queries for classes where they have search permission (View for "query" only)
Users may see daily record if they may see one of the time_records for that day (View for "daily_record" only)
Role "user_view":
User is allowed to access user (View for "user" only)
Role "vacation-report":
""".strip ()
| 1.78125 | 2 |
CodeChef/problems/IMDB/main.py | object-oriented-human/competitive | 1 | 2382 | <filename>CodeChef/problems/IMDB/main.py
# For each test case: among the n movies whose size s fits on the disk of size x,
# report the highest rating r seen (0 if no movie fits).
tc = int(input())
while tc:
    tc -= 1
    best = 0
    n, x = map(int, input().split())
    for _ in range(n):
        s, r = map(int, input().split())
        if x >= s:  # the movie fits on the disk
            best = max(best, r)
print(best) | 3.34375 | 3 |
tests/test_sqlite_wrapper.py | Privex/python-db | 1 | 2383 | <gh_stars>1-10
"""
Tests related to :class:`.SqliteWrapper` / :class:`.ExampleWrapper`
"""
# from unittest import TestCase
from tests.base import *
class TestSQLiteWrapper(PrivexDBTestBase):
def test_tables_created(self):
w = self.wrp
self.assertEqual(w.db, ':memory:')
tables = w.list_tables()
self.assertIn('users', tables)
self.assertIn('items', tables)
def test_tables_drop(self):
w = self.wrp
tables = w.list_tables()
self.assertIn('users', tables)
self.assertIn('items', tables)
w.drop_schemas()
tables = w.list_tables()
self.assertNotIn('users', tables)
self.assertNotIn('items', tables)
def test_insert_find_user(self):
w = self.wrp
w.query_mode = 'flat'
res = w.insert_user('John', 'Doe')
self.assertEqual(res.rowcount, 1)
user = w.find_user(res.lastrowid)
self.assertEqual(user[1], 'John')
self.assertEqual(user[2], 'Doe')
def test_action_update(self):
w = self.wrp
w.query_mode = 'dict'
res = w.insert_user('John', 'Doe')
last_id = res.lastrowid
rows = w.action("UPDATE users SET last_name = ? WHERE first_name = ?", ['Smith', 'John'])
self.assertEqual(rows, 1)
john = w.find_user(last_id)
self.assertEqual(john['last_name'], 'Smith')
def test_find_user_dict_mode(self):
w = self.wrp
w.query_mode = 'dict'
res = w.insert_user('John', 'Doe')
self.assertEqual(res.rowcount, 1)
user = w.find_user(res.lastrowid)
self.assertEqual(user['first_name'], 'John')
self.assertEqual(user['last_name'], 'Doe')
def test_find_user_nonexistent(self):
w = self.wrp
user = w.find_user(99)
self.assertIsNone(user)
def test_get_users_tuple(self):
w = self.wrp
w.query_mode = 'flat'
w.insert_user('John', 'Doe')
w.insert_user('Jane', 'Doe')
w.insert_user('Dave', 'Johnson')
users = list(w.get_users())
self.assertEqual(len(users), 3)
self.assertEqual(users[0][1], 'John')
self.assertEqual(users[1][1], 'Jane')
self.assertEqual(users[1][2], 'Doe')
self.assertEqual(users[2][2], 'Johnson')
def test_get_users_dict(self):
w = self.wrp
w.query_mode = 'dict'
w.insert_user('John', 'Doe')
w.insert_user('Jane', 'Doe')
w.insert_user('Dave', 'Johnson')
users = list(w.get_users())
self.assertEqual(len(users), 3)
self.assertEqual(users[0]['first_name'], 'John')
self.assertEqual(users[1]['first_name'], 'Jane')
self.assertEqual(users[1]['last_name'], 'Doe')
self.assertEqual(users[2]['last_name'], 'Johnson')
def test_insert_helper(self):
w = self.wrp
w.query_mode = 'dict'
res = w.insert('users', first_name='Dave', last_name='Johnson')
self.assertEqual(res.lastrowid, 1)
user = w.find_user(res.lastrowid)
self.assertEqual(user['first_name'], 'Dave')
self.assertEqual(user['last_name'], 'Johnson')
| 2.515625 | 3 |
var/spack/repos/builtin/packages/strumpack/package.py | robertodr/spack | 9 | 2384 | <gh_stars>1-10
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Strumpack(CMakePackage, CudaPackage):
"""STRUMPACK -- STRUctured Matrix PACKage - provides linear solvers
for sparse matrices and for dense rank-structured matrices, i.e.,
matrices that exhibit some kind of low-rank property. It provides a
distributed memory fully algebraic sparse solver and
preconditioner. The preconditioner is mostly aimed at large sparse
linear systems which result from the discretization of a partial
differential equation, but is not limited to any particular type of
problem. STRUMPACK also provides preconditioned GMRES and BiCGStab
iterative solvers."""
homepage = "http://portal.nersc.gov/project/sparse/strumpack"
url = "https://github.com/pghysels/STRUMPACK/archive/v4.0.0.tar.gz"
git = "https://github.com/pghysels/STRUMPACK.git"
maintainers = ['pghysels']
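
    # Illustrative note (not part of the original recipe): the variants declared below
    # can be toggled on the Spack command line, e.g. an install enabling MPI, CUDA and
    # ZFP compression might look like `spack install strumpack+mpi+cuda+zfp`
    # (a sketch of common usage, not an officially documented invocation).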
version('master', branch='master')
version('5.0.0', sha256='bdfd1620ff7158d96055059be04ee49466ebaca8213a2fdab33e2d4571019a49')
version('4.0.0', sha256='a3629f1f139865c74916f8f69318f53af6319e7f8ec54e85c16466fd7d256938')
version('3.3.0', sha256='499fd3b58656b4b6495496920e5372895861ebf15328be8a7a9354e06c734bc7')
version('3.2.0', sha256='34d93e1b2a3b8908ef89804b7e08c5a884cbbc0b2c9f139061627c0d2de282c1')
version('3.1.1', sha256='c1c3446ee023f7b24baa97b24907735e89ce4ae9f5ef516645dfe390165d1778')
variant('shared', default=False, description='Build shared libraries')
variant('mpi', default=True, description='Use MPI')
variant('openmp', default=True,
description='Enable thread parallellism via tasking with OpenMP')
variant('cuda', default=True,
description='Enable CUDA support')
variant('parmetis', default=True,
description='Enable use of ParMetis')
variant('scotch', default=False,
description='Enable use of Scotch')
variant('butterflypack', default=True,
description='Enable use of ButterflyPACK')
variant('zfp', default=True,
description='Build with support for compression using ZFP')
variant('c_interface', default=True,
description='Enable C interface')
variant('count_flops', default=False,
description='Build with flop counters')
variant('task_timers', default=False,
description='Build with timers for internal routines')
variant('build_dev_tests', default=False,
description='Build developer test routines')
variant('build_tests', default=False,
description='Build test routines')
# TODO: add a slate variant
depends_on('[email protected]:', type='build')
depends_on('mpi', when='+mpi')
depends_on('blas')
depends_on('lapack')
depends_on('scalapack', when='+mpi')
depends_on('metis')
depends_on('parmetis', when='+parmetis')
depends_on('scotch~metis', when='+scotch')
depends_on('scotch~metis+mpi', when='+scotch+mpi')
depends_on('[email protected]', when='@3.3.0:3.9.999 +butterflypack+mpi')
depends_on('[email protected]:', when='@4.0.0: +butterflypack+mpi')
depends_on('cuda', when='@4.0.0: +cuda')
depends_on('zfp', when='+zfp')
conflicts('+parmetis', when='~mpi')
conflicts('+butterflypack', when='~mpi')
conflicts('+butterflypack', when='@:3.2.0')
conflicts('+cuda', when='@:3.9.999')
conflicts('+zfp', when='@:3.9.999')
patch('intel-19-compile.patch', when='@3.1.1')
def cmake_args(self):
spec = self.spec
def on_off(varstr):
return 'ON' if varstr in spec else 'OFF'
args = [
'-DSTRUMPACK_USE_MPI=%s' % on_off('+mpi'),
'-DSTRUMPACK_USE_OPENMP=%s' % on_off('+openmp'),
'-DTPL_ENABLE_PARMETIS=%s' % on_off('+parmetis'),
'-DTPL_ENABLE_SCOTCH=%s' % on_off('+scotch'),
'-DTPL_ENABLE_BPACK=%s' % on_off('+butterflypack'),
'-DSTRUMPACK_COUNT_FLOPS=%s' % on_off('+count_flops'),
'-DSTRUMPACK_TASK_TIMERS=%s' % on_off('+task_timers'),
'-DSTRUMPACK_DEV_TESTING=%s' % on_off('+build_dev_tests'),
'-DSTRUMPACK_BUILD_TESTS=%s' % on_off('+build_tests'),
'-DTPL_BLAS_LIBRARIES=%s' % spec['blas'].libs.joined(";"),
'-DTPL_LAPACK_LIBRARIES=%s' % spec['lapack'].libs.joined(";"),
'-DTPL_SCALAPACK_LIBRARIES=%s' % spec['scalapack'].
libs.joined(";"),
]
if spec.satisfies('@:3.9.999'):
if '+mpi' in spec:
args.extend([
'-DCMAKE_C_COMPILER=%s' % spec['mpi'].mpicc,
'-DCMAKE_CXX_COMPILER=%s' % spec['mpi'].mpicxx,
'-DCMAKE_Fortran_COMPILER=%s' % spec['mpi'].mpifc
])
args.extend([
'-DSTRUMPACK_C_INTERFACE=%s' % on_off('+c_interface'),
])
if spec.satisfies('@4.0.0:'):
args.extend([
'-DSTRUMPACK_USE_CUDA=%s' % on_off('+cuda')
])
args.extend([
'-DBUILD_SHARED_LIBS=%s' % on_off('+shared')
])
return args
| 1.710938 | 2 |
actionserver/actions/action_feedbackform.py | Ajju2211/frendy-bot | 0 | 2385 | from typing import Any, Text, Dict, List, Union
from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
from rasa_sdk.forms import FormAction
from rasa_sdk.events import UserUtteranceReverted, UserUttered, FollowupAction
# from rasa_core.events import (UserUtteranceReverted, UserUttered,
# ActionExecuted, Event)
from rasa_sdk.events import AllSlotsReset, SlotSet
from rasa.core.constants import REQUESTED_SLOT
from rasa.core.slots import Slot
import pandas as pd
import json
from actionserver.utils import utilities as util
from actionserver.controllers.faqs.faq import FAQ
from actionserver.controllers.constants.orderForm import *
import logging
from actionserver.utils.utilities import INVALID_VALUE
product_list = []
quant_list = [] # takes quantity from user
logger = logging.getLogger(__name__)
with open(r'./actionserver/custom_payload.json') as f:
frendy_product_menu = json.load(f)
# Code snippet for global back
# return [Restarted(), UserUttered(text="/get_started", parse_data={
# "intent": {"confidence": 1.0, "name": "get_started"},
# "entities": []
# }), FollowupAction(name="utter_greet")]
def query_back(dispatcher):
dispatcher.utter_message("Going back to queries!!!")
greet_utter = UserUttered(text="/greet", parse_data={
"intent": {"confidence": 1.0, "name": "greet"},
"entities": []
})
query_utter = UserUttered(text="/query_init", parse_data={
"intent": {"confidence": 1.0, "name": "query_init"},
"entities": []
})
return [
greet_utter,
FollowupAction(name="utter_greet"),
query_utter,
FollowupAction(name="utter_query_type")
]
def greet_back(dispatcher):
dispatcher.utter_message("Going back!!!")
    dispatcher.utter_message(json_message={
        "platform": "whatsapp",
        "payload": "text",
        "text": "Welcome back to Frendy Shopping"
    })
return [UserUttered(text="/greet", parse_data={
"intent": {"confidence": 1.0, "name": "greet"},
"entities": []
}), FollowupAction(name="utter_greet")]
class FeedbackForm(FormAction):
def name(self):
return "feedback_form"
@staticmethod
def required_slots(tracker):
if tracker.get_slot("rating"):
return ["rating", "feedback_text"]
else:
return ["rating"]
def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:
"""A dictionary to map required slots to
- an extracted entity
- intent: value pairs
- a whole message
or a list of them, where a first match will be picked"""
# return {"rating": [self.from_entity("rating"),self.from_entity("any_thing")],"feedback_text": [self.from_entity(entity="any_thing"),self.from_entity(entity="navigation")]}
return {"rating": [self.from_entity("rating"), self.from_text()], "feedback_text": [self.from_text(), self.from_entity(entity="navigation")]}
def validate_rating(
self,
value: Text,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> Dict[Text, Any]:
ratings = ['1', '2', '3', '4', '5']
try:
value = value.strip()
if value == "back1" or value.lower() == "back":
return {"rating": INVALID_VALUE, "feedback_text": INVALID_VALUE}
# 1-5 it integer otherwise rating:None
elif value in ratings:
return {"rating": value, "feedback_text": None}
else:
dispatcher.utter_message("Please enter valid option.")
                dispatcher.utter_message(json_message={
                    "platform": "whatsapp",
                    "payload": "text",
                    "text": "Please enter valid option"
                })
return {"rating": None, "feedback_text": None}
except Exception as e:
print(e)
dispatcher.utter_message("Please enter valid option.")
            dispatcher.utter_message(json_message={
                "platform": "whatsapp",
                "payload": "text",
                "text": "Please enter valid option"
            })
return {"rating": None, "feedback_text": None}
def validate_feedback_text(
self,
value: Text,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> Dict[Text, Any]:
if value == "back2" or value.lower() == "back":
return {"rating": None, "feedback_text": None}
else:
return {"feedback_text": value}
def submit(
self,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> List[Dict]:
if tracker.get_slot("rating") != INVALID_VALUE:
with open("./actionserver/customer_queries.json", "r") as queriesRef:
rating = tracker.get_slot("rating")
feedback = tracker.get_slot("feedback_text")
feedbackObj = json.load(queriesRef)
feedbackObj["feedback"].append({
"createdOn": util.timestamp(),
"complaint_area": rating,
"complaint": feedback
})
with open("./actionserver/customer_queries.json", "w") as queriesRefWrite:
json.dump(feedbackObj, queriesRefWrite, indent=4)
dispatcher.utter_message("Your Response :\n Rating :'{rate}' star \n Feedback: '{feedbk}' \n Submitted!Thank You!".format(
rate=rating, feedbk=feedback))
dispatcher.utter_message(json_message = {
"platform":"whatsapp",
"payload":"text",
"text":"Your Response :\n Rating :'{rate}' star \n Feedback: '{feedbk}' \n Submitted!Thank You!".format(
rate=rating, feedbk=feedback)
});
else:
dispatcher.utter_message("Feedback form closed")
li = [SlotSet("rating", None), SlotSet("feedback_text", None)]
li.extend(query_back(dispatcher))
return li
return [SlotSet("rating", None), SlotSet("feedback_text", None)]
| 2.125 | 2 |
dash/graphs.py | fuzzylabs/wearable-my-foot | 5 | 2386 | import plotly.graph_objs as go
class GraphsHelper:
template = "plotly_dark"
    def generate_timeseries_plot(self, dataframe):
        '''Generate a plot for a timeseries.'''
pressure_plots = []
for sensor in ["p1", "p2", "p3"]:
series = dataframe[sensor]
scatter = go.Scatter(x = dataframe.index,
y = series,
name = f"Sensor {sensor}",
opacity = 0.4)
pressure_plots.append(scatter)
pressure_figure = go.Figure(
data = pressure_plots,
layout = go.Layout(
title = "Pressure timeseries",
template = self.template
)
)
return pressure_figure
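
# Minimal usage sketch (not part of the original module): build a small DataFrame with
# the "p1", "p2", "p3" sensor columns the helper iterates over and render the figure.
if __name__ == "__main__":
    import pandas as pd

    sample = pd.DataFrame({
        "p1": [1.0, 2.0, 1.5],
        "p2": [0.5, 0.7, 0.6],
        "p3": [2.0, 2.2, 2.1],
    })
    figure = GraphsHelper().generate_timeseries_plot(sample)
    # figure.show()  # uncomment to open the dark-themed pressure plot in a browser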
| 3.140625 | 3 |
azure-mgmt-web/azure/mgmt/web/models/app_service_certificate_resource.py | JonathanGailliez/azure-sdk-for-python | 1 | 2387 | <reponame>JonathanGailliez/azure-sdk-for-python
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class AppServiceCertificateResource(Resource):
"""Key Vault container ARM resource for a certificate that is purchased
through Azure.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:param location: Required. Resource Location.
:type location: str
:ivar type: Resource type.
:vartype type: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param key_vault_id: Key Vault resource Id.
:type key_vault_id: str
:param key_vault_secret_name: Key Vault secret name.
:type key_vault_secret_name: str
:ivar provisioning_state: Status of the Key Vault secret. Possible values
include: 'Initialized', 'WaitingOnCertificateOrder', 'Succeeded',
'CertificateOrderFailed', 'OperationNotPermittedOnKeyVault',
'AzureServiceUnauthorizedToAccessKeyVault', 'KeyVaultDoesNotExist',
'KeyVaultSecretDoesNotExist', 'UnknownError', 'ExternalPrivateKey',
'Unknown'
:vartype provisioning_state: str or
~azure.mgmt.web.models.KeyVaultSecretStatus
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'location': {'required': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'key_vault_id': {'key': 'properties.keyVaultId', 'type': 'str'},
'key_vault_secret_name': {'key': 'properties.keyVaultSecretName', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'KeyVaultSecretStatus'},
}
def __init__(self, **kwargs):
super(AppServiceCertificateResource, self).__init__(**kwargs)
self.key_vault_id = kwargs.get('key_vault_id', None)
self.key_vault_secret_name = kwargs.get('key_vault_secret_name', None)
self.provisioning_state = None
| 2.40625 | 2 |
telethon/tl/functions/stickers.py | polisitni1/DogeClickBot | 0 | 2388 | """File generated by TLObjects' generator. All changes will be ERASED"""
from ...tl.tlobject import TLRequest
from typing import Optional, List, Union, TYPE_CHECKING
import os
import struct
if TYPE_CHECKING:
from ...tl.types import TypeInputStickerSet, TypeInputUser, TypeInputStickerSetItem, TypeInputDocument
class AddStickerToSetRequest(TLRequest):
CONSTRUCTOR_ID = 0x8653febe
SUBCLASS_OF_ID = 0x9b704a5a
def __init__(self, stickerset, sticker):
"""
:param TypeInputStickerSet stickerset:
:param TypeInputStickerSetItem sticker:
:returns messages.StickerSet: Instance of StickerSet.
"""
self.stickerset = stickerset # type: TypeInputStickerSet
self.sticker = sticker # type: TypeInputStickerSetItem
def to_dict(self):
return {
'_': 'AddStickerToSetRequest',
'stickerset': None if self.stickerset is None else self.stickerset.to_dict(),
'sticker': None if self.sticker is None else self.sticker.to_dict()
}
def __bytes__(self):
return b''.join((
b'\xbe\xfeS\x86',
bytes(self.stickerset),
bytes(self.sticker),
))
@classmethod
def from_reader(cls, reader):
_stickerset = reader.tgread_object()
_sticker = reader.tgread_object()
return cls(stickerset=_stickerset, sticker=_sticker)
class ChangeStickerPositionRequest(TLRequest):
CONSTRUCTOR_ID = 0xffb6d4ca
SUBCLASS_OF_ID = 0x9b704a5a
def __init__(self, sticker, position):
"""
:param TypeInputDocument sticker:
:param int position:
:returns messages.StickerSet: Instance of StickerSet.
"""
self.sticker = sticker # type: TypeInputDocument
self.position = position # type: int
def to_dict(self):
return {
'_': 'ChangeStickerPositionRequest',
'sticker': None if self.sticker is None else self.sticker.to_dict(),
'position': self.position
}
def __bytes__(self):
return b''.join((
b'\xca\xd4\xb6\xff',
bytes(self.sticker),
struct.pack('<i', self.position),
))
@classmethod
def from_reader(cls, reader):
_sticker = reader.tgread_object()
_position = reader.read_int()
return cls(sticker=_sticker, position=_position)
class CreateStickerSetRequest(TLRequest):
CONSTRUCTOR_ID = 0x9bd86e6a
SUBCLASS_OF_ID = 0x9b704a5a
def __init__(self, user_id, title, short_name, stickers, masks=None):
"""
:param TypeInputUser user_id:
:param str title:
:param str short_name:
:param List[TypeInputStickerSetItem] stickers:
:param Optional[bool] masks:
:returns messages.StickerSet: Instance of StickerSet.
"""
self.user_id = user_id # type: TypeInputUser
self.title = title # type: str
self.short_name = short_name # type: str
self.stickers = stickers # type: List[TypeInputStickerSetItem]
self.masks = masks # type: Optional[bool]
async def resolve(self, client, utils):
self.user_id = utils.get_input_user(await client.get_input_entity(self.user_id))
def to_dict(self):
return {
'_': 'CreateStickerSetRequest',
'user_id': None if self.user_id is None else self.user_id.to_dict(),
'title': self.title,
'short_name': self.short_name,
'stickers': [] if self.stickers is None else [None if x is None else x.to_dict() for x in self.stickers],
'masks': self.masks
}
def __bytes__(self):
return b''.join((
b'jn\xd8\x9b',
struct.pack('<I', (0 if self.masks is None or self.masks is False else 1)),
bytes(self.user_id),
self.serialize_bytes(self.title),
self.serialize_bytes(self.short_name),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.stickers)),b''.join(bytes(x) for x in self.stickers),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_masks = bool(flags & 1)
_user_id = reader.tgread_object()
_title = reader.tgread_string()
_short_name = reader.tgread_string()
reader.read_int()
_stickers = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_stickers.append(_x)
return cls(user_id=_user_id, title=_title, short_name=_short_name, stickers=_stickers, masks=_masks)
class RemoveStickerFromSetRequest(TLRequest):
CONSTRUCTOR_ID = 0xf7760f51
SUBCLASS_OF_ID = 0x9b704a5a
def __init__(self, sticker):
"""
:param TypeInputDocument sticker:
:returns messages.StickerSet: Instance of StickerSet.
"""
self.sticker = sticker # type: TypeInputDocument
def to_dict(self):
return {
'_': 'RemoveStickerFromSetRequest',
'sticker': None if self.sticker is None else self.sticker.to_dict()
}
def __bytes__(self):
return b''.join((
b'Q\x0fv\xf7',
bytes(self.sticker),
))
@classmethod
def from_reader(cls, reader):
_sticker = reader.tgread_object()
return cls(sticker=_sticker)
| 2.3125 | 2 |
applications/ChimeraApplication/tests/chimera_analysis_base_test.py | lkusch/Kratos | 778 | 2389 | <gh_stars>100-1000
import KratosMultiphysics
import KratosMultiphysics.KratosUnittest as UnitTest
import KratosMultiphysics.ChimeraApplication
from KratosMultiphysics.ChimeraApplication.fluid_chimera_analysis import FluidChimeraAnalysis
class ChimeraAnalysisBaseTest(UnitTest.TestCase):
def setUp(self):
# Set to true to get post-process files for the test
self.print_output = False
def _run_test(self,settings_file_name):
model = KratosMultiphysics.Model()
with open(settings_file_name,'r') as settings_file:
settings = KratosMultiphysics.Parameters(settings_file.read())
# to check the results: add output settings block if needed
if self.print_output:
settings.AddValue("output_processes", KratosMultiphysics.Parameters(r'''{
"vtk_output" : [{
"python_module" : "vtk_output_process",
"kratos_module" : "KratosMultiphysics",
"process_name" : "VtkOutputProcess",
"help" : "This process writes postprocessing files for Paraview",
"Parameters" : {
"model_part_name" : "FluidModelPart.Parts_background_surface",
"output_control_type" : "step",
"output_frequency" : 1,
"file_format" : "ascii",
"output_precision" : 3,
"output_sub_model_parts" : false,
"write_deformed_configuration" : true,
"folder_name" : "test_vtk_output",
"save_output_files_in_folder" : true,
"nodal_solution_step_data_variables" : ["VELOCITY","PRESSURE","DISTANCE","MESH_VELOCITY"],
"nodal_data_value_variables" : [],
"element_flags" : ["ACTIVE"],
"nodal_flags" : ["VISITED","CHIMERA_INTERNAL_BOUNDARY"],
"element_data_value_variables" : [],
"condition_data_value_variables" : []
}
},{
"python_module" : "vtk_output_process",
"kratos_module" : "KratosMultiphysics",
"process_name" : "VtkOutputProcess",
"help" : "This process writes postprocessing files for Paraview",
"Parameters" : {
"model_part_name" : "FluidModelPart.Parts_patch_surface",
"output_control_type" : "step",
"output_frequency" : 1,
"file_format" : "ascii",
"output_precision" : 3,
"output_sub_model_parts" : false,
"write_deformed_configuration" : true,
"folder_name" : "test_vtk_output",
"save_output_files_in_folder" : true,
"nodal_solution_step_data_variables" : ["VELOCITY","PRESSURE","DISTANCE","MESH_VELOCITY"],
"nodal_data_value_variables" : [],
"element_flags" : ["ACTIVE"],
"nodal_flags" : ["VISITED","CHIMERA_INTERNAL_BOUNDARY"],
"element_data_value_variables" : [],
"condition_data_value_variables" : []
}
}]
}'''))
analysis = FluidChimeraAnalysis(model,settings)
analysis.Run()
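
# Illustrative sketch (not part of the original file): a concrete test would subclass the
# base class above and point it at a ProjectParameters file, e.g.
#
#   class ChimeraFlowAroundCylinderTest(ChimeraAnalysisBaseTest):
#       def test_flow_around_cylinder(self):
#           self._run_test("flow_around_cylinder/ProjectParameters.json")
#
# (the test class name and settings file path are assumptions for illustration only).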
| 2.15625 | 2 |
parsers/rss10.py | side-beach-city/SBCLinkCopyTool | 0 | 2390 | <filename>parsers/rss10.py
import urllib.request
import xml.etree.ElementTree
class RSS10Parser:
    """Fetch a feed URL and return its entries as a list of dicts.

    YouTube playlist feeds are Atom documents with Media RSS extensions,
    hence the namespace constants used below.
    """
    def __init__(self, url: str) -> None:
        self.url = url
    def getlist(self) -> list[dict[str, str]]:
        ENTRY = r"{http://www.w3.org/2005/Atom}"
        MEDIA = r"{http://search.yahoo.com/mrss/}"
        YOUTUBE = r"{http://www.youtube.com/xml/schemas/2015}"  # declared for reference, not used below
result = []
with urllib.request.urlopen(self.url) as res:
data = xml.etree.ElementTree.fromstring(res.read())
for child in data.iter(f"{ENTRY}entry"):
result.append({
"title": child.find(f"{ENTRY}title").text,
"link": child.find(f"{ENTRY}link").attrib["href"],
"description": child.find(f"{MEDIA}group").find(f"{MEDIA}description").text,
})
return result
if __name__ == "__main__":
import pprint
pprint.pprint(RSS10Parser("https://www.youtube.com/feeds/videos.xml?playlist_id=PLrPVslFukDQo7l5RCqAZtKDl6tUyMAFWH").getlist()) | 3.09375 | 3 |
examples/laser.py | MPI-IS/reactive_pepper | 0 | 2391 | <gh_stars>0
# Python 2 example (note the bare print statements below).
import math, time, random
import pepper_interface
IP = "192.168.0.147"
PORT = 9559
simulation = False
with pepper_interface.get(IP,PORT,simulation) as pepper:
time.sleep(1.0)
values,time_stamp = pepper.laser.get()
print
print "Front"
print values["Front"]
print
print "Left"
print values["Left"]
print
print "Right"
print values["Right"]
print
| 2.5 | 2 |
exercises/pt/exc_01_03_01.py | Jette16/spacy-course | 2,085 | 2392 | # Importar a classe da língua inglesa (English) e criar um objeto nlp
from ____ import ____
nlp = ____
# Process the text
doc = ____("I like tree kangaroos and narwhals.")
# Select the first token
first_token = doc[____]
# Print the first token's text
print(first_token.____)
| 3.296875 | 3 |
tests/integration/mci/test_happy_path.py | qateam123/eq | 0 | 2393 | from tests.integration.create_token import create_token
from tests.integration.integration_test_case import IntegrationTestCase
class TestHappyPath(IntegrationTestCase):
def test_happy_path_203(self):
self.happy_path('0203', '1')
def test_happy_path_205(self):
self.happy_path('0205', '1')
def happy_path(self, form_type_id, eq_id):
# Get a token
token = create_token(form_type_id, eq_id)
resp = self.client.get('/session?token=' + token.decode(), follow_redirects=True)
self.assertEqual(resp.status_code, 200)
# We are on the landing page
content = resp.get_data(True)
self.assertRegex(content, '<title>Introduction</title>')
self.assertRegex(content, '>Start survey<')
self.assertRegex(content, 'Monthly Business Survey - Retail Sales Index')
# We proceed to the questionnaire
post_data = {
'action[start_questionnaire]': 'Start Questionnaire'
}
resp = self.client.post('/questionnaire/' + eq_id + '/' + form_type_id + '/789/introduction', data=post_data, follow_redirects=False)
self.assertEqual(resp.status_code, 302)
block_one_url = resp.location
resp = self.client.get(block_one_url, follow_redirects=False)
self.assertEqual(resp.status_code, 200)
# We are in the Questionnaire
content = resp.get_data(True)
self.assertRegex(content, '<title>Survey</title>')
self.assertRegex(content, '>Monthly Business Survey - Retail Sales Index</')
self.assertRegex(content, "What are the dates of the sales period you are reporting for?")
self.assertRegex(content, ">Save and continue<")
# check with have some guidance
self.assertRegex(content, "alcoholic drink")
# We fill in our answers
form_data = {
# Start Date
"period-from-day": "01",
"period-from-month": "4",
"period-from-year": "2016",
# End Date
"period-to-day": "30",
"period-to-month": "04",
"period-to-year": "2016",
# Total Turnover
"total-retail-turnover": "100000",
# User Action
"action[save_continue]": "Save & Continue"
}
# We submit the form
resp = self.client.post(block_one_url, data=form_data, follow_redirects=False)
self.assertEqual(resp.status_code, 302)
# There are no validation errors
self.assertRegex(resp.location, r'\/questionnaire\/1\/' + form_type_id + r'\/789\/summary$')
summary_url = resp.location
resp = self.client.get(summary_url, follow_redirects=False)
self.assertEqual(resp.status_code, 200)
# We are on the review answers page
content = resp.get_data(True)
self.assertRegex(content, '<title>Summary</title>')
self.assertRegex(content, '>Monthly Business Survey - Retail Sales Index</')
self.assertRegex(content, '>Your responses<')
self.assertRegex(content, 'Please check carefully before submission.')
self.assertRegex(content, '>Submit answers<')
# We submit our answers
post_data = {
"action[submit_answers]": "Submit answers"
}
resp = self.client.post(summary_url, data=post_data, follow_redirects=False)
self.assertEqual(resp.status_code, 302)
self.assertRegex(resp.location, r'\/questionnaire\/1\/' + form_type_id + r'\/789\/thank-you$')
resp = self.client.get(resp.location, follow_redirects=True)
self.assertEqual(resp.status_code, 200)
# We are on the thank you page
content = resp.get_data(True)
self.assertRegex(content, '<title>Submission Successful</title>')
self.assertRegex(content, '(?s)Monthly Business Survey - Retail Sales Index.*?Monthly Business Survey - Retail Sales Index')
| 2.421875 | 2 |
src/transformers/models/hubert/modeling_tf_hubert.py | OllieBroadhurst/transformers | 1 | 2394 | <gh_stars>1-10
# coding=utf-8
# Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TensorFlow Hubert model."""
import inspect
import warnings
from typing import Any, Dict, Optional, Tuple, Union
import numpy as np
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...modeling_tf_outputs import TFBaseModelOutput, TFCausalLMOutput
from ...modeling_tf_utils import TFPreTrainedModel, booleans_processing, get_initializer, keras_serializable
from ...tf_utils import shape_list
from ...tokenization_utils_base import BatchEncoding
from ...utils import (
ModelOutput,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_hubert import HubertConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "HubertConfig"
TF_HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"facebook/hubert-base-ls960",
# See all Hubert models at https://huggingface.co./models?filter=hubert
]
LARGE_NEGATIVE = -1e8
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.input_values_processing
def input_values_processing(func, config, input_values, **kwargs):
"""
    Process the input of each TensorFlow model, including the booleans. In case of a list of symbolic inputs, each
    input has to be named according to the parameter names, i.e. `input_values = tf.keras.Input(shape=(128,),
    dtype='float32', name="input_values")`, otherwise the order of the tensors will not be guaranteed during
    training.
Args:
func (`callable`):
The callable function of the TensorFlow model.
config ([`PretrainedConfig`]):
The config of the running model.
**kwargs:
The inputs of the model.
Returns:
        A dictionary mapping each expected parameter name to its processed value, with the boolean arguments
        resolved against the model config.
"""
signature = dict(inspect.signature(func).parameters)
signature.pop("kwargs", None)
signature.pop("self", None)
parameter_names = list(signature.keys())
output = {}
allowed_types = (tf.Tensor, bool, int, ModelOutput, tuple, list, dict, np.ndarray)
for k, v in kwargs.items():
if isinstance(v, allowed_types) or v is None:
output[k] = v
else:
raise ValueError(f"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.")
if isinstance(input_values, (tuple, list)):
for i, input in enumerate(input_values):
# EagerTensors don't allow to use the .name property so we check for a real Tensor
if type(input) == tf.Tensor:
# Tensor names have always the pattern `name:id` then we check only the
# `name` part
tensor_name = input.name.split(":")[0]
if tensor_name in parameter_names:
output[tensor_name] = input
else:
output[parameter_names[i]] = input
elif isinstance(input, allowed_types) or input is None:
output[parameter_names[i]] = input
else:
raise ValueError(
f"Data of type {type(input)} is not allowed only {allowed_types} is accepted for {parameter_names[i]}."
)
elif isinstance(input_values, (dict, BatchEncoding)):
if "inputs" in input_values:
warnings.warn(
"The `inputs` argument is deprecated and will be removed in a future version, use `input_values` instead.",
FutureWarning,
)
output["input_values"] = input_values.pop("inputs")
if "decoder_cached_states" in input_values:
warnings.warn(
"The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
FutureWarning,
)
output["past_key_values"] = input_values.pop("decoder_cached_states")
for k, v in dict(input_values).items():
if isinstance(v, allowed_types) or v is None:
output[k] = v
elif k not in parameter_names and "args" not in parameter_names:
logger.warning(
f"The parameter {k} does not belongs to the parameter list {parameter_names} and will be ignored."
)
continue
else:
raise ValueError(f"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.")
else:
if isinstance(input_values, tf.Tensor) or input_values is None:
output[parameter_names[0]] = input_values
else:
raise ValueError(
f"Data of type {type(input_values)} is not allowed only {allowed_types} is accepted for {parameter_names[0]}."
)
for name in parameter_names:
if name not in list(output.keys()) and name != "args":
output[name] = kwargs.pop(name, signature[name].default)
# When creating a SavedModel TF calls the method with LayerCall.__call__(args, **kwargs)
# So to respect the proper output we have to add this exception
if "args" in output:
if output["args"] is not None and type(output["args"]) == tf.Tensor:
tensor_name = output["args"].name.split(":")[0]
output[tensor_name] = output["args"]
else:
# `args` in this case is always the first parameter, then `input_values`
output["input_values"] = output["args"]
del output["args"]
if "kwargs" in output:
del output["kwargs"]
boolean_dict = {
k: v
for k, v in output.items()
if k in ["return_dict", "output_attentions", "output_hidden_states", "use_cache"]
}
output.update(booleans_processing(config=config, **boolean_dict))
return output
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2._sample_without_replacement
def _sample_without_replacement(distribution, num_samples):
"""
Categorical sampling without replacement is currently not implemented. The gumbel-max trick will do for now - see
https://github.com/tensorflow/tensorflow/issues/9260 for more info
"""
z = -tf.math.log(tf.random.uniform(shape_list(distribution), 0, 1))
_, indices = tf.nn.top_k(distribution + z, num_samples)
return indices
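
# Illustrative example (not part of the original file): drawing 2 distinct indices per
# row from a uniform distribution over 5 positions:
#   dist = tf.ones((3, 5))
#   idx = _sample_without_replacement(dist, 2)  # shape (3, 2), no repeats within a row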
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2._scatter_values_on_batch_indices
def _scatter_values_on_batch_indices(values, batch_indices, output_shape):
"""
    Scatter function as in PyTorch with indices in format (batch_dim, indices)
"""
indices_shape = shape_list(batch_indices)
# broadcast batch dim to indices_shape
broad_casted_batch_dims = tf.reshape(
tf.broadcast_to(tf.expand_dims(tf.range(indices_shape[0]), axis=-1), indices_shape), [1, -1]
)
# transform batch_indices to pair_indices
pair_indices = tf.transpose(tf.concat([broad_casted_batch_dims, tf.reshape(batch_indices, [1, -1])], 0))
# scatter values to pair indices
return tf.scatter_nd(pair_indices, tf.reshape(values, [-1]), output_shape)
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2._compute_mask_indices
def _compute_mask_indices(
shape: Tuple[int, int],
mask_prob: float,
mask_length: int,
min_masks: int = 0,
) -> tf.Tensor:
"""
Computes random mask spans for a given shape
Args:
        shape: the shape for which to compute masks.
            Should be of size 2, where the first element is the batch size and the second is the number of timesteps.
        attention_mask: optional padding mask of the same size as shape, which will prevent masking padded elements
        mask_prob:
            Probability for each token to be chosen as the start of the span to be masked. This will be multiplied by
            the number of timesteps divided by the length of the mask span, to mask approximately this percentage of
            all elements. However, due to overlaps, the actual number will be smaller (unless no_overlap is True).
mask_length: size of the mask
min_masks: minimum number of masked spans
Adapted from [fairseq's
data_utils.py](https://github.com/pytorch/fairseq/blob/e0788f7007a8473a76db573985031f3c94201e79/fairseq/data/data_utils.py#L376).
"""
batch_size, sequence_length = shape
if mask_length < 1:
raise ValueError("`mask_length` has to be bigger than 0.")
if mask_length > sequence_length:
raise ValueError(
f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}`"
)
# compute number of masked spans in batch
num_masked_spans = int(mask_prob * sequence_length / mask_length + tf.random.uniform((1,)))
num_masked_spans = max(num_masked_spans, min_masks)
# make sure num masked indices <= sequence_length
if num_masked_spans * mask_length > sequence_length:
num_masked_spans = sequence_length // mask_length
# SpecAugment mask to fill
spec_aug_mask = tf.zeros((batch_size, sequence_length), dtype=tf.int32)
# uniform distribution to sample from, make sure that offset samples are < sequence_length
uniform_dist = tf.ones((batch_size, sequence_length - (mask_length - 1)))
# get random indices to mask
spec_aug_mask_idxs = _sample_without_replacement(uniform_dist, num_masked_spans)
# expand masked indices to masked spans
spec_aug_mask_idxs = tf.expand_dims(spec_aug_mask_idxs, -1)
spec_aug_mask_idxs = tf.tile(spec_aug_mask_idxs, (1, 1, mask_length))
spec_aug_mask_idxs = tf.reshape(spec_aug_mask_idxs, (batch_size, num_masked_spans * mask_length))
offsets = tf.range(mask_length)[tf.newaxis, tf.newaxis, :]
offsets = tf.tile(offsets, (batch_size, num_masked_spans, 1))
offsets = tf.reshape(offsets, (batch_size, num_masked_spans * mask_length))
spec_aug_mask_idxs = spec_aug_mask_idxs + offsets
# scatter indices to mask
spec_aug_mask = _scatter_values_on_batch_indices(
tf.ones_like(spec_aug_mask_idxs), spec_aug_mask_idxs, spec_aug_mask.shape
)
return spec_aug_mask
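
# Illustrative example (not part of the original file): mask roughly 40% of 10 timesteps
# for a batch of 2 sequences, using spans of length 2:
#   mask = _compute_mask_indices((2, 10), mask_prob=0.4, mask_length=2)
#   # `mask` has shape (2, 10); 1 marks a masked timestep, 0 an unmasked one.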
# Copied from transformers.models.bart.modeling_tf_bart._expand_mask
def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None, past_key_values_length: int = 0):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
src_len = shape_list(mask)[1]
tgt_len = tgt_len if tgt_len is not None else src_len
one_cst = tf.constant(1.0)
mask = tf.cast(mask, dtype=one_cst.dtype)
expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))
return (one_cst - expanded_mask) * LARGE_NEGATIVE
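
# Illustrative example (not part of the original file): a padding mask [[1, 1, 0]] of shape
# (1, 3) becomes a (1, 1, tgt_len, 3) additive mask whose padded positions hold
# LARGE_NEGATIVE, so they are effectively ignored by a subsequent softmax attention.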
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2GroupNorm with Wav2Vec2->Hubert
class TFHubertGroupNorm(tf.keras.layers.Layer):
"""
From tensorflow-addons https://www.tensorflow.org/addons/api_docs/python/tfa/layers/GroupNormalization
"""
def __init__(
self,
groups: int = 32,
axis: int = -1,
epsilon: float = 1e-3,
center: bool = True,
scale: bool = True,
beta_initializer: tf.keras.initializers.Initializer = "zeros",
gamma_initializer: tf.keras.initializers.Initializer = "ones",
beta_regularizer: tf.keras.regularizers.Regularizer = None,
gamma_regularizer: tf.keras.regularizers.Regularizer = None,
beta_constraint: tf.keras.constraints.Constraint = None,
gamma_constraint: tf.keras.constraints.Constraint = None,
**kwargs,
):
super().__init__(**kwargs)
self.supports_masking = True
self.groups = groups
self.axis = axis
self.epsilon = epsilon
self.center = center
self.scale = scale
self.beta_initializer = tf.keras.initializers.get(beta_initializer)
self.gamma_initializer = tf.keras.initializers.get(gamma_initializer)
self.beta_regularizer = tf.keras.regularizers.get(beta_regularizer)
self.gamma_regularizer = tf.keras.regularizers.get(gamma_regularizer)
self.beta_constraint = tf.keras.constraints.get(beta_constraint)
self.gamma_constraint = tf.keras.constraints.get(gamma_constraint)
self._check_axis()
def build(self, input_shape):
self._check_if_input_shape_is_none(input_shape)
self._set_number_of_groups_for_instance_norm(input_shape)
self._check_size_of_dimensions(input_shape)
self._create_input_spec(input_shape)
self._add_gamma_weight(input_shape)
self._add_beta_weight(input_shape)
self.built = True
super().build(input_shape)
def call(self, inputs):
input_shape = tf.keras.backend.int_shape(inputs)
tensor_input_shape = tf.shape(inputs)
reshaped_inputs, group_shape = self._reshape_into_groups(inputs, input_shape, tensor_input_shape)
normalized_inputs = self._apply_normalization(reshaped_inputs, input_shape)
is_instance_norm = (input_shape[self.axis] // self.groups) == 1
if not is_instance_norm:
outputs = tf.reshape(normalized_inputs, tensor_input_shape)
else:
outputs = normalized_inputs
return outputs
def get_config(self):
config = {
"groups": self.groups,
"axis": self.axis,
"epsilon": self.epsilon,
"center": self.center,
"scale": self.scale,
"beta_initializer": tf.keras.initializers.serialize(self.beta_initializer),
"gamma_initializer": tf.keras.initializers.serialize(self.gamma_initializer),
"beta_regularizer": tf.keras.regularizers.serialize(self.beta_regularizer),
"gamma_regularizer": tf.keras.regularizers.serialize(self.gamma_regularizer),
"beta_constraint": tf.keras.constraints.serialize(self.beta_constraint),
"gamma_constraint": tf.keras.constraints.serialize(self.gamma_constraint),
}
base_config = super().get_config()
return {**base_config, **config}
def compute_output_shape(self, input_shape):
return input_shape
def _reshape_into_groups(self, inputs, input_shape, tensor_input_shape):
group_shape = [tensor_input_shape[i] for i in range(len(input_shape))]
is_instance_norm = (input_shape[self.axis] // self.groups) == 1
if not is_instance_norm:
group_shape[self.axis] = input_shape[self.axis] // self.groups
group_shape.insert(self.axis, self.groups)
group_shape = tf.stack(group_shape)
reshaped_inputs = tf.reshape(inputs, group_shape)
return reshaped_inputs, group_shape
else:
return inputs, group_shape
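    # Editor's shape sketch (example values assumed): for an input of shape
    # (batch, time, channels=512) with groups=32, `_reshape_into_groups` returns a
    # tensor of shape (batch, time, 32, 16), so the moments in `_apply_normalization`
    # are computed per group of 16 channels.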
def _apply_normalization(self, reshaped_inputs, input_shape):
group_shape = tf.keras.backend.int_shape(reshaped_inputs)
group_reduction_axes = list(range(1, len(group_shape)))
is_instance_norm = (input_shape[self.axis] // self.groups) == 1
if not is_instance_norm:
axis = -2 if self.axis == -1 else self.axis - 1
else:
axis = -1 if self.axis == -1 else self.axis - 1
group_reduction_axes.pop(axis)
mean, variance = tf.nn.moments(reshaped_inputs, group_reduction_axes, keepdims=True)
gamma, beta = self._get_reshaped_weights(input_shape)
normalized_inputs = tf.nn.batch_normalization(
reshaped_inputs,
mean=mean,
variance=variance,
scale=gamma,
offset=beta,
variance_epsilon=self.epsilon,
)
return normalized_inputs
def _get_reshaped_weights(self, input_shape):
broadcast_shape = self._create_broadcast_shape(input_shape)
gamma = None
beta = None
if self.scale:
gamma = tf.reshape(self.gamma, broadcast_shape)
if self.center:
beta = tf.reshape(self.beta, broadcast_shape)
return gamma, beta
def _check_if_input_shape_is_none(self, input_shape):
dim = input_shape[self.axis]
if dim is None:
raise ValueError(
"Axis " + str(self.axis) + " of "
"input tensor should have a defined dimension "
"but the layer received an input with shape " + str(input_shape) + "."
)
def _set_number_of_groups_for_instance_norm(self, input_shape):
dim = input_shape[self.axis]
if self.groups == -1:
self.groups = dim
def _check_size_of_dimensions(self, input_shape):
dim = input_shape[self.axis]
if dim < self.groups:
raise ValueError(
"Number of groups (" + str(self.groups) + ") cannot be "
"more than the number of channels (" + str(dim) + ")."
)
if dim % self.groups != 0:
raise ValueError(
"Number of groups (" + str(self.groups) + ") must be a "
"multiple of the number of channels (" + str(dim) + ")."
)
def _check_axis(self):
if self.axis == 0:
raise ValueError(
"You are trying to normalize your batch axis. Do you want to "
"use tf.layer.batch_normalization instead"
)
def _create_input_spec(self, input_shape):
dim = input_shape[self.axis]
self.input_spec = tf.keras.layers.InputSpec(ndim=len(input_shape), axes={self.axis: dim})
def _add_gamma_weight(self, input_shape):
dim = input_shape[self.axis]
shape = (dim,)
if self.scale:
self.gamma = self.add_weight(
shape=shape,
name="gamma",
initializer=self.gamma_initializer,
regularizer=self.gamma_regularizer,
constraint=self.gamma_constraint,
)
else:
self.gamma = None
def _add_beta_weight(self, input_shape):
dim = input_shape[self.axis]
shape = (dim,)
if self.center:
self.beta = self.add_weight(
shape=shape,
name="beta",
initializer=self.beta_initializer,
regularizer=self.beta_regularizer,
constraint=self.beta_constraint,
)
else:
self.beta = None
def _create_broadcast_shape(self, input_shape):
broadcast_shape = [1] * len(input_shape)
is_instance_norm = (input_shape[self.axis] // self.groups) == 1
if not is_instance_norm:
broadcast_shape[self.axis] = input_shape[self.axis] // self.groups
broadcast_shape.insert(self.axis, self.groups)
else:
broadcast_shape[self.axis] = self.groups
return broadcast_shape
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2WeightNormConv1D with Wav2Vec2->Hubert
class TFHubertWeightNormConv1D(tf.keras.layers.Conv1D):
"""Adapted from https://www.tensorflow.org/probability/api_docs/python/tfp/layers/weight_norm/WeightNorm"""
def __init__(self, filters, kernel_size, groups, explicit_padding, **kwargs):
super().__init__(
filters=filters,
kernel_size=kernel_size,
groups=groups,
padding="valid",
use_bias=True,
bias_initializer="he_normal",
**kwargs,
)
self.explicit_padding = explicit_padding
self.filter_axis = 2
self.initialized = False
self.kernel_norm_axes = tf.constant([0, 1])
def _init_norm(self):
"""Set the norm of the weight vector."""
kernel_norm = tf.sqrt(tf.reduce_sum(tf.square(self.weight_v), axis=self.kernel_norm_axes))
self.weight_g.assign(kernel_norm[:, tf.newaxis, tf.newaxis])
def _normalize_kernel(self):
"""Generate normalized weights."""
kernel = tf.nn.l2_normalize(self.weight_v, axis=self.kernel_norm_axes) * tf.transpose(self.weight_g)
self.kernel = tf.transpose(kernel)
def build(self, input_shape):
if not self.built:
input_shape = input_shape.as_list()
# Conv1D output shapes are checked at build time since TF 2.7, so we need to account for padding
input_shape[-2] += self.explicit_padding * 2
super().build(input_shape)
self.kernel = tf.Variable(tf.transpose(self.kernel), name="weight_v", trainable=True)
self.weight_v = self.kernel
self.weight_g = self.add_weight(
name="weight_g",
shape=(int(self.weight_v.shape[self.filter_axis]), 1, 1),
initializer="ones",
dtype=self.weight_v.dtype,
trainable=True,
)
self.bias = self.add_weight(name="bias", shape=(self.filters,), initializer="zeros", trainable=True)
def call(self, inputs):
if not self.initialized:
self._init_norm()
self.initialized = True
self._normalize_kernel()
padded_inputs = tf.pad(inputs, ((0, 0), (self.explicit_padding, self.explicit_padding), (0, 0)))
output = super().call(padded_inputs)
return output
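# Editor's note on TFHubertWeightNormConv1D (illustrative arithmetic only): weight
# normalisation re-parameterises the kernel as w = g * v / ||v||. For a single
# filter with direction v = [3., 4.] (norm 5) and scale g = 2, the effective kernel
# used by the convolution is [1.2, 1.6].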
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2NoLayerNormConvLayer with Wav2Vec2->Hubert
class TFHubertNoLayerNormConvLayer(tf.keras.layers.Layer):
def __init__(self, config: HubertConfig, layer_id: int = 0, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.in_conv_dim = config.conv_dim[layer_id] if layer_id > 0 else 1
self.out_conv_dim = config.conv_dim[layer_id]
self.conv = tf.keras.layers.Conv1D(
filters=self.out_conv_dim,
kernel_size=config.conv_kernel[layer_id],
strides=config.conv_stride[layer_id],
use_bias=config.conv_bias,
name="conv",
)
self.activation = get_tf_activation(config.feat_extract_activation)
def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
hidden_states = self.conv(hidden_states)
hidden_states = self.activation(hidden_states)
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2LayerNormConvLayer with Wav2Vec2->Hubert
class TFHubertLayerNormConvLayer(tf.keras.layers.Layer):
def __init__(self, config: HubertConfig, layer_id: int = 0, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.in_conv_dim = config.conv_dim[layer_id] if layer_id > 0 else 1
self.out_conv_dim = config.conv_dim[layer_id]
self.conv = tf.keras.layers.Conv1D(
filters=self.out_conv_dim,
kernel_size=config.conv_kernel[layer_id],
strides=config.conv_stride[layer_id],
use_bias=config.conv_bias,
name="conv",
)
self.layer_norm = tf.keras.layers.LayerNormalization(name="layer_norm", epsilon=config.layer_norm_eps)
self.activation = get_tf_activation(config.feat_extract_activation)
def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
hidden_states = self.conv(hidden_states)
hidden_states = self.layer_norm(hidden_states)
hidden_states = self.activation(hidden_states)
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2GroupNormConvLayer with Wav2Vec2->Hubert
class TFHubertGroupNormConvLayer(tf.keras.layers.Layer):
def __init__(self, config: HubertConfig, layer_id: int = 0, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.in_conv_dim = config.conv_dim[layer_id] if layer_id > 0 else 1
self.out_conv_dim = config.conv_dim[layer_id]
self.conv = tf.keras.layers.Conv1D(
filters=self.out_conv_dim,
kernel_size=config.conv_kernel[layer_id],
strides=config.conv_stride[layer_id],
use_bias=config.conv_bias,
name="conv",
)
self.activation = get_tf_activation(config.feat_extract_activation)
self.layer_norm = TFHubertGroupNorm(groups=self.out_conv_dim, epsilon=config.layer_norm_eps, name="layer_norm")
def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
hidden_states = self.conv(hidden_states)
hidden_states = self.layer_norm(hidden_states)
hidden_states = self.activation(hidden_states)
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2PositionalConvEmbedding with Wav2Vec2->Hubert
class TFHubertPositionalConvEmbedding(tf.keras.layers.Layer):
def __init__(self, config: HubertConfig, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.conv = TFHubertWeightNormConv1D(
filters=config.hidden_size,
kernel_size=config.num_conv_pos_embeddings,
groups=config.num_conv_pos_embedding_groups,
explicit_padding=config.num_conv_pos_embeddings // 2,
name="conv",
)
self.padding = TFHubertSamePadLayer(config.num_conv_pos_embeddings)
self.activation = get_tf_activation(config.feat_extract_activation)
def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
hidden_states = self.conv(hidden_states)
hidden_states = self.padding(hidden_states)
hidden_states = self.activation(hidden_states)
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2SamePadLayer with Wav2Vec2->Hubert
class TFHubertSamePadLayer(tf.keras.layers.Layer):
def __init__(self, num_conv_pos_embeddings, **kwargs):
super().__init__(**kwargs)
self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0
def call(self, hidden_states):
if self.num_pad_remove > 0:
hidden_states = hidden_states[:, : -self.num_pad_remove, :]
return hidden_states
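# Editor's note on TFHubertSamePadLayer (assumed config values): with an even
# positional-convolution kernel such as num_conv_pos_embeddings=128, the "same"
# padding produces one frame too many, so num_pad_remove is 1 and the last time
# step is trimmed; for odd kernel sizes nothing is removed.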
class TFHubertFeatureEncoder(tf.keras.layers.Layer):
def __init__(self, config: HubertConfig, **kwargs: Any) -> None:
super().__init__(**kwargs)
if config.feat_extract_norm == "group":
conv_layers = [TFHubertGroupNormConvLayer(config, layer_id=0, name=f"conv_layers.{0}")] + [
TFHubertNoLayerNormConvLayer(config, layer_id=i + 1, name=f"conv_layers.{i+1}")
for i in range(config.num_feat_extract_layers - 1)
]
elif config.feat_extract_norm == "layer":
conv_layers = [
TFHubertLayerNormConvLayer(config, layer_id=i, name=f"conv_layers.{i}")
for i in range(config.num_feat_extract_layers)
]
else:
raise ValueError(
f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']"
)
self.conv_layers = conv_layers
def call(self, input_values):
hidden_states = tf.expand_dims(input_values, -1)
for conv_layer in self.conv_layers:
hidden_states = conv_layer(hidden_states)
return hidden_states
class TFHubertFeatureExtractor(TFHubertFeatureEncoder):
def __init__(self, config, **kwargs):
super().__init__(config, **kwargs)
warnings.warn(
f"The class `{self.__class__.__name__}` has been depreciated "
"and will be removed in Transformers v5. "
f"Use `{self.__class__.__bases__[0].__name__}` instead.",
FutureWarning,
)
class TFHubertFeatureProjection(tf.keras.layers.Layer):
def __init__(self, config: HubertConfig, **kwargs):
super().__init__(**kwargs)
self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
self.projection = tf.keras.layers.Dense(
units=config.hidden_size,
kernel_initializer=get_initializer(config.initializer_range),
bias_initializer="zeros",
name="projection",
)
self.dropout = tf.keras.layers.Dropout(rate=config.feat_proj_dropout)
def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
hidden_states = self.layer_norm(hidden_states)
hidden_states = self.projection(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
return hidden_states
# Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with TFBart->TFHubert
class TFHubertAttention(tf.keras.layers.Layer):
"""Multi-headed attention from "Attention Is All You Need"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
**kwargs,
):
super().__init__(**kwargs)
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = tf.keras.layers.Dropout(dropout)
self.head_dim = embed_dim // num_heads
if (self.head_dim * num_heads) != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.is_decoder = is_decoder
self.k_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj")
self.q_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj")
self.v_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj")
self.out_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj")
def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int):
return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3))
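    # Editor's shape sketch (example sizes assumed): `_shape` maps a projection of
    # shape (bsz, seq_len, embed_dim) to (bsz, num_heads, seq_len, head_dim); e.g.
    # (2, 50, 768) with 12 heads becomes (2, 12, 50, 64) before attention scores
    # are computed.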
def call(
self,
hidden_states: tf.Tensor,
key_value_states: Optional[tf.Tensor] = None,
past_key_value: Optional[Tuple[Tuple[tf.Tensor]]] = None,
attention_mask: Optional[tf.Tensor] = None,
layer_head_mask: Optional[tf.Tensor] = None,
training: Optional[bool] = False,
) -> Tuple[tf.Tensor, Optional[tf.Tensor]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, embed_dim = shape_list(hidden_states)
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# get key, value proj
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_states = past_key_value[0]
value_states = past_key_value[1]
elif is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
elif past_key_value is not None:
# reuse k, v, self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
key_states = tf.concat([past_key_value[0], key_states], axis=2)
value_states = tf.concat([past_key_value[1], value_states], axis=2)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
if self.is_decoder:
# if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_states, value_states)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape)
key_states = tf.reshape(key_states, proj_shape)
value_states = tf.reshape(value_states, proj_shape)
src_len = shape_list(key_states)[1]
attn_weights = tf.matmul(query_states, key_states, transpose_b=True)
        # The tf.debugging asserts are not compliant with XLA, so they
        # have to be disabled in modes other than eager.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(attn_weights),
[bsz * self.num_heads, tgt_len, src_len],
message=f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {shape_list(attn_weights)}",
)
if attention_mask is not None:
            # The tf.debugging asserts are not compliant with XLA, so they
            # have to be disabled in modes other than eager.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(attention_mask),
[bsz, 1, tgt_len, src_len],
message=f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {shape_list(attention_mask)}",
)
attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype)
attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask
attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
attn_weights = tf.nn.softmax(attn_weights, axis=-1)
if layer_head_mask is not None:
            # The tf.debugging asserts are not compliant with XLA, so they
            # have to be disabled in modes other than eager.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(layer_head_mask),
[self.num_heads],
message=f"Head mask for a single layer should be of size {(self.num_heads)}, but is {shape_list(layer_head_mask)}",
)
attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(
attn_weights, (bsz, self.num_heads, tgt_len, src_len)
)
attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
attn_probs = self.dropout(attn_weights, training=training)
attn_output = tf.matmul(attn_probs, value_states)
        # The tf.debugging asserts are not compliant with XLA, so they
        # have to be disabled in modes other than eager.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(attn_output),
[bsz * self.num_heads, tgt_len, self.head_dim],
message=f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {shape_list(attn_output)}",
)
attn_output = tf.transpose(
tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)
)
attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim))
attn_output = self.out_proj(attn_output)
attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len))
return attn_output, attn_weights, past_key_value
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2FeedForward with Wav2Vec2->Hubert
class TFHubertFeedForward(tf.keras.layers.Layer):
def __init__(self, config: HubertConfig, **kwargs):
super().__init__(**kwargs)
self.intermediate_dropout = tf.keras.layers.Dropout(config.activation_dropout)
self.intermediate_dense = tf.keras.layers.Dense(
units=config.intermediate_size,
kernel_initializer=get_initializer(config.initializer_range),
bias_initializer="zeros",
name="intermediate_dense",
)
self.intermediate_act_fn = get_tf_activation(config.hidden_act)
self.output_dense = tf.keras.layers.Dense(
units=config.hidden_size,
kernel_initializer=get_initializer(config.initializer_range),
bias_initializer="zeros",
name="output_dense",
)
self.output_dropout = tf.keras.layers.Dropout(config.hidden_dropout)
def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
hidden_states = self.intermediate_dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
hidden_states = self.intermediate_dropout(hidden_states, training=training)
hidden_states = self.output_dense(hidden_states)
hidden_states = self.output_dropout(hidden_states, training=training)
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2EncoderLayer with Wav2Vec2->Hubert
class TFHubertEncoderLayer(tf.keras.layers.Layer):
def __init__(self, config: HubertConfig, **kwargs):
super().__init__(**kwargs)
self.attention = TFHubertAttention(
embed_dim=config.hidden_size,
num_heads=config.num_attention_heads,
dropout=config.attention_dropout,
is_decoder=False,
name="attention",
)
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout)
self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
self.feed_forward = TFHubertFeedForward(config, name="feed_forward")
self.final_layer_norm = tf.keras.layers.LayerNormalization(
epsilon=config.layer_norm_eps, name="final_layer_norm"
)
def call(
self,
hidden_states: tf.Tensor,
attention_mask: Optional[tf.Tensor] = None,
output_attentions: Optional[bool] = False,
training: bool = False,
) -> Tuple[tf.Tensor]:
attn_residual = hidden_states
hidden_states, attn_weights, _ = self.attention(
hidden_states, attention_mask=attention_mask, training=training
)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = attn_residual + hidden_states
hidden_states = self.layer_norm(hidden_states)
hidden_states = hidden_states + self.feed_forward(hidden_states)
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2EncoderLayerStableLayerNorm with Wav2Vec2->Hubert
class TFHubertEncoderLayerStableLayerNorm(tf.keras.layers.Layer):
def __init__(self, config: HubertConfig, **kwargs):
super().__init__(**kwargs)
self.attention = TFHubertAttention(
embed_dim=config.hidden_size,
num_heads=config.num_attention_heads,
dropout=config.attention_dropout,
is_decoder=False,
name="attention",
)
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout)
self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
self.feed_forward = TFHubertFeedForward(config, name="feed_forward")
self.final_layer_norm = tf.keras.layers.LayerNormalization(
epsilon=config.layer_norm_eps, name="final_layer_norm"
)
def call(
self,
hidden_states: tf.Tensor,
attention_mask: Optional[tf.Tensor] = None,
output_attentions: Optional[bool] = False,
training: bool = False,
) -> Tuple[tf.Tensor]:
attn_residual = hidden_states
hidden_states = self.layer_norm(hidden_states)
hidden_states, attn_weights, _ = self.attention(
hidden_states, attention_mask=attention_mask, training=training
)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = attn_residual + hidden_states
hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states))
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2Encoder with Wav2Vec2->Hubert
class TFHubertEncoder(tf.keras.layers.Layer):
def __init__(self, config: HubertConfig, **kwargs):
super().__init__(**kwargs)
self.config = config
self.pos_conv_embed = TFHubertPositionalConvEmbedding(config, name="pos_conv_embed")
self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout)
self.layer = [TFHubertEncoderLayer(config, name=f"layers.{i}") for i in range(config.num_hidden_layers)]
def call(
self,
hidden_states: tf.Tensor,
attention_mask: Optional[tf.Tensor] = None,
output_attentions: Optional[bool] = False,
output_hidden_states: Optional[bool] = False,
return_dict: Optional[bool] = True,
training: Optional[bool] = False,
) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if attention_mask is not None:
hidden_states = hidden_states * tf.expand_dims(attention_mask, -1)
attention_mask = _expand_mask(attention_mask)
else:
attention_mask = None
position_embeddings = self.pos_conv_embed(hidden_states)
hidden_states = hidden_states + position_embeddings
hidden_states = self.layer_norm(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = np.random.uniform(0, 1)
if training and (dropout_probability < self.config.layerdrop): # skip the layer
continue
layer_outputs = layer_module(
hidden_states=hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
training=training,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
return TFBaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2EncoderStableLayerNorm with Wav2Vec2->Hubert
class TFHubertEncoderStableLayerNorm(tf.keras.layers.Layer):
def __init__(self, config: HubertConfig, **kwargs):
super().__init__(**kwargs)
self.config = config
self.pos_conv_embed = TFHubertPositionalConvEmbedding(config, name="pos_conv_embed")
self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout)
self.layer = [
TFHubertEncoderLayerStableLayerNorm(config, name=f"layers.{i}") for i in range(config.num_hidden_layers)
]
def call(
self,
hidden_states: tf.Tensor,
attention_mask: Optional[tf.Tensor] = None,
output_attentions: Optional[bool] = False,
output_hidden_states: Optional[bool] = False,
return_dict: Optional[bool] = True,
training: Optional[bool] = False,
) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if attention_mask is not None:
hidden_states = hidden_states * tf.expand_dims(attention_mask, -1)
attention_mask = _expand_mask(attention_mask)
else:
attention_mask = None
position_embeddings = self.pos_conv_embed(hidden_states)
hidden_states = hidden_states + position_embeddings
hidden_states = self.dropout(hidden_states, training=training)
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = np.random.uniform(0, 1)
if training and (dropout_probability < self.config.layerdrop): # skip the layer
continue
layer_outputs = layer_module(
hidden_states=hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
training=training,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
hidden_states = self.layer_norm(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
return TFBaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
@keras_serializable
class TFHubertMainLayer(tf.keras.layers.Layer):
config_class = HubertConfig
def __init__(self, config: HubertConfig, **kwargs):
super().__init__(**kwargs)
self.config = config
self.feature_extractor = TFHubertFeatureEncoder(config, name="feature_extractor")
self.feature_projection = TFHubertFeatureProjection(config, name="feature_projection")
if config.do_stable_layer_norm:
self.encoder = TFHubertEncoderStableLayerNorm(config, name="encoder")
else:
self.encoder = TFHubertEncoder(config, name="encoder")
def build(self, input_shape: tf.TensorShape):
self.masked_spec_embed = self.add_weight(
shape=(self.config.hidden_size,), initializer="uniform", trainable=True, name="masked_spec_embed"
)
super().build(input_shape)
def _get_feat_extract_output_lengths(self, input_lengths: tf.Tensor):
"""
Computes the output length of the convolutional layers
"""
def _conv_out_length(input_length, kernel_size, stride):
# 1D convolutional layer output length formula taken
# from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
return (input_length - kernel_size) // stride + 1
for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
input_lengths = _conv_out_length(input_lengths, kernel_size, stride)
return input_lengths
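    # Editor's worked example (assumes the usual base feature-extractor config with
    # conv_kernel=(10, 3, 3, 3, 3, 2, 2) and conv_stride=(5, 2, 2, 2, 2, 2, 2)):
    # 16000 input samples (1 s of 16 kHz audio) reduce step by step to
    # 3199 -> 1599 -> 799 -> 399 -> 199 -> 99 -> 49 output frames.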
def _mask_hidden_states(self, hidden_states: tf.Tensor, mask_time_indices: Optional[tf.Tensor] = None):
"""
Masks extracted features along time axis and/or along feature axis according to
[SpecAugment](https://arxiv.org/abs/1904.08779).
"""
batch_size, sequence_length, hidden_size = shape_list(hidden_states)
# `config.apply_spec_augment` can set masking to False
if not getattr(self.config, "apply_spec_augment", True):
return hidden_states
if mask_time_indices is not None:
# apply SpecAugment along time axis with given mask_time_indices
hidden_states = tf.where(
tf.cast(mask_time_indices[:, :, tf.newaxis], tf.bool),
self.masked_spec_embed[tf.newaxis, tf.newaxis, :],
hidden_states,
)
elif self.config.mask_time_prob > 0:
# generate indices & apply SpecAugment along time axis
mask_time_indices = _compute_mask_indices(
(batch_size, sequence_length),
mask_prob=self.config.mask_time_prob,
mask_length=self.config.mask_time_length,
min_masks=2,
)
hidden_states = tf.where(
tf.cast(mask_time_indices[:, :, tf.newaxis], tf.bool),
self.masked_spec_embed[tf.newaxis, tf.newaxis, :],
hidden_states,
)
# apply SpecAugment along feature axis
if self.config.mask_feature_prob > 0:
mask_feature_indices = _compute_mask_indices(
(batch_size, hidden_size),
mask_prob=self.config.mask_feature_prob,
mask_length=self.config.mask_feature_length,
)
hidden_states = tf.where(mask_feature_indices[:, tf.newaxis, :], hidden_states, 0)
return hidden_states
def call(
self,
input_values: tf.Tensor,
attention_mask: Optional[tf.Tensor] = None,
token_type_ids: Optional[tf.Tensor] = None,
position_ids: Optional[tf.Tensor] = None,
head_mask: Optional[tf.Tensor] = None,
inputs_embeds: Optional[tf.Tensor] = None,
output_attentions: Optional[tf.Tensor] = None,
output_hidden_states: Optional[tf.Tensor] = None,
return_dict: Optional[bool] = None,
training: bool = False,
**kwargs: Any,
):
inputs = input_values_processing(
func=self.call,
config=self.config,
input_values=input_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
hidden_states = self.feature_extractor(
tf.cast(inputs["input_values"], tf.float32), training=inputs["training"]
)
if inputs["attention_mask"] is not None:
# compute real output lengths according to convolution formula
output_lengths = self._get_feat_extract_output_lengths(tf.reduce_sum(inputs["attention_mask"], -1))
attention_mask = tf.sequence_mask(
output_lengths, maxlen=shape_list(hidden_states)[1], dtype=hidden_states.dtype
)
hidden_states = self.feature_projection(hidden_states, training=inputs["training"])
mask_time_indices = kwargs.get("mask_time_indices", None)
if inputs["training"]:
hidden_states = self._mask_hidden_states(hidden_states, mask_time_indices=mask_time_indices)
encoder_outputs = self.encoder(
hidden_states,
attention_mask=attention_mask,
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
hidden_states = encoder_outputs[0]
if not inputs["return_dict"]:
return (hidden_states,) + encoder_outputs[1:]
return TFBaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
class TFHubertPreTrainedModel(TFPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = HubertConfig
base_model_prefix = "hubert"
main_input_name = "input_values"
@property
def dummy_inputs(self) -> Dict[str, tf.Tensor]:
pad_token = 0.0
input_values = tf.convert_to_tensor(np.random.rand(1, 16000), tf.float32)
dummy_inputs = {
"input_values": input_values,
"attention_mask": tf.cast(tf.not_equal(input_values, pad_token), tf.float32),
}
return dummy_inputs
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
logger.warning(
f"\n{self.__class__.__name__} has backpropagation operations that are NOT supported on CPU. If you wish "
"to train/fine-tine this model, you need a GPU or a TPU"
)
@tf.function
def serving(self, inputs):
output = self.call(input_values=inputs, training=False)
return self.serving_output(output)
HUBERT_START_DOCSTRING = r"""
This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
behavior.
<Tip>
TF 2.0 models accepts two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional arguments.
This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the
tensors in the first argument of the model call function: `model(inputs)`.
If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the
first positional argument :
    - a single Tensor with `input_values` only and nothing else: `model(input_values)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
`model([input_values, attention_mask])` or `model([input_values, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
`model({"input_values": input_values, "token_type_ids": token_type_ids})`
</Tip>
Args:
config ([`HubertConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
HUBERT_INPUTS_DOCSTRING = r"""
Args:
        input_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]`, and each example must have the shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`BertTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
[`PreTrainedTokenizer.encode`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
head_mask (`np.ndarray` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
Optionally, instead of passing `input_values` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_values` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
config will be used instead.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
used instead.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. This argument can be used
in eager mode, in graph mode the value will always be set to True.
        training (`bool`, *optional*, defaults to `False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
@add_start_docstrings(
"The bare TFHubert Model transformer outputing raw hidden-states without any specific head on top.",
HUBERT_START_DOCSTRING,
)
class TFHubertModel(TFHubertPreTrainedModel):
def __init__(self, config: HubertConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.config = config
self.hubert = TFHubertMainLayer(config, name="hubert")
@add_start_docstrings_to_model_forward(HUBERT_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC)
def call(
self,
input_values: tf.Tensor,
attention_mask: Optional[tf.Tensor] = None,
token_type_ids: Optional[tf.Tensor] = None,
position_ids: Optional[tf.Tensor] = None,
head_mask: Optional[tf.Tensor] = None,
inputs_embeds: Optional[tf.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: bool = False,
) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
"""
Returns:
Example:
```python
>>> from transformers import Wav2Vec2Processor, TFHubertModel
>>> from datasets import load_dataset
>>> import soundfile as sf
>>> processor = Wav2Vec2Processor.from_pretrained("facebook/hubert-base-960h")
>>> model = TFHubertModel.from_pretrained("facebook/hubert-base-960h")
>>> def map_to_array(batch):
... speech, _ = sf.read(batch["file"])
... batch["speech"] = speech
... return batch
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> ds = ds.map(map_to_array)
>>> input_values = processor(ds["speech"][0], return_tensors="tf").input_values # Batch size 1
>>> hidden_states = model(input_values).last_hidden_state
```"""
inputs = input_values_processing(
func=self.call,
config=self.config,
input_values=input_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
)
inputs["output_hidden_states"] = (
inputs["output_hidden_states"] if inputs["output_hidden_states"] else self.config.output_hidden_states
)
inputs["output_attentions"] = (
inputs["output_attentions"] if inputs["output_attentions"] else self.config.output_attentions
)
inputs["return_dict"] = inputs["return_dict"] if inputs["return_dict"] else self.config.return_dict
outputs = self.hubert(
input_values=inputs["input_values"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
return outputs
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFBaseModelOutput(last_hidden_state=output.last_hidden_state, hidden_states=hs, attentions=attns)
@add_start_docstrings(
"""TFHubert Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""",
HUBERT_START_DOCSTRING,
)
class TFHubertForCTC(TFHubertPreTrainedModel):
def __init__(self, config: HubertConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.hubert = TFHubertMainLayer(config, name="hubert")
self.dropout = tf.keras.layers.Dropout(config.final_dropout)
self.lm_head = tf.keras.layers.Dense(config.vocab_size, name="lm_head")
def freeze_feature_extractor(self):
"""
Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
"""
warnings.warn(
"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5."
"Please use the equivalent `freeze_feature_encoder` method instead.",
FutureWarning,
)
self.freeze_feature_encoder()
def freeze_feature_encoder(self):
"""
        Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
"""
self.hubert.feature_extractor.trainable = False
@add_start_docstrings_to_model_forward(HUBERT_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=TFCausalLMOutput, config_class=_CONFIG_FOR_DOC)
def call(
self,
input_values: tf.Tensor,
attention_mask: Optional[tf.Tensor] = None,
token_type_ids: Optional[tf.Tensor] = None,
position_ids: Optional[tf.Tensor] = None,
head_mask: Optional[tf.Tensor] = None,
inputs_embeds: Optional[tf.Tensor] = None,
output_attentions: Optional[bool] = None,
labels: Optional[tf.Tensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: Optional[bool] = False,
) -> Union[TFCausalLMOutput, Tuple[tf.Tensor]]:
r"""
labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_values` docstring) Tokens with indices set to `-100` are ignored (masked),
the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
Returns:
Example:
```python
>>> import tensorflow as tf
>>> from transformers import Wav2Vec2Processor, TFHubertForCTC
>>> from datasets import load_dataset
>>> import soundfile as sf
>>> processor = Wav2Vec2Processor.from_pretrained("facebook/hubert-base-960h")
>>> model = TFHubertForCTC.from_pretrained("facebook/hubert-base-960h")
>>> def map_to_array(batch):
... speech, _ = sf.read(batch["file"])
... batch["speech"] = speech
... return batch
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> ds = ds.map(map_to_array)
>>> input_values = processor(ds["speech"][0], return_tensors="tf").input_values # Batch size 1
>>> logits = model(input_values).logits
>>> predicted_ids = tf.argmax(logits, axis=-1)
>>> transcription = processor.decode(predicted_ids[0])
>>> # compute loss
>>> target_transcription = "A MAN SAID TO THE UNIVERSE SIR I EXIST"
>>> # wrap processor as target processor to encode labels
>>> with processor.as_target_processor():
        ...     labels = processor(target_transcription, return_tensors="tf").input_ids
>>> loss = model(input_values, labels=labels).loss
```"""
inputs = input_values_processing(
func=self.call,
config=self.config,
input_values=input_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
)
outputs = self.hubert(
input_values=inputs["input_values"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
hidden_states = outputs[0]
hidden_states = self.dropout(hidden_states, training=inputs["training"])
logits = self.lm_head(hidden_states)
if labels is not None:
if tf.reduce_max(labels) >= self.config.vocab_size:
raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}")
attention_mask = (
inputs["attention_mask"]
if inputs["attention_mask"] is not None
else tf.ones_like(inputs["input_values"], dtype=tf.float32)
)
input_lengths = self.hubert._get_feat_extract_output_lengths(tf.reduce_sum(attention_mask, axis=-1))
# assuming that padded tokens are filled with -100
# when not being attended to
labels_mask = tf.cast(labels >= 0, tf.int32)
target_lengths = tf.reduce_sum(labels_mask, axis=-1)
loss = tf.nn.ctc_loss(
logits=logits,
labels=labels,
logit_length=input_lengths,
label_length=target_lengths,
blank_index=self.config.pad_token_id,
logits_time_major=False,
)
if self.config.ctc_loss_reduction == "sum":
loss = tf.reduce_sum(loss)
if self.config.ctc_loss_reduction == "mean":
loss = tf.reduce_mean(loss)
else:
loss = None
if not inputs["return_dict"]:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFCausalLMOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def serving_output(self, output: TFCausalLMOutput) -> TFCausalLMOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFCausalLMOutput(logits=output.logits, hidden_states=hs, attentions=attns)
| 1.734375 | 2 |
600/unit-1/recursion/problem-set/mit-solutions/ps2_hangman_sol1.py | marioluan/mit-opencourseware-cs | 0 | 2395 | # 6.00 Problem Set 2
#
# Hangman
# Name : Solutions
# Collaborators : <your collaborators>
# Time spent : <total time>
# -----------------------------------
# Helper code
# You don't need to understand this helper code,
# but you will have to know how to use the functions
import random
import string
WORDLIST_FILENAME = "words.txt"
def load_words():
"""
Returns a list of valid words. Words are strings of lowercase letters.
Depending on the size of the word list, this function may
take a while to finish.
"""
print "Loading word list from file..."
# inFile: file
inFile = open(WORDLIST_FILENAME, 'r', 0)
# line: string
line = inFile.readline()
# wordlist: list of strings
wordlist = string.split(line)
print " ", len(wordlist), "words loaded."
return wordlist
def choose_word(wordlist):
"""
wordlist (list): list of words (strings)
Returns a word from wordlist at random
"""
return random.choice(wordlist)
# end of helper code
# -----------------------------------
# load the list of words into the wordlist variable
# so that it can be accessed from anywhere in the program
wordlist = load_words()
def partial_word(secret_word, guessed_letters):
"""
Return the secret_word in user-visible format, with underscores used
to replace characters that have not yet been guessed.
"""
result = ''
for letter in secret_word:
if letter in guessed_letters:
result = result + letter
else:
result = result + '_'
return result
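# Example (editor's note): partial_word('apple', 'ap') returns 'app__', since only
# the guessed letters are revealed and every other position is displayed as '_'.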
def hangman():
"""
Runs the hangman game.
"""
print 'Welcome to the game, Hangman!'
secret_word = choose_word(wordlist)
print 'I am thinking of a word that is ' + str(len(secret_word)) + ' letters long.'
num_guesses = 8
word_guessed = False
guessed_letters = ''
available_letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i',
'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r',
's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# Letter-guessing loop. Ask the user to guess a letter and respond to the
# user based on whether the word has yet been correctly guessed.
while num_guesses > 0 and not word_guessed:
print '-------------'
print 'You have ' + str(num_guesses) + ' guesses left.'
print 'Available letters: ' + ''.join(available_letters)
guess = raw_input('Please guess a letter:')
if guess not in available_letters:
print 'Oops! You\'ve already guessed that letter: ' + partial_word(secret_word, guessed_letters)
elif guess not in secret_word:
num_guesses -= 1
available_letters.remove(guess)
print 'Oops! That letter is not in my word: ' + partial_word(secret_word, guessed_letters)
else:
available_letters.remove(guess)
guessed_letters += guess
print 'Good guess: ' + partial_word(secret_word, guessed_letters)
if secret_word == partial_word(secret_word, guessed_letters):
word_guessed = True
if word_guessed:
print 'Congratulations, you won!'
else:
print 'Game over.'
| 4.125 | 4 |
top/api/rest/FenxiaoRefundMessageAddRequest.py | forestsheep/middleman | 0 | 2396 | <reponame>forestsheep/middleman
'''
Created by auto_sdk on 2016.04.13
'''
from top.api.base import RestApi
class FenxiaoRefundMessageAddRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.image = None
self.message_content = None
self.sub_order_id = None
def getapiname(self):
return 'taobao.fenxiao.refund.message.add'
def getMultipartParas(self):
return ['image']
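# Editor's usage sketch (an assumption about the SDK's usual request flow, not part
# of the generated file):
#
#   req = FenxiaoRefundMessageAddRequest()
#   req.sub_order_id = 123456           # hypothetical sub-order id
#   req.message_content = 'refund note'
#   resp = req.getResponse(session)     # `getResponse`/`session` assumed from the base RestApi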
| 1.742188 | 2 |
image-generation/slegan/args.py | AaratiAkkapeddi/nnabla-examples | 228 | 2397 | <filename>image-generation/slegan/args.py
# Copyright 2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def get_args(batch_size=8, image_size=256, max_iter=100000):
"""
Get command line arguments.
Arguments set the default values of command line arguments.
"""
import argparse
import os
description = "Example of Lightweight GAN."
parser = argparse.ArgumentParser(description)
parser.add_argument("-d", "--device-id", type=str, default="0",
help="Device id.")
parser.add_argument("-c", "--context", type=str, default="cudnn",
help="Context.")
parser.add_argument("--type-config", "-t", type=str, default='float',
help='Type of computation. e.g. "float", "half".')
parser.add_argument("--img-path", type=str,
default="~/AnimalFace-dog",
help="Image path.")
parser.add_argument("--image-size", type=int, default=image_size,
help="Image size.")
parser.add_argument("--batch-size", "-b", type=int, default=batch_size,
help="Batch size.")
parser.add_argument("--max-iter", "-i", type=int, default=max_iter,
help="Max iterations.")
parser.add_argument("--save-interval", type=int, default=50000,
help="Interval for saving models.")
parser.add_argument("--test-interval", type=int, default=5000,
help="Interval for testing models.")
parser.add_argument("--latent", type=int, default=256,
help="Number of latent variables.")
parser.add_argument("--monitor-path", type=str, default="./result/tmp",
help="Monitor path.")
parser.add_argument("--model-load-path", type=str, default=".",
help="Path to load parameters from")
parser.add_argument("--train-samples", type=int, default=-1,
help="Number of data to be used. When -1 is set all data is used.")
parser.add_argument("--lr", type=float, default=2e-4,
help="Learning rate")
parser.add_argument("--aug-list", nargs="+",
default=["lrflip", "translation", "color"])
args = parser.parse_args()
return args
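# Editor's example invocation (hypothetical entry point `train.py` that calls
# get_args(); paths and values are placeholders):
#
#   python train.py -c cudnn -b 8 --image-size 256 --img-path ~/AnimalFace-dog \
#       --monitor-path ./result/dog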
def save_args(args, mode="train"):
from nnabla import logger
import os
if not os.path.exists(args.monitor_path):
os.makedirs(args.monitor_path)
path = "{}/Arguments-{}.txt".format(args.monitor_path, mode)
logger.info("Arguments are saved to {}.".format(path))
with open(path, "w") as fp:
for k, v in sorted(vars(args).items()):
logger.info("{}={}".format(k, v))
fp.write("{}={}\n".format(k, v))
| 2.375 | 2 |
day1/files_ex1.py | grenn72/pynet-ons-feb19 | 0 | 2398 | <gh_stars>0
#!/usr/bin/env python
from __future__ import print_function
# READ ####
f = open("my_file.txt")
print("\nLoop directly over file")
print("-" * 60)
for line in f:
print(line.strip())
print("-" * 60)
f.seek(0)
my_content = f.readlines()
print("\nUse readlines method")
print("-" * 60)
for line in my_content:
print(line.strip())
print("-" * 60)
f.seek(0)
my_content = f.read()
print("\nUse read + splitlines")
print("-" * 60)
for line in my_content.splitlines():
print(line)
print("-" * 60)
f.close()
with open("my_file.txt") as f:
print("\nUse with and loop over file")
print("-" * 60)
for line in f:
print(line.strip())
print("-" * 60)
# WRITE ####
print("\nWriting file.")
f = open("new_file.txt", "w")
f.write("whatever2\n")
f.close()
# APPEND ####
print("\nAppending file.")
with open("new_file.txt", "a") as f:
f.write("something else\n")
print()
| 3.671875 | 4 |
test/integration_tests/test_integration_datasets_client.py | self-host/selfhost-python-client | 0 | 2399 | import uuid
from typing import List, Dict, Any
import unittest
from selfhost_client import SelfHostClient, DatasetType
class TestIntegrationDatasetsClient(unittest.TestCase):
"""
Run these tests individually because Self-Host will return HTTP 429 Too Many Requests otherwise.
"""
@classmethod
def setUpClass(cls) -> None:
cls.client: SelfHostClient = SelfHostClient(
base_url='http://127.0.0.1:8080',
username='test',
password='<PASSWORD>'
)
cls.unique_name: str = str(uuid.uuid4())
cls.created_dataset: DatasetType = cls.client.create_dataset(
name=cls.unique_name,
dataset_format='ini',
content='aGVsbG8sIHdvcmxkIQ==',
tags=['test_tag']
)
@classmethod
def tearDownClass(cls) -> None:
cls.client.delete_dataset(cls.created_dataset['uuid'])
def test_get_datasets(self) -> None:
params: Dict[str, int] = {
'limit': 20,
'offset': 0
}
datasets: List[DatasetType] = self.client.get_datasets(**params)
self.assertIsNotNone(datasets)
def test_create_and_delete_dataset(self) -> None:
# Create and delete happens in setup and teardown methods.
self.assertEqual(self.created_dataset['name'], self.unique_name)
def test_get_dataset(self) -> None:
fetched_dataset: DatasetType = self.client.get_dataset(self.created_dataset['uuid'])
self.assertEqual(fetched_dataset['name'], self.created_dataset['name'])
def test_update_dataset(self) -> None:
self.client.update_dataset(
dataset_uuid=self.created_dataset['uuid'],
name=f'{self.created_dataset["name"]} Updated',
dataset_format='json',
tags=['updated']
)
fetched_dataset: DatasetType = self.client.get_dataset(self.created_dataset['uuid'])
self.assertEqual(fetched_dataset['name'], f'{self.created_dataset["name"]} Updated')
self.assertEqual(fetched_dataset['format'], 'json')
self.assertEqual(fetched_dataset['tags'], ['updated'])
def test_get_dataset_raw_content(self) -> None:
fetched_content: Any = self.client.get_dataset_raw_content(self.created_dataset['uuid'])
self.assertIsNotNone(fetched_content)
| 2.71875 | 3 |