max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---|
tests/test_data/lazy_mod.py | brettcannon/modutil | 17 | 3000 | import modutil
mod, __getattr__ = modutil.lazy_import(__name__,
['tests.test_data.A', '.B', '.C as still_C'])
def trigger_A():
return mod.A
def trigger_B():
return mod.B
def trigger_C():
return mod.still_C
def trigger_failure():
return mod.does_not_exist
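# Editorial sketch of the expected behaviour (inferred from modutil's lazy_import
# contract, not part of the original test data): importing this module does not
# yet import tests.test_data.A, .B or .C; the first attribute access performs
# the real import, e.g.
#
#   trigger_A()        # imports tests.test_data.A on first use, then returns it
#   trigger_C()        # '.C as still_C' is exposed as mod.still_C
#   trigger_failure()  # '.does_not_exist' was never registered, so this is
#                      # expected to raise (presumably AttributeError)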
| 1.9375 | 2 |
test.py | xiaohuaibaoguigui/EllSeg | 1 | 3001 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import tqdm
import torch
import pickle
import resource
import numpy as np
import matplotlib.pyplot as plt
from args import parse_args
from modelSummary import model_dict
from pytorchtools import load_from_file
from torch.utils.data import DataLoader
from helperfunctions import mypause, stackall_Dict
from loss import get_seg2ptLoss
from utils import get_nparams, get_predictions
from utils import getSeg_metrics, getPoint_metric, generateImageGrid, unnormPts
sys.path.append(os.path.abspath(os.path.join(os.getcwd(), os.pardir)))
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (2048*10, rlimit[1]))
#%%
if __name__ == '__main__':
args = parse_args()
device=torch.device("cuda")
torch.cuda.manual_seed(12)
if torch.cuda.device_count() > 1:
print('Moving to a multiGPU setup.')
args.useMultiGPU = True
else:
args.useMultiGPU = False
torch.backends.cudnn.deterministic=False
if args.model not in model_dict:
print("Model not found.")
print("valid models are: {}".format(list(model_dict.keys())))
exit(1)
LOGDIR = os.path.join(os.getcwd(), 'logs', args.model, args.expname)
path2model = os.path.join(LOGDIR, 'weights')
path2checkpoint = os.path.join(LOGDIR, 'checkpoints')
path2writer = os.path.join(LOGDIR, 'TB.lock')
path2op = os.path.join(os.getcwd(), 'op', str(args.curObj))
os.makedirs(LOGDIR, exist_ok=True)
os.makedirs(path2model, exist_ok=True)
os.makedirs(path2checkpoint, exist_ok=True)
os.makedirs(path2writer, exist_ok=True)
os.makedirs(path2op, exist_ok=True)
model = model_dict[args.model]
netDict = load_from_file([args.loadfile,
os.path.join(path2checkpoint, 'checkpoint.pt')])
startEp = netDict['epoch'] if 'epoch' in netDict.keys() else 0
if 'state_dict' in netDict.keys():
model.load_state_dict(netDict['state_dict'])
print('Parameters: {}'.format(get_nparams(model)))
model = model if not args.useMultiGPU else torch.nn.DataParallel(model)
model = model.to(device).to(args.prec)
f = open(os.path.join('curObjects',
'baseline',
'cond_'+str(args.curObj)+'.pkl'), 'rb')
_, _, testObj = pickle.load(f)
testObj.path2data = os.path.join(args.path2data, 'Datasets', 'All')
testObj.augFlag = False
testloader = DataLoader(testObj,
batch_size=args.batchsize,
shuffle=False,
num_workers=args.workers,
drop_last=False)
if args.disp:
fig, axs = plt.subplots(nrows=1, ncols=1)
#%%
accLoss = 0.0
imCounter = 0
ious = []
dists_pupil_latent = []
dists_pupil_seg = []
dists_iris_latent = []
dists_iris_seg = []
model.eval()
opDict = {'id':[], 'archNum': [], 'archName': [], 'code': [],
'scores':{'iou':[], 'lat_dst':[], 'seg_dst':[]},
'pred':{'pup_latent_c':[],
'pup_seg_c':[],
'iri_latent_c':[],
'iri_seg_c':[],
'mask':[]},
'gt':{'pup_c':[], 'mask':[]}}
with torch.no_grad():
for bt, batchdata in enumerate(tqdm.tqdm(testloader)):
img, labels, spatialWeights, distMap, pupil_center, iris_center, elNorm, cond, imInfo = batchdata
out_tup = model(img.to(device).to(args.prec),
labels.to(device).long(),
pupil_center.to(device).to(args.prec),
elNorm.to(device).to(args.prec),
spatialWeights.to(device).to(args.prec),
distMap.to(device).to(args.prec),
cond.to(device).to(args.prec),
imInfo[:, 2].to(device).to(torch.long),
0.5)
output, elOut, latent, loss = out_tup
latent_pupil_center = elOut[:, 0:2].detach().cpu().numpy()
latent_iris_center = elOut[:, 5:7].detach().cpu().numpy()
_, seg_pupil_center = get_seg2ptLoss(output[:, 2, ...].cpu(), pupil_center, temperature=4)
_, seg_iris_center = get_seg2ptLoss(-output[:, 0, ...].cpu(), iris_center, temperature=4)
loss = loss if args.useMultiGPU else loss.mean()
accLoss += loss.detach().cpu().item()
predict = get_predictions(output)
iou, iou_bySample = getSeg_metrics(labels.numpy(),
predict.numpy(),
cond[:, 1].numpy())[1:]
latent_pupil_dist, latent_pupil_dist_bySample = getPoint_metric(pupil_center.numpy(),
latent_pupil_center,
cond[:,0].numpy(),
img.shape[2:],
True) # Unnormalizes the points
seg_pupil_dist, seg_pupil_dist_bySample = getPoint_metric(pupil_center.numpy(),
seg_pupil_center,
cond[:,1].numpy(),
img.shape[2:],
True) # Unnormalizes the points
latent_iris_dist, latent_iris_dist_bySample = getPoint_metric(iris_center.numpy(),
latent_iris_center,
cond[:,1].numpy(),
img.shape[2:],
True) # Unnormalizes the points
seg_iris_dist, seg_iris_dist_bySample = getPoint_metric(iris_center.numpy(),
seg_iris_center,
cond[:,1].numpy(),
img.shape[2:],
True) # Unnormalizes the points
dists_pupil_latent.append(latent_pupil_dist)
dists_iris_latent.append(latent_iris_dist)
dists_pupil_seg.append(seg_pupil_dist)
dists_iris_seg.append(seg_iris_dist)
ious.append(iou)
pup_latent_c = unnormPts(latent_pupil_center,
img.shape[2:])
pup_seg_c = unnormPts(seg_pupil_center,
img.shape[2:])
iri_latent_c = unnormPts(latent_iris_center,
img.shape[2:])
iri_seg_c = unnormPts(seg_iris_center,
img.shape[2:])
dispI = generateImageGrid(img.numpy().squeeze(),
predict.numpy(),
elOut.detach().cpu().numpy().reshape(-1, 2, 5),
pup_seg_c,
cond.numpy(),
override=True,
heatmaps=False)
for i in range(0, img.shape[0]):
archNum = testObj.imList[imCounter, 1]
opDict['id'].append(testObj.imList[imCounter, 0])
opDict['code'].append(latent[i,...].detach().cpu().numpy())
opDict['archNum'].append(archNum)
opDict['archName'].append(testObj.arch[archNum])
opDict['pred']['pup_latent_c'].append(pup_latent_c[i, :])
opDict['pred']['pup_seg_c'].append(pup_seg_c[i, :])
opDict['pred']['iri_latent_c'].append(iri_latent_c[i, :])
opDict['pred']['iri_seg_c'].append(iri_seg_c[i, :])
if args.test_save_op_masks:
opDict['pred']['mask'].append(predict[i,...].numpy().astype(np.uint8))
opDict['scores']['iou'].append(iou_bySample[i, ...])
opDict['scores']['lat_dst'].append(latent_pupil_dist_bySample[i, ...])
opDict['scores']['seg_dst'].append(seg_pupil_dist_bySample[i, ...])
opDict['gt']['pup_c'].append(pupil_center[i,...].numpy())
if args.test_save_op_masks:
opDict['gt']['mask'].append(labels[i,...].numpy().astype(np.uint8))
imCounter+=1
if args.disp:
if bt == 0:
h_im = plt.imshow(dispI.permute(1, 2, 0))
plt.pause(0.01)
else:
h_im.set_data(dispI.permute(1, 2, 0))
mypause(0.01)
opDict = stackall_Dict(opDict)
ious = np.stack(ious, axis=0)
ious = np.nanmean(ious, axis=0)
print('mIoU: {}. IoUs: {}'.format(np.mean(ious), ious))
print('Latent space PUPIL dist. Med: {}, STD: {}'.format(np.nanmedian(dists_pupil_latent),
np.nanstd(dists_pupil_latent)))
print('Segmentation PUPIL dist. Med: {}, STD: {}'.format(np.nanmedian(dists_pupil_seg),
np.nanstd(dists_pupil_seg)))
print('Latent space IRIS dist. Med: {}, STD: {}'.format(np.nanmedian(dists_iris_latent),
np.nanstd(dists_iris_latent)))
print('Segmentation IRIS dist. Med: {}, STD: {}'.format(np.nanmedian(dists_iris_seg),
np.nanstd(dists_iris_seg)))
print('--- Saving output directory ---')
f = open(os.path.join(path2op, 'opDict.pkl'), 'wb')
pickle.dump(opDict, f)
f.close()
| 1.960938 | 2 |
tests/test_util.py | meskio/tuf | 1 | 3002 | #!/usr/bin/env python
"""
<Program Name>
test_util.py
<Author>
<NAME>.
<Started>
February 1, 2013.
<Copyright>
See LICENSE for licensing information.
<Purpose>
Unit test for 'util.py'
"""
# Help with Python 3 compatibility, where the print statement is a function, an
# implicit relative import is invalid, and the '/' operator performs true
# division. Example: print 'hello world' raises a 'SyntaxError' exception.
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
import sys
import gzip
import shutil
import logging
import tempfile
import unittest
import tuf
import tuf.log
import tuf.hash
import tuf.util
import tuf.unittest_toolbox as unittest_toolbox
import tuf._vendor.six as six
logger = logging.getLogger('tuf.test_util')
class TestUtil(unittest_toolbox.Modified_TestCase):
def setUp(self):
unittest_toolbox.Modified_TestCase.setUp(self)
self.temp_fileobj = tuf.util.TempFile()
def tearDown(self):
unittest_toolbox.Modified_TestCase.tearDown(self)
self.temp_fileobj.close_temp_file()
def test_A1_tempfile_close_temp_file(self):
# Was the temporary file closed?
self.temp_fileobj.close_temp_file()
self.assertTrue(self.temp_fileobj.temporary_file.closed)
def _extract_tempfile_directory(self, config_temp_dir=None):
"""
Takes a directory (essentially specified in the conf.py as
'temporary_directory') and substitutes tempfile.TemporaryFile() with
tempfile.mkstemp() in order to extract actual directory of the stored
tempfile. Returns the config's temporary directory (or default temp
directory) and actual directory.
"""
# Patching 'tuf.conf.temporary_directory'.
tuf.conf.temporary_directory = config_temp_dir
if config_temp_dir is None:
# 'config_temp_dir' needs to be set to default.
config_temp_dir = tempfile.gettempdir()
# Patching 'tempfile.TemporaryFile()' (by substituting
    # tempfile.TemporaryFile() with tempfile.mkstemp()) in order to get the
# directory of the stored tempfile object.
saved_tempfile_TemporaryFile = tuf.util.tempfile.NamedTemporaryFile
tuf.util.tempfile.NamedTemporaryFile = tempfile.mkstemp
_temp_fileobj = tuf.util.TempFile()
tuf.util.tempfile.NamedTemporaryFile = saved_tempfile_TemporaryFile
junk, _tempfilepath = _temp_fileobj.temporary_file
_tempfile_dir = os.path.dirname(_tempfilepath)
# In the case when 'config_temp_dir' is None or some other discrepancy,
# '_temp_fileobj' needs to be closed manually since tempfile.mkstemp()
# was used.
if os.path.exists(_tempfilepath):
os.remove(_tempfilepath)
return config_temp_dir, _tempfile_dir
def test_A2_tempfile_init(self):
# Goal: Verify that temporary files are stored in the appropriate temp
# directory. The location of the temporary files is set in 'tuf.conf.py'.
# Test: Expected input verification.
# Assumed 'tuf.conf.temporary_directory' is 'None' initially.
temp_file = tuf.util.TempFile()
temp_file_directory = os.path.dirname(temp_file.temporary_file.name)
self.assertEqual(tempfile.gettempdir(), temp_file_directory)
saved_temporary_directory = tuf.conf.temporary_directory
temp_directory = self.make_temp_directory()
tuf.conf.temporary_directory = temp_directory
temp_file = tuf.util.TempFile()
temp_file_directory = os.path.dirname(temp_file.temporary_file.name)
self.assertEqual(temp_directory, temp_file_directory)
tuf.conf.temporary_directory = saved_temporary_directory
# Test: Unexpected input handling.
config_temp_dirs = [self.random_string(), 123, ['a'], {'a':1}]
for config_temp_dir in config_temp_dirs:
config_temp_dir, actual_dir = \
self._extract_tempfile_directory(config_temp_dir)
self.assertEqual(tempfile.gettempdir(), actual_dir)
def test_A3_tempfile_read(self):
filepath = self.make_temp_data_file(data = '1234567890')
fileobj = open(filepath, 'rb')
# Patching 'temp_fileobj.temporary_file'.
self.temp_fileobj.temporary_file = fileobj
# Test: Expected input.
self.assertEqual(self.temp_fileobj.read().decode('utf-8'), '1234567890')
self.assertEqual(self.temp_fileobj.read(4).decode('utf-8'), '1234')
# Test: Unexpected input.
for bogus_arg in ['abcd', ['abcd'], {'a':'a'}, -100]:
self.assertRaises(tuf.FormatError, self.temp_fileobj.read, bogus_arg)
def test_A4_tempfile_write(self):
data = self.random_string()
self.temp_fileobj.write(data.encode('utf-8'))
self.assertEqual(data, self.temp_fileobj.read().decode('utf-8'))
self.temp_fileobj.write(data.encode('utf-8'), auto_flush=False)
self.assertEqual(data, self.temp_fileobj.read().decode('utf-8'))
def test_A5_tempfile_move(self):
# Destination directory to save the temporary file in.
dest_temp_dir = self.make_temp_directory()
dest_path = os.path.join(dest_temp_dir, self.random_string())
self.temp_fileobj.write(self.random_string().encode('utf-8'))
self.temp_fileobj.move(dest_path)
self.assertTrue(dest_path)
def _compress_existing_file(self, filepath):
"""
    [Helper] Compresses the file 'filepath' and returns the file path of
    the compressed file.
"""
# NOTE: DO NOT forget to remove the newly created compressed file!
if os.path.exists(filepath):
compressed_filepath = filepath+'.gz'
f_in = open(filepath, 'rb')
f_out = gzip.open(compressed_filepath, 'wb')
f_out.writelines(f_in)
f_out.close()
f_in.close()
return compressed_filepath
else:
logger.error('Compression of '+repr(filepath)+' failed. Path does not exist.')
sys.exit(1)
def _decompress_file(self, compressed_filepath):
"""[Helper]"""
if os.path.exists(compressed_filepath):
f = gzip.open(compressed_filepath, 'rb')
file_content = f.read()
f.close()
return file_content
else:
logger.error('Decompression of '+repr(compressed_filepath)+' failed. '+\
'Path does not exist.')
sys.exit(1)
def test_A6_tempfile_decompress_temp_file_object(self):
# Setup: generate a temp file (self.make_temp_data_file()),
# compress it. Write it to self.temp_fileobj().
filepath = self.make_temp_data_file()
fileobj = open(filepath, 'rb')
compressed_filepath = self._compress_existing_file(filepath)
compressed_fileobj = open(compressed_filepath, 'rb')
self.temp_fileobj.write(compressed_fileobj.read())
os.remove(compressed_filepath)
# Try decompression using incorrect compression type i.e. compressions
# other than 'gzip'. In short feeding incorrect input.
bogus_args = ['zip', 1234, self.random_string()]
for arg in bogus_args:
self.assertRaises(tuf.Error,
self.temp_fileobj.decompress_temp_file_object, arg)
self.temp_fileobj.decompress_temp_file_object('gzip')
self.assertEqual(self.temp_fileobj.read(), fileobj.read())
# Checking the content of the TempFile's '_orig_file' instance.
check_compressed_original = self.make_temp_file()
with open(check_compressed_original, 'wb') as file_object:
file_object.write(self.temp_fileobj._orig_file.read())
data_in_orig_file = self._decompress_file(check_compressed_original)
fileobj.seek(0)
self.assertEqual(data_in_orig_file, fileobj.read())
# Try decompressing once more.
self.assertRaises(tuf.Error,
self.temp_fileobj.decompress_temp_file_object, 'gzip')
# Test decompression of invalid gzip file.
temp_file = tuf.util.TempFile()
fileobj.seek(0)
temp_file.write(fileobj.read())
temp_file.decompress_temp_file_object('gzip')
def test_B1_get_file_details(self):
# Goal: Verify proper output given certain expected/unexpected input.
# Making a temporary file.
filepath = self.make_temp_data_file()
# Computing the hash and length of the tempfile.
digest_object = tuf.hash.digest_filename(filepath, algorithm='sha256')
file_hash = {'sha256' : digest_object.hexdigest()}
file_length = os.path.getsize(filepath)
# Test: Expected input.
self.assertEqual(tuf.util.get_file_details(filepath), (file_length, file_hash))
# Test: Incorrect input.
bogus_inputs = [self.random_string(), 1234, [self.random_string()],
{'a': 'b'}, None]
for bogus_input in bogus_inputs:
if isinstance(bogus_input, six.string_types):
self.assertRaises(tuf.Error, tuf.util.get_file_details, bogus_input)
else:
self.assertRaises(tuf.FormatError, tuf.util.get_file_details, bogus_input)
def test_B2_ensure_parent_dir(self):
existing_parent_dir = self.make_temp_directory()
non_existing_parent_dir = os.path.join(existing_parent_dir, 'a', 'b')
for parent_dir in [existing_parent_dir, non_existing_parent_dir, 12, [3]]:
if isinstance(parent_dir, six.string_types):
tuf.util.ensure_parent_dir(os.path.join(parent_dir, 'a.txt'))
self.assertTrue(os.path.isdir(parent_dir))
else:
self.assertRaises(tuf.FormatError, tuf.util.ensure_parent_dir, parent_dir)
def test_B3_file_in_confined_directories(self):
# Goal: Provide invalid input for 'filepath' and 'confined_directories'.
# Include inputs like: '[1, 2, "a"]' and such...
# Reference to 'file_in_confined_directories()' to improve readability.
in_confined_directory = tuf.util.file_in_confined_directories
list_of_confined_directories = ['a', 12, {'a':'a'}, [1]]
list_of_filepaths = [12, ['a'], {'a':'a'}, 'a']
for bogus_confined_directory in list_of_confined_directories:
for filepath in list_of_filepaths:
self.assertRaises(tuf.FormatError, in_confined_directory,
filepath, bogus_confined_directory)
# Test: Inputs that evaluate to False.
confined_directories = ['a/b/', 'a/b/c/d/']
self.assertFalse(in_confined_directory('a/b/c/1.txt', confined_directories))
confined_directories = ['a/b/c/d/e/']
self.assertFalse(in_confined_directory('a', confined_directories))
self.assertFalse(in_confined_directory('a/b', confined_directories))
self.assertFalse(in_confined_directory('a/b/c', confined_directories))
self.assertFalse(in_confined_directory('a/b/c/d', confined_directories))
# Below, 'e' is a file in the 'a/b/c/d/' directory.
self.assertFalse(in_confined_directory('a/b/c/d/e', confined_directories))
# Test: Inputs that evaluate to True.
self.assertTrue(in_confined_directory('a/b/c.txt', ['']))
self.assertTrue(in_confined_directory('a/b/c.txt', ['a/b/']))
self.assertTrue(in_confined_directory('a/b/c.txt', ['x', '']))
self.assertTrue(in_confined_directory('a/b/c/..', ['a/']))
def test_B4_import_json(self):
self.assertTrue('json' in sys.modules)
def test_B5_load_json_string(self):
# Test normal case.
data = ['a', {'b': ['c', None, 30.3, 29]}]
json_string = tuf.util.json.dumps(data)
self.assertEqual(data, tuf.util.load_json_string(json_string))
# Test invalid arguments.
self.assertRaises(tuf.Error, tuf.util.load_json_string, 8)
invalid_json_string = {'a': tuf.FormatError}
self.assertRaises(tuf.Error, tuf.util.load_json_string, invalid_json_string)
def test_B6_load_json_file(self):
data = ['a', {'b': ['c', None, 30.3, 29]}]
filepath = self.make_temp_file()
fileobj = open(filepath, 'wt')
tuf.util.json.dump(data, fileobj)
fileobj.close()
self.assertEqual(data, tuf.util.load_json_file(filepath))
# Test a gzipped file.
compressed_filepath = self._compress_existing_file(filepath)
self.assertEqual(data, tuf.util.load_json_file(compressed_filepath))
Errors = (tuf.FormatError, IOError)
for bogus_arg in [b'a', 1, [b'a'], {'a':b'b'}]:
self.assertRaises(Errors, tuf.util.load_json_file, bogus_arg)
def test_C1_get_target_hash(self):
# Test normal case.
expected_target_hashes = {
'/file1.txt': 'e3a3d89eb3b70ce3fbce6017d7b8c12d4abd5635427a0e8a238f53157df85b3d',
'/README.txt': '8faee106f1bb69f34aaf1df1e3c2e87d763c4d878cb96b91db13495e32ceb0b0',
'/warehouse/file2.txt': 'd543a573a2cec67026eff06e75702303559e64e705eba06f65799baaf0424417'
}
for filepath, target_hash in six.iteritems(expected_target_hashes):
self.assertTrue(tuf.formats.RELPATH_SCHEMA.matches(filepath))
self.assertTrue(tuf.formats.HASH_SCHEMA.matches(target_hash))
self.assertEqual(tuf.util.get_target_hash(filepath), target_hash)
# Test for improperly formatted argument.
self.assertRaises(tuf.FormatError, tuf.util.get_target_hash, 8)
def test_C2_find_delegated_role(self):
# Test normal case. Create an expected role list, which is one of the
# required arguments to 'find_delegated_role()'.
role_list = [
{
"keyids": [
"<KEY>"
],
"name": "targets/warehouse",
"paths": [
"/file1.txt", "/README.txt", '/warehouse/'
],
"threshold": 3
},
{
"keyids": [
"<KEY>"
],
"name": "targets/tuf",
"paths": [
"/updater.py", "formats.py", '/tuf/'
],
"threshold": 4
}
]
self.assertTrue(tuf.formats.ROLELIST_SCHEMA.matches(role_list))
self.assertEqual(tuf.util.find_delegated_role(role_list, 'targets/tuf'), 1)
self.assertEqual(tuf.util.find_delegated_role(role_list, 'targets/warehouse'), 0)
# Test for non-existent role. 'find_delegated_role()' returns 'None'
# if the role is not found.
self.assertEqual(tuf.util.find_delegated_role(role_list, 'targets/non-existent'),
None)
# Test improperly formatted arguments.
self.assertRaises(tuf.FormatError, tuf.util.find_delegated_role, 8, role_list)
self.assertRaises(tuf.FormatError, tuf.util.find_delegated_role, 8, 'targets/tuf')
# Test duplicate roles.
role_list.append(role_list[1])
self.assertRaises(tuf.RepositoryError, tuf.util.find_delegated_role, role_list,
'targets/tuf')
    # Test missing 'name' attribute (optional, but required by
    # 'find_delegated_role()').
# Delete the duplicate role, and the remaining role's 'name' attribute.
del role_list[2]
del role_list[0]['name']
self.assertRaises(tuf.RepositoryError, tuf.util.find_delegated_role, role_list,
'targets/warehouse')
def test_C3_paths_are_consistent_with_hash_prefixes(self):
# Test normal case.
path_hash_prefixes = ['e3a3', '8fae', 'd543']
list_of_targets = ['/file1.txt', '/README.txt', '/warehouse/file2.txt']
    # Ensure the paths of 'list_of_targets' each have the expected path hash
# prefix listed in 'path_hash_prefixes'.
for filepath in list_of_targets:
self.assertTrue(tuf.util.get_target_hash(filepath)[0:4] in path_hash_prefixes)
self.assertTrue(tuf.util.paths_are_consistent_with_hash_prefixes(list_of_targets,
path_hash_prefixes))
extra_invalid_prefix = ['e3a3', '8fae', 'd543', '0000']
self.assertTrue(tuf.util.paths_are_consistent_with_hash_prefixes(list_of_targets,
extra_invalid_prefix))
# Test improperly formatted arguments.
self.assertRaises(tuf.FormatError,
tuf.util.paths_are_consistent_with_hash_prefixes, 8,
path_hash_prefixes)
self.assertRaises(tuf.FormatError,
tuf.util.paths_are_consistent_with_hash_prefixes,
list_of_targets, 8)
self.assertRaises(tuf.FormatError,
tuf.util.paths_are_consistent_with_hash_prefixes,
list_of_targets, ['zza1'])
# Test invalid list of targets.
bad_target_path = '/file5.txt'
self.assertTrue(tuf.util.get_target_hash(bad_target_path)[0:4] not in
path_hash_prefixes)
self.assertFalse(tuf.util.paths_are_consistent_with_hash_prefixes([bad_target_path],
path_hash_prefixes))
# Add invalid target path to 'list_of_targets'.
list_of_targets.append(bad_target_path)
self.assertFalse(tuf.util.paths_are_consistent_with_hash_prefixes(list_of_targets,
path_hash_prefixes))
def test_C4_ensure_all_targets_allowed(self):
# Test normal case.
rolename = 'targets/warehouse'
self.assertTrue(tuf.formats.ROLENAME_SCHEMA.matches(rolename))
list_of_targets = ['/file1.txt', '/README.txt', '/warehouse/file2.txt']
self.assertTrue(tuf.formats.RELPATHS_SCHEMA.matches(list_of_targets))
parent_delegations = {"keys": {
"<KEY>": {
"keytype": "ed25519",
"keyval": {
"public": "<KEY>"
}
}
},
"roles": [
{
"keyids": [
"<KEY>"
],
"name": "targets/warehouse",
"paths": [
"/file1.txt", "/README.txt", '/warehouse/'
],
"threshold": 1
}
]
}
self.assertTrue(tuf.formats.DELEGATIONS_SCHEMA.matches(parent_delegations))
tuf.util.ensure_all_targets_allowed(rolename, list_of_targets,
parent_delegations)
# The target files of 'targets' are always allowed. 'list_of_targets' and
# 'parent_delegations' are not checked in this case.
tuf.util.ensure_all_targets_allowed('targets', list_of_targets,
parent_delegations)
# Test improperly formatted arguments.
self.assertRaises(tuf.FormatError, tuf.util.ensure_all_targets_allowed,
8, list_of_targets, parent_delegations)
self.assertRaises(tuf.FormatError, tuf.util.ensure_all_targets_allowed,
rolename, 8, parent_delegations)
self.assertRaises(tuf.FormatError, tuf.util.ensure_all_targets_allowed,
rolename, list_of_targets, 8)
# Test for invalid 'rolename', which has not been delegated by its parent,
# 'targets'.
self.assertRaises(tuf.RepositoryError, tuf.util.ensure_all_targets_allowed,
'targets/non-delegated_rolename', list_of_targets,
parent_delegations)
# Test for target file that is not allowed by the parent role.
self.assertRaises(tuf.ForbiddenTargetError, tuf.util.ensure_all_targets_allowed,
'targets/warehouse', ['file8.txt'], parent_delegations)
self.assertRaises(tuf.ForbiddenTargetError, tuf.util.ensure_all_targets_allowed,
'targets/warehouse', ['file1.txt', 'bad-README.txt'],
parent_delegations)
# Test for required attributes.
# Missing 'paths' attribute.
del parent_delegations['roles'][0]['paths']
self.assertRaises(tuf.FormatError, tuf.util.ensure_all_targets_allowed,
'targets/warehouse', list_of_targets, parent_delegations)
# Test 'path_hash_prefixes' attribute.
path_hash_prefixes = ['e3a3', '8fae', 'd543']
parent_delegations['roles'][0]['path_hash_prefixes'] = path_hash_prefixes
# Test normal case for 'path_hash_prefixes'.
tuf.util.ensure_all_targets_allowed('targets/warehouse', list_of_targets,
parent_delegations)
# Test target file with a path_hash_prefix that is not allowed in its
# parent role.
path_hash_prefix = tuf.util.get_target_hash('file5.txt')[0:4]
self.assertTrue(path_hash_prefix not in parent_delegations['roles'][0]
['path_hash_prefixes'])
self.assertRaises(tuf.ForbiddenTargetError, tuf.util.ensure_all_targets_allowed,
'targets/warehouse', ['file5.txt'], parent_delegations)
def test_C5_unittest_toolbox_make_temp_directory(self):
# Verify that the tearDown function does not fail when
# unittest_toolbox.make_temp_directory deletes the generated temp directory
# here.
temp_directory = self.make_temp_directory()
os.rmdir(temp_directory)
def test_c6_get_compressed_length(self):
self.temp_fileobj.write(b'hello world')
self.assertTrue(self.temp_fileobj.get_compressed_length() == 11)
temp_file = tuf.util.TempFile()
# Run unit test.
if __name__ == '__main__':
unittest.main()
| 2.828125 | 3 |
background/forms.py | BFlameSwift/AirplaneReservationSystem | 3 | 3003 |
from django import forms
class FlightrForm(forms.Form):
flight_number = forms.CharField(max_length=30, label="航班号", widget=forms.TextInput(attrs={'class': 'form-control'}))
plane_type_choices = [
('波音', (
('1', '747'),
('2', '777'),
('3', '787'),
)
),
('空客', (
('4', 'A300'),
('5', 'A310'),
('6', 'A320'),
('7', 'A350'),
)
),
]
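    # Editorial note (not in the original source): with grouped choices like the
    # list above, Django validates against the inner keys, so a bound form's
    # cleaned_data['plane_type'] yields the key string ('1'-'7'), not the
    # displayed aircraft name such as '747' or 'A320'.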
plane_type = forms.ChoiceField(label='飞机型号', choices=plane_type_choices,widget=forms.Select)
origination = forms.CharField(max_length=30,label="始发地", widget=forms.TextInput(attrs={'class': 'form-control'}))
destination = forms.CharField(max_length=30,label="目的地", widget=forms.TextInput(attrs={'class': 'form-control'}))
starting_time = forms.TimeField(label="始发时间",widget=forms.TimeInput(attrs={'class': 'form-control'}))
departure_airport = forms.CharField(max_length=64, label="始发机场", widget=forms.TextInput(attrs={'class': 'form-control'}))
landing_airport = forms.CharField(max_length=64, label="目的机场", widget=forms.TextInput(attrs={'class': 'form-control'}))
arrival_time = forms.TimeField(label="到达时间",widget=forms.TimeInput(attrs={'class': 'form-control'}))
first_class_price = forms.FloatField(label="头等舱价格",widget=forms.NumberInput(attrs={'class': 'form-control'}))
# highlevel_economy_class_price = forms.FloatField(label="高级经济舱价格",widget=forms.NumberInput(attrs={'class': 'form-control'}))
business_class_price = forms.FloatField(label="商务舱价格",widget=forms.NumberInput(attrs={'class': 'form-control'}))
economy_class_price = forms.FloatField(label="经济舱价格",widget=forms.NumberInput(attrs={'class': 'form-control'}))
starting_date = forms.DateField(label="始发日期", widget=forms.DateInput(attrs={'class': 'form-control'}))
ending_date = forms.DateField(label="终止日期", widget=forms.DateInput(attrs={'class': 'form-control'}))
class StartStopDateForm(forms.Form):
starting_date = forms.DateField(label="始发日期", widget=forms.DateInput(attrs={'class': 'form-control'}))
ending_date = forms.DateField(label="终止日期", widget=forms.DateInput(attrs={'class': 'form-control'}))
flight_number = forms.CharField(max_length=30, label="航班号", widget=forms.TextInput(attrs={'class': 'form-control'}))
# book_sum = forms.IntegerField(label="订票总数")
# plane_capacity = forms.IntegerField(label="飞机容量")
class flight_number_Form(forms.Form):
flight_number = forms.CharField(max_length=30, label="航班号", widget=forms.TextInput(attrs={'class': 'form-control'}))
class concrete_flight_id_Form(forms.Form):
concrete_flight_id = forms.CharField(max_length=30, label="航班id", widget=forms.TextInput(attrs={'class': 'form-control'})) | 2.25 | 2 |
cams/propressing/data_rotate.py | boliqq07/cam3d | 1 | 3004 | <gh_stars>1-10
from functools import lru_cache
from math import cos, sin
import scipy
from scipy.ndimage import affine_transform
import numpy as np
@lru_cache(maxsize=10)
def get_matrix(angles=(90, 90, 90), inverse=False):
"""
    Get the axis-rotation (shear + compress) matrix for the given angles.
    Example: z = 120
############################################################
---------------------- --------------------------------
-oooooooooooooooooooo- --------------------------------
-oooooooooooooooooooo- -oooooooooooooooooooo-----------
-oooooooooooooooooooo- ---oooooooooooooooooooo---------
-oooooooooooooooooooo- >>> -----oooooooooooooooooooo-------
-oooooooooooooooooooo- -------oooooooooooooooooooo-----
-oooooooooooooooooooo- ---------oooooooooooooooooooo---
-oooooooooooooooooooo- -----------oooooooooooooooooooo-
---------------------- --------------------------------
############################################################
    1. The ``matrix`` is the transform matrix that rotates the data by the given angles. Always in Cartesian coordinates.
    2. The ``inverse matrix`` is the interpolation matrix used to recover the true data matrix (Cartesian coordinates)
    from the relative data matrix (non-Cartesian coordinates).
Parameters
----------
angles: tuple
3 angle of x, y, z
z angle is the intersection angle of x,y,
y angle is the intersection angle of x,z,
x angle is the intersection angle of y,z.
inverse:
Compute the (multiplicative) inverse of a matrix.
"""
theta1, theta2, theta3 = [np.pi / 180 * angle for angle in angles]
matrix1 = np.array([[1, cos(theta3), 0],
[0, sin(theta3), 0],
[0, 0, 1]])
matrix2 = np.array([[1, 0, 0],
[0, 1, cos(theta1)],
[0, 0, sin(theta1)]])
matrix3 = np.array([[1, 0, cos(theta2)],
[0, 1, 0],
[0, 0, sin(theta2)]])
matrix = np.dot(matrix1, matrix2).dot(matrix3)
if inverse:
matrix = np.linalg.inv(matrix)
return matrix
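# Minimal usage sketch (added for illustration; the values are assumptions, not
# part of the original module). For an orthogonal cell all three angles are 90
# degrees, so the product of the three shear matrices is numerically the identity:
#
#   m = get_matrix(angles=(90, 90, 90))            # ~np.eye(3) up to float error
#   m_inv = get_matrix(angles=(90, 90, 120), inverse=True)
#
# Because of @lru_cache, repeated calls with the same angles reuse the cached
# matrix instead of rebuilding it.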
def rotation_axis_by_angle(data, angles=(90, 90, 90), times=(2, 2, 2)):
"""
Get true data matrix(Cartesian coordinates) from relative data matrix (Non-Cartesian coordinates).
Parameters
----------
data: np.ndarray
data with shape (nx,ny,nz).
angles:tuple
3 angle of x, y, z
z angle is the intersection angle of x,y,
y angle is the intersection angle of x,z,
x angle is the intersection angle of y,z.
times: tuple
expand the multiple of the matrix.
"""
matrix = get_matrix(angles=angles, inverse=True)
return rotation_axis_by_matrix(data, matrix, times=times)
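# Illustrative call (array contents and angles are made up): an (8, 8, 8) grid
# expanded by times=(2, 2, 2) comes back as a (16, 16, 16) array resampled onto
# Cartesian axes via scipy.ndimage.map_coordinates.
#
#   data = np.random.rand(8, 8, 8)
#   cart = rotation_axis_by_angle(data, angles=(90, 90, 120), times=(2, 2, 2))
#   assert cart.shape == (16, 16, 16)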
def rotation_axis_by_matrix(data, matrix, times=(2, 2, 2)):
"""
Get true data matrix(Cartesian coordinates) from relative data matrix (Non-Cartesian coordinates).
Parameters
----------
data: np.ndarray
data with shape (nx,ny,nz).
matrix:tuple
See Also ``get_matrix``
times: tuple
expand the multiple of the matrix.
"""
dims_old = data.shape
dims = tuple([int(i * j) for i, j in zip(dims_old, times)])
n_data = np.zeros(dims)
d0s = int((dims[0] - dims_old[0]) / 2)
d1s = int((dims[1] - dims_old[1]) / 2)
d2s = int((dims[2] - dims_old[2]) / 2)
n_data[d0s:d0s + dims_old[0], d1s:d1s + dims_old[1], d2s:d2s + dims_old[2]] = data
coords = np.meshgrid(range(dims[0]), range(dims[1]), range(dims[2]), indexing="ij")
xy_coords = np.vstack([coords[0].reshape(-1), coords[1].reshape(-1), coords[2].reshape(-1)])
# apply the transformation matrix
# please note: the coordinates are not homogeneous.
# for the 3D case, I've added code for homogeneous coordinates, you might want to look at that
# please also note: rotation is always around the origin:
    # since I want the origin to be in the image center, I had to subtract dim/2, rotate, then add it again
dims2 = np.array([i / 2 for i in dims])
dims2 = dims2.reshape(-1, 1)
xy_coords = np.dot(matrix, xy_coords - dims2) + dims2
#
# # undo the stacking and reshaping
x = xy_coords[0, :]
y = xy_coords[1, :]
z = xy_coords[2, :]
x = x.reshape(dims, order="A")
y = y.reshape(dims, order="A")
z = z.reshape(dims, order="A")
new_coords = [x, y, z]
# use map_coordinates to sample values for the new image
new_img = scipy.ndimage.map_coordinates(n_data, new_coords, order=2)
return new_img
def _coords(points, angles=(90, 90, 90), times=(2, 2, 2)):
"""
Parameters
----------
    points: np.ndarray
percent of shape.
key points with shape(n_sample,3)
angles:tuple
3 angle of x, y, z
z angle is the intersection angle of x,y,
y angle is the intersection angle of x,z,
x angle is the intersection angle of y,z.
times: tuple
expand the multiple of the matrix.
"""
dims_old = [1, 1, 1]
matrix = get_matrix(angles=angles)
times = np.array(list(times))
times = times.reshape((-1, 1))
dims_old = np.array(dims_old)
dims_old = dims_old.reshape(-1, 1)
dims2 = dims_old / 2
points = points.T * dims_old
xy_coords = np.dot(matrix, points - dims2) + dims2
xy_coords = xy_coords + (times / 2 - 0.5)
return xy_coords
def rote_index(points, data, angles=(90, 90, 90), times=(2, 2, 2), data_init=True, return_type="float"):
"""
Parameters
----------
    points: np.ndarray
key points with shape(n_sample,3)
percent of shape.
data: np.ndarray or tuple
data or data.shape
data_init:bool
The data is the init data (relative location) or Cartesian coordinates.(rotation_axis_by_angle)
angles:tuple
3 angle of x, y, z
z angle is the intersection angle of x,y,
y angle is the intersection angle of x,z,
x angle is the intersection angle of y,z.
times: tuple
expand the multiple of the matrix.
return_type:str
"float", "int", "percent"
for "float", "int" return the new index
for "percent" return the new percent.
"""
data_shape = data.shape if isinstance(data, np.ndarray) else data
if data_init:
times_np = np.array([1,1,1])
else:
times_np = np.array(times)
dims = data_shape
dims = np.array(dims).reshape((-1, 1))
xy_coords = _coords(points, angles=angles, times=times)
if return_type == "percent":
return xy_coords
if return_type == "float":
return (dims * xy_coords/times_np).T
else:
return np.round((dims * xy_coords/times_np).T).astype(int) # for rounding off: .4 -, .5 +
def rote_value(points, data, angles=(90, 90, 90), times=(2, 2, 2), method="in", data_type="td"):
"""
Parameters
----------
    points: np.ndarray
key points with shape(n_sample,3)
percent of shape.
data: np.ndarray
data
angles:tuple
3 angle of x, y, z
z angle is the intersection angle of x,y,
y angle is the intersection angle of x,z,
x angle is the intersection angle of y,z.
times: tuple
expand the multiple of the matrix.
data_type:str
if "init" the data accept init data (elfcar, chgcar). see rotation_axis_by_angle.
if "td" the data accept true matrix data . see rotation_axis_by_angle.
method:str
if "near" , return nearest site's value.
if "inter" , return the interpolation value.
"""
if data_type == "td":
new_data = data
else:
new_data = rotation_axis_by_angle(data, angles=angles, times=times)
if method == "near":
ind = rote_index(points, data, angles=angles, times=times, return_type="int")
new_value = np.array([new_data[tuple(i)] for i in ind.T])
return new_value
else:
ind = rote_index(points, data, angles=angles, times=times, return_type="float")
new_value = scipy.ndimage.map_coordinates(new_data, ind, order=2)
return new_value
| 2.796875 | 3 |
playground/conversions/parser/lola2dot.py | flange/esp | 0 | 3005 | #!/usr/bin/env python
import sys
#lolafile = open("ex-small.graph", "r")
source = 0
target = 0
lowlink = 0
trans = "bla"
print("digraph {")
with open(sys.argv[1]) as lolafile:
for line in lolafile:
if len(line) == 1:
continue
linelist = line.split(" ")
if "STATE" in linelist:
source = linelist[1]
lowlink = linelist[3].rstrip()
if "->" in linelist:
trans = linelist[0]
target = linelist[2].rstrip()
print(''' {} -> {} [label="{}", lowlink="{}"];'''.format(source, target, trans, lowlink))
print("}")
| 3.140625 | 3 |
engkor/views.py | takeshixx/dprkdict | 10 | 3006 | <gh_stars>1-10
import re
import urllib.parse
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, JsonResponse
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from .models import Definition
RE_HANGUL = re.compile(r'[(]*[\uAC00-\uD7AF]+[\uAC00-\uD7AF (),;]*', re.IGNORECASE)
def index(request):
definitions = Definition.objects.all()
limit = request.GET.get('limit')
try:
limit = int(limit)
except (ValueError, TypeError):
limit = 15
paginator = Paginator(definitions, limit)
page = request.GET.get('page')
try:
show_lines = paginator.page(page)
except PageNotAnInteger:
show_lines = paginator.page(1)
except EmptyPage:
show_lines = paginator.page(paginator.num_pages)
return render(request, 'index.html', {'definitions': definitions,
'lines': show_lines})
def fix_definition_format(definition):
definition = definition.replace('{I}', '<i>') \
.replace('{/I}', '</i>') \
.replace('{B}', '<b>') \
.replace('{/B}', '</b>') \
.replace('{Pr}', '[') \
.replace('{/Pr}', ']') \
.replace('{H}', '') \
.replace('{/H}', '') \
.replace('{E}', '') \
.replace('{/E}', '') \
.replace('{J}', '') \
.replace('{/J}', '') \
.replace('{S}', '') \
.replace('{/S}', '') \
.replace('{U}', '') \
.replace('{-}', '- ')
if definition.startswith('&'):
definition = definition[1:]
word, _definition = definition.split('\n', 1)
definition = '<h4>' + word + '</h4>\n'
definition += _definition
return definition
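# Illustrative round trip (hypothetical entry, not taken from the real dictionary
# data): an input of "&apple\n{B}noun{/B} a fruit" becomes
# "<h4>apple</h4>\n<b>noun</b> a fruit": the markup tokens are mapped to HTML,
# the leading '&' is dropped, and the first line is promoted to an <h4> heading.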
def generate_translate_tag(word):
out = '<a href="https://translate.google.de/#ko/en/{word_url}" '
out += 'title="Translate with Google Translate" target="'
out += '_blank">{word}</a>'
out = out.format(word_url=urllib.parse.quote_plus(word.group(0)),
word=word.group(0))
return out
def get_definitions(request):
if request.is_ajax():
q = request.GET.get('term', '')
definitions = Definition.objects.filter(word__icontains=q) \
.values_list('word', flat=True)[:25]
data = list(definitions)
else:
data = []
return JsonResponse(data, safe=False)
def get_definition(request, id):
definition = get_object_or_404(Definition, id=id)
data = fix_definition_format(definition.definition)
data = RE_HANGUL.sub(generate_translate_tag, data)
return HttpResponse(data)
def get_definition_word(request, word):
definition = get_object_or_404(Definition, word=word)
data = fix_definition_format(definition.definition)
data = RE_HANGUL.sub(generate_translate_tag, data)
return HttpResponse(data) | 2.21875 | 2 |
appdaemon/apps/toggle_switch/toggle_switch.py | Mithras/ha | 3 | 3007 | <reponame>Mithras/ha
import globals
class ToggleSwitch(globals.Hass):
async def initialize(self):
config = self.args["config"]
self._input = config["input"]
self._toggle_service = config["toggle_service"]
self._toggle_payload = config["toggle_payload"]
self._power = config["power"]
self._power_on_threshold = float(config["power_on_threshold"])
self._check_interval = float(config["check_interval"])
self.ensure_state_task = await self.create_task(
self._ensure_state_async(False))
await self.listen_state(self._input_callback_async,
entity=self._input)
async def terminate(self):
# self.log("Terminate")
self.ensure_state_task.cancel()
async def _input_callback_async(self, entity, attribute, old, new, kwargs):
if old == new:
return
# self.log(f"InputChange: old = {old}, new = {new}")
self.ensure_state_task.cancel()
self.ensure_state_task = await self.create_task(self._ensure_state_async())
async def _ensure_state_async(self, immediate=True):
# self.log(f"EnsureState: immediate = {immediate}")
if immediate:
await self._toggle_async()
while True:
await self.sleep(self._check_interval)
power = float(await self.get_state(self._power))
input = await self.get_state(self._input)
# self.log(
# f"EnsureState: input = {input}, power: {power}")
if input == "on" and power < self._power_on_threshold or input == "off" and power > self._power_on_threshold:
await self._toggle_async()
async def _toggle_async(self):
# self.log("Toggle")
await self.call_service(self._toggle_service,
**self._toggle_payload)
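# A hypothetical apps.yaml block this app could be registered with (the key names
# follow what initialize() reads from self.args["config"]; the entity ids and
# numbers below are invented for illustration):
#
#   toggle_switch:
#     module: toggle_switch
#     class: ToggleSwitch
#     config:
#       input: input_boolean.heater
#       toggle_service: switch/toggle
#       toggle_payload:
#         entity_id: switch.heater_plug
#       power: sensor.heater_power
#       power_on_threshold: 5.0
#       check_interval: 30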
| 2.34375 | 2 |
templates_deepdive_app_bagofwords/udf/dd_extract_features.py | charlieccarey/rdoc | 0 | 3008 | <filename>templates_deepdive_app_bagofwords/udf/dd_extract_features.py<gh_stars>0
#!/usr/bin/env python
from __future__ import print_function
'''
1\taaaa~^~bbbb~^~cccc
2\tdddd~^~EEEE~^~ffff
'''
import sys
ARR_DELIM = '~^~'
for row in sys.stdin:
row = row.strip()
sent_id, lemmas = row.split('\t')
lemmas = lemmas.split(ARR_DELIM)
for lemma in lemmas:
print('{}\t{}'.format(sent_id, lemma))
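# For the two sample rows in the docstring above, the script emits one
# (sentence id, lemma) pair per line:
#   1\taaaa
#   1\tbbbb
#   1\tcccc
#   2\tdddd
#   2\tEEEE
#   2\tffff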
| 2.390625 | 2 |
src/supplier/templates/supplier/urls.py | vandana0608/Pharmacy-Managament | 0 | 3009 | from django.urls import path
from . import views
urlpatterns = [
path('', views.SupplierList.as_view(), name='supplier_list'),
path('view/<int:pk>', views.SupplierView.as_view(), name='supplier_view'),
path('new', views.SupplierCreate.as_view(), name='supplier_new'),
path('view/<int:pk>', views.SupplierView.as_view(), name='supplier_view'),
path('edit/<int:pk>', views.SupplierUpdate.as_view(), name='supplier_edit'),
path('delete/<int:pk>', views.SupplierDelete.as_view(), name='supplier_delete'),
] | 1.765625 | 2 |
web_scraper/extract/common.py | rarc41/web_scraper_pro | 0 | 3010 | <reponame>rarc41/web_scraper_pro<filename>web_scraper/extract/common.py
import yaml
__config=None
def config():
global __config
if not __config:
with open('config.yaml', mode='r') as f:
__config=yaml.safe_load(f)
return __config | 2.0625 | 2 |
engine/config/constant.py | infiniteloop98/lazies-cmd | 1 | 3011 | APP_PROFILE_DIRECTORY_NAME = 'lazies-cmd'
DOSKEY_FILE_NAME = 'doskey.bat'
AUTO_RUN_REGISTRY_NAME = 'AutoRun'
| 1.117188 | 1 |
sequence/get_seqs_from_list.py | fanglu01/cDNA_Cupcake | 1 | 3012 | #!/usr/bin/env python
import os, sys
from Bio import SeqIO
def get_seqs_from_list(fastafile, listfile):
seqs = [line.strip() for line in open(listfile)]
for r in SeqIO.parse(open(fastafile), 'fasta'):
if r.id in seqs or r.id.split('|')[0] in seqs or any(r.id.startswith(x) for x in seqs):
print ">" + r.id
print r.seq
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser("Get sequences from a fasta file from a list")
parser.add_argument("fasta_filename", help="Input fasta filename to extract sequences from")
parser.add_argument("list_filename", help="List of sequence IDs to extract")
args = parser.parse_args()
get_seqs_from_list(args.fasta_filename, args.list_filename)
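# Example of the matching rules (hypothetical ids, not from any real data): with
# a list file containing "PB.1.1" and "m54006_1234", a record named
# "PB.1.1|full-length" matches via the split('|')[0] test, and a record named
# "m54006_1234/5/ccs" matches via the startswith() test.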
| 3.578125 | 4 |
ppos_dex_data.py | cusma/pposdex | 10 | 3013 | <gh_stars>1-10
import time
import json
import base64
import msgpack
from schema import Schema, And, Optional
from datetime import datetime
from algosdk import mnemonic
from algosdk.account import address_from_private_key
from algosdk.error import *
from algosdk.future.transaction import PaymentTxn
from inequality_indexes import *
from algo_query import *
def wait_for_confirmation(algod_client, transaction_id, timeout):
"""Wait until the transaction is confirmed or rejected, or until 'timeout'
number of rounds have passed.
Args:
algod_client (AlgodClient): Algod Client
transaction_id (str): the transaction to wait for
timeout (int): maximum number of rounds to wait
Returns:
(dict): pending transaction information, or throws an error if the
transaction is not confirmed or rejected in the next timeout rounds
"""
start_round = algod_client.status()["last-round"] + 1
current_round = start_round
while current_round < start_round + timeout:
algod_client.status_after_block(current_round)
try:
pending_txn = algod_client.pending_transaction_info(transaction_id)
except Exception:
return
if pending_txn.get("confirmed-round", 0) > 0:
return pending_txn
elif pending_txn["pool-error"]:
raise Exception(
'pool error: {}'.format(pending_txn["pool-error"]))
current_round += 1
raise Exception(
'pending tx not found in timeout rounds, timeout value = : {}'.format(
timeout))
def post_ppos_dex_data(algod_client, indexer_client, passphrase,
algo_threshold):
private_key = mnemonic.to_private_key(passphrase)
account = {'pk': address_from_private_key(private_key),
'sk': private_key}
CONNECTION_ATTEMPT_DELAY_SEC = 3
MAX_CONNECTION_ATTEMPTS = 10
MICROALGO_TO_ALGO = 1 / 10 ** 6
MICROALGO_TOTAL_SUPPLY = 10 ** 16
attempts = 1
params = None
ledger = None
while attempts <= MAX_CONNECTION_ATTEMPTS:
try:
params = algod_client.suggested_params()
ledger = algod_client.ledger_supply()
break
except AlgodHTTPError:
print(f"Algod Client connection attempt "
f"{attempts}/{MAX_CONNECTION_ATTEMPTS}")
print("Trying to contact Algod Client again...")
time.sleep(CONNECTION_ATTEMPT_DELAY_SEC)
finally:
attempts += 1
if attempts > MAX_CONNECTION_ATTEMPTS:
quit("Unable to connect to Algod Client.")
attempts = 1
algo_owners = None
while attempts <= MAX_CONNECTION_ATTEMPTS:
try:
algo_owners = get_algo_owners(indexer_client, algo_threshold)
break
except IndexerHTTPError:
print(f"Indexer Client connection attempt "
f"{attempts}/{MAX_CONNECTION_ATTEMPTS}")
print("Trying to contact Indexer Client again...")
time.sleep(CONNECTION_ATTEMPT_DELAY_SEC)
finally:
attempts += 1
if attempts > MAX_CONNECTION_ATTEMPTS:
quit("Unable to connect to Indexer Client.")
stakes = [account['amount'] * MICROALGO_TO_ALGO for
account in algo_owners]
algo_hhi = herfindahl_hirschman_index(stakes)
online_stakes = [account['amount'] * MICROALGO_TO_ALGO
for account in algo_owners
if account['status'] == 'Online']
algo_dynamics = ledger['total-money'] / MICROALGO_TOTAL_SUPPLY
ppos_online_stake = ledger['online-money'] / ledger['total-money']
ppos_online_accounts = len(online_stakes) / len(algo_owners)
ppos_gini = gini_index(online_stakes)
ppos_theil_l = theil_l_index(online_stakes)
ppos_theil_t = theil_t_index(online_stakes)
ppos_hhi = herfindahl_hirschman_index(online_stakes)
ppos_dex = (algo_dynamics
* ppos_online_stake
* ppos_online_accounts
* (1 - ppos_gini))
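    # Back-of-the-envelope check (made-up numbers, not real chain data): with
    # algo_dynamics=0.6, ppos_online_stake=0.5, ppos_online_accounts=0.2 and
    # ppos_gini=0.8, ppos_dex = 0.6 * 0.5 * 0.2 * (1 - 0.8) = 0.012; every
    # factor lies in [0, 1], so the composite index does as well.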
note = {'algo_threshold': algo_threshold,
'accounts': len(algo_owners),
'algo_hhi': algo_hhi,
'algo_dynamics': algo_dynamics,
'ppos_online_stake': ppos_online_stake,
'ppos_online_accounts': ppos_online_accounts,
'ppos_gini': ppos_gini,
'ppos_theil_l': ppos_theil_l,
'ppos_theil_t': ppos_theil_t,
'ppos_hhi': ppos_hhi,
'ppos_dex': ppos_dex,
'timestamp': str(datetime.now())}
bytes_note = msgpack.packb(note)
unsigned_txn = PaymentTxn(sender=account['pk'],
sp=params,
receiver=account['pk'],
amt=0,
note=bytes_note)
signed_txn = unsigned_txn.sign(account['sk'])
txid = algod_client.send_transaction(signed_txn)
print("Publishing Algorand PPoS Dex data in txID: {}".format(txid))
try:
confirmed_txn = wait_for_confirmation(algod_client, txid, 4)
except Exception as err:
print(err)
return
print("txID: {}".format(txid), " confirmed in round: {}\n".format(
confirmed_txn.get("confirmed-round", 0)))
print("Transaction information:\n{}".format(
json.dumps(confirmed_txn, indent=4)))
def get_ppos_dex_data(indexer_client, ppos_dex_address, algo_threshold,
start_block=11476070, end_block=None):
CONNECTION_ATTEMPT_DELAY_SEC = 3
MAX_CONNECTION_ATTEMPTS = 10
attempts = 1
ppos_dex_txns_note = None
while attempts <= MAX_CONNECTION_ATTEMPTS:
try:
ppos_dex_txns_note = get_address_txns_note(
indexer_client, ppos_dex_address, start_block, end_block)
break
except IndexerHTTPError:
print(f"Indexer Client connection attempt "
f"{attempts}/{MAX_CONNECTION_ATTEMPTS}")
print("Trying to contact Indexer Client again...")
time.sleep(CONNECTION_ATTEMPT_DELAY_SEC)
finally:
attempts += 1
if attempts > MAX_CONNECTION_ATTEMPTS:
quit("Unable to connect to Indexer Client.")
    # TODO: make 'algo_hhi' and 'ppos_hhi' mandatory fields in the schema
schema = Schema({
'algo_threshold': int,
'accounts': And(int, lambda n: 0 <= n),
Optional('algo_hhi'): And(float, lambda n: 0 <= n <= 1),
'algo_dynamics': And(float, lambda n: 0 <= n),
'ppos_online_stake': And(float, lambda n: 0 <= n <= 1),
'ppos_online_accounts': And(float, lambda n: 0 <= n <= 1),
'ppos_gini': And(float, lambda n: 0 <= n <= 1),
'ppos_theil_l': And(float, lambda n: 0 <= n),
'ppos_theil_t': And(float, lambda n: 0 <= n),
Optional('ppos_hhi'): And(float, lambda n: 0 <= n <= 1),
'ppos_dex': And(float, lambda n: 0 <= n <= 1),
'timestamp': str
})
ppos_dex_data = []
for txn_note in ppos_dex_txns_note:
try:
data = schema.validate(
msgpack.unpackb(base64.b64decode(txn_note))
)
if data['algo_threshold'] == algo_threshold:
ppos_dex_data += [data]
except:
pass
if not ppos_dex_data:
quit(f"Impossible to find valid PPos Dex data published by "
f"{ppos_dex_address} starting from block {start_block}.")
return ppos_dex_data
| 2.25 | 2 |
src/test/python/programmingtheiot/part01/unit/system/SystemMemUtilTaskTest.py | Zhengrui-Liu/FireAlarmingSysCDA | 0 | 3014 | #####
#
# This class is part of the Programming the Internet of Things
# project, and is available via the MIT License, which can be
# found in the LICENSE file at the top level of this repository.
#
# Copyright (c) 2020 by <NAME>
#
import logging
import unittest
from programmingtheiot.cda.system.SystemMemUtilTask import SystemMemUtilTask
class SystemMemUtilTaskTest(unittest.TestCase):
"""
This test case class contains very basic unit tests for
SystemMemUtilTask. It should not be considered complete,
but serve as a starting point for the student implementing
additional functionality within their Programming the IoT
environment.
"""
@classmethod
def setUpClass(self):
logging.basicConfig(format = '%(asctime)s:%(module)s:%(levelname)s:%(message)s', level = logging.DEBUG)
logging.info("Testing SystemMemUtilTask class...")
self.memUtilTask = SystemMemUtilTask()
def setUp(self):
pass
def tearDown(self):
pass
<EMAIL>("Ignore for now.")
def testGenerateTelemetry(self):
"""Test get memory utilization from sensor data
"""
sd = self.memUtilTask.generateTelemetry()
self.assertIsNotNone(sd)
self.assertGreaterEqual(sd.getValue(), 0.0)
logging.info("Virtual memory SensorData: %s", str(sd))
def testGetTelemetryValue(self):
"""Test get memory utilization
"""
val = self.memUtilTask.getTelemetryValue()
self.assertGreaterEqual(val, 0.0)
logging.info("Virtual memory utilization: %s", str(val))
if __name__ == "__main__":
unittest.main()
| 2.9375 | 3 |
api/base/settings/defaults.py | mattclark/osf.io | 0 | 3015 | """
Django settings for api project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
import os
from urlparse import urlparse
from website import settings as osf_settings
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
DATABASES = {
'default': {
'CONN_MAX_AGE': 0,
'ENGINE': 'osf.db.backends.postgresql', # django.db.backends.postgresql
'NAME': os.environ.get('OSF_DB_NAME', 'osf'),
'USER': os.environ.get('OSF_DB_USER', 'postgres'),
'PASSWORD': os.environ.get('OSF_DB_PASSWORD', ''),
'HOST': os.environ.get('OSF_DB_HOST', '127.0.0.1'),
'PORT': os.environ.get('OSF_DB_PORT', '5432'),
'ATOMIC_REQUESTS': True,
'TEST': {
'SERIALIZE': False,
},
},
}
DATABASE_ROUTERS = ['osf.db.router.PostgreSQLFailoverRouter', ]
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
]
AUTH_USER_MODEL = 'osf.OSFUser'
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = osf_settings.SECRET_KEY
AUTHENTICATION_BACKENDS = (
'api.base.authentication.backends.ODMBackend',
'guardian.backends.ObjectPermissionBackend',
)
# SECURITY WARNING: don't run with debug turned on in production!
DEV_MODE = osf_settings.DEV_MODE
DEBUG = osf_settings.DEBUG_MODE
DEBUG_PROPAGATE_EXCEPTIONS = True
# session:
SESSION_COOKIE_NAME = 'api'
SESSION_COOKIE_SECURE = osf_settings.SECURE_MODE
SESSION_COOKIE_HTTPONLY = osf_settings.SESSION_COOKIE_HTTPONLY
# csrf:
CSRF_COOKIE_NAME = 'api-csrf'
CSRF_COOKIE_SECURE = osf_settings.SECURE_MODE
CSRF_COOKIE_HTTPONLY = osf_settings.SECURE_MODE
ALLOWED_HOSTS = [
'.osf.io',
]
# Application definition
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
'django.contrib.admin',
# 3rd party
'django_celery_beat',
'django_celery_results',
'rest_framework',
'corsheaders',
'raven.contrib.django.raven_compat',
'django_extensions',
'guardian',
'storages',
'waffle',
'elasticsearch_metrics',
# OSF
'osf',
# Addons
'addons.osfstorage',
'addons.bitbucket',
'addons.box',
'addons.dataverse',
'addons.dropbox',
'addons.figshare',
'addons.forward',
'addons.github',
'addons.gitlab',
'addons.googledrive',
'addons.mendeley',
'addons.onedrive',
'addons.owncloud',
'addons.s3',
'addons.twofactor',
'addons.wiki',
'addons.zotero',
)
# local development using https
if osf_settings.SECURE_MODE and DEBUG:
INSTALLED_APPS += ('sslserver',)
# TODO: Are there more granular ways to configure reporting specifically related to the API?
RAVEN_CONFIG = {
'tags': {'App': 'api'},
'dsn': osf_settings.SENTRY_DSN,
'release': osf_settings.VERSION,
}
BULK_SETTINGS = {
'DEFAULT_BULK_LIMIT': 100,
}
MAX_PAGE_SIZE = 100
REST_FRAMEWORK = {
'PAGE_SIZE': 10,
'DEFAULT_RENDERER_CLASSES': (
'api.base.renderers.JSONAPIRenderer',
'api.base.renderers.JSONRendererWithESISupport',
'api.base.renderers.BrowsableAPIRendererNoForms',
),
'DEFAULT_PARSER_CLASSES': (
'api.base.parsers.JSONAPIParser',
'api.base.parsers.JSONAPIParserForRegularJSON',
'rest_framework.parsers.FormParser',
'rest_framework.parsers.MultiPartParser',
),
'EXCEPTION_HANDLER': 'api.base.exceptions.json_api_exception_handler',
'DEFAULT_CONTENT_NEGOTIATION_CLASS': 'api.base.content_negotiation.JSONAPIContentNegotiation',
'DEFAULT_VERSIONING_CLASS': 'api.base.versioning.BaseVersioning',
'DEFAULT_VERSION': '2.0',
'ALLOWED_VERSIONS': (
'2.0',
'2.1',
'2.2',
'2.3',
'2.4',
'2.5',
'2.6',
'2.7',
'2.8',
'2.9',
'2.10',
'2.11',
'2.12',
'2.13',
'2.14',
'2.15',
'2.16',
'2.17',
),
'DEFAULT_FILTER_BACKENDS': ('api.base.filters.OSFOrderingFilter',),
'DEFAULT_PAGINATION_CLASS': 'api.base.pagination.JSONAPIPagination',
'ORDERING_PARAM': 'sort',
'DEFAULT_AUTHENTICATION_CLASSES': (
# Custom auth classes
'api.base.authentication.drf.OSFBasicAuthentication',
'api.base.authentication.drf.OSFSessionAuthentication',
'api.base.authentication.drf.OSFCASAuthentication',
),
'DEFAULT_THROTTLE_CLASSES': (
'rest_framework.throttling.UserRateThrottle',
'api.base.throttling.NonCookieAuthThrottle',
),
'DEFAULT_THROTTLE_RATES': {
'user': '10000/day',
'non-cookie-auth': '100/hour',
'add-contributor': '10/second',
'create-guid': '1000/hour',
'root-anon-throttle': '1000/hour',
'test-user': '2/hour',
'test-anon': '1/hour',
'send-email': '2/minute',
},
}
# Settings related to CORS Headers addon: allow API to receive authenticated requests from OSF
# CORS plugin only matches based on "netloc" part of URL, so as workaround we add that to the list
CORS_ORIGIN_ALLOW_ALL = False
CORS_ORIGIN_WHITELIST = (
urlparse(osf_settings.DOMAIN).netloc,
osf_settings.DOMAIN,
)
# This needs to remain True to allow cross origin requests that are in CORS_ORIGIN_WHITELIST to
# use cookies.
CORS_ALLOW_CREDENTIALS = True
# Set dynamically on app init
ORIGINS_WHITELIST = ()
MIDDLEWARE = (
'api.base.middleware.DjangoGlobalMiddleware',
'api.base.middleware.CeleryTaskMiddleware',
'api.base.middleware.PostcommitTaskMiddleware',
# A profiling middleware. ONLY FOR DEV USE
    # Uncomment and add "prof" to url params to receive a profile for that url
# 'api.base.middleware.ProfileMiddleware',
# 'django.contrib.sessions.middleware.SessionMiddleware',
'api.base.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
# 'django.contrib.auth.middleware.AuthenticationMiddleware',
# 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
# 'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'waffle.middleware.WaffleMiddleware',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
},
]
ROOT_URLCONF = 'api.base.urls'
WSGI_APPLICATION = 'api.base.wsgi.application'
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# https://django-storages.readthedocs.io/en/latest/backends/gcloud.html
if os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', False):
# Required to interact with Google Cloud Storage
DEFAULT_FILE_STORAGE = 'api.base.storage.RequestlessURLGoogleCloudStorage'
GS_BUCKET_NAME = os.environ.get('GS_BUCKET_NAME', 'cos-osf-stage-cdn-us')
GS_FILE_OVERWRITE = os.environ.get('GS_FILE_OVERWRITE', False)
elif osf_settings.DEV_MODE or osf_settings.DEBUG_MODE:
DEFAULT_FILE_STORAGE = 'api.base.storage.DevFileSystemStorage'
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static/vendor')
API_BASE = 'v2/'
API_PRIVATE_BASE = '_/'
STATIC_URL = '/static/'
NODE_CATEGORY_MAP = osf_settings.NODE_CATEGORY_MAP
DEBUG_TRANSACTIONS = DEBUG
JWT_SECRET = 'osf_api_cas_login_jwt_secret_32b'
JWE_SECRET = 'osf_api_cas_login_jwe_secret_32b'
ENABLE_VARNISH = osf_settings.ENABLE_VARNISH
ENABLE_ESI = osf_settings.ENABLE_ESI
VARNISH_SERVERS = osf_settings.VARNISH_SERVERS
ESI_MEDIA_TYPES = osf_settings.ESI_MEDIA_TYPES
ADDONS_FOLDER_CONFIGURABLE = ['box', 'dropbox', 's3', 'googledrive', 'figshare', 'owncloud', 'onedrive']
ADDONS_OAUTH = ADDONS_FOLDER_CONFIGURABLE + ['dataverse', 'github', 'bitbucket', 'gitlab', 'mendeley', 'zotero', 'forward']
BYPASS_THROTTLE_TOKEN = '<PASSWORD>'
OSF_SHELL_USER_IMPORTS = None
# Settings for use in the admin
OSF_URL = 'https://osf.io'
SELECT_FOR_UPDATE_ENABLED = True
# Disable anonymous user permissions in django-guardian
ANONYMOUS_USER_NAME = None
# If set to True, automated tests with extra queries will fail.
NPLUSONE_RAISE = False
# salt used for generating hashids
HASHIDS_SALT = 'pinkhimalayan'
# django-elasticsearch-metrics
ELASTICSEARCH_DSL = {
'default': {
'hosts': os.environ.get('ELASTIC6_URI', '127.0.0.1:9201'),
'retry_on_timeout': True,
},
}
# Store yearly indices for time-series metrics
ELASTICSEARCH_METRICS_DATE_FORMAT = '%Y'
WAFFLE_CACHE_NAME = 'waffle_cache'
STORAGE_USAGE_CACHE_NAME = 'storage_usage'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
STORAGE_USAGE_CACHE_NAME: {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'osf_cache_table',
},
WAFFLE_CACHE_NAME: {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
}
| 1.945313 | 2 |
tests/__init__.py | GTedHa/gblackboard | 0 | 3016 | # -*- coding: utf-8 -*-
"""Unit test package for gblackboard."""
| 0.875 | 1 |
src/fabricflow/fibc/api/fibcapis_pb2_grpc.py | RudSmith/beluganos | 119 | 3017 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import fibcapi_pb2 as fibcapi__pb2
import fibcapis_pb2 as fibcapis__pb2
class FIBCApApiStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Monitor = channel.unary_stream(
'/fibcapi.FIBCApApi/Monitor',
request_serializer=fibcapis__pb2.ApMonitorRequest.SerializeToString,
response_deserializer=fibcapis__pb2.ApMonitorReply.FromString,
)
self.GetPortStats = channel.unary_stream(
'/fibcapi.FIBCApApi/GetPortStats',
request_serializer=fibcapis__pb2.ApGetPortStatsRequest.SerializeToString,
response_deserializer=fibcapi__pb2.FFPortStats.FromString,
)
self.ModPortStats = channel.unary_unary(
'/fibcapi.FIBCApApi/ModPortStats',
request_serializer=fibcapis__pb2.ApModPortStatsRequest.SerializeToString,
response_deserializer=fibcapis__pb2.ApModPortStatsReply.FromString,
)
self.GetPortEntries = channel.unary_stream(
'/fibcapi.FIBCApApi/GetPortEntries',
request_serializer=fibcapis__pb2.ApGetPortEntriesRequest.SerializeToString,
response_deserializer=fibcapis__pb2.DbPortEntry.FromString,
)
self.GetIDEntries = channel.unary_stream(
'/fibcapi.FIBCApApi/GetIDEntries',
request_serializer=fibcapis__pb2.ApGetIdEntriesRequest.SerializeToString,
response_deserializer=fibcapis__pb2.DbIdEntry.FromString,
)
self.GetDpEntries = channel.unary_stream(
'/fibcapi.FIBCApApi/GetDpEntries',
request_serializer=fibcapis__pb2.ApGetDpEntriesRequest.SerializeToString,
response_deserializer=fibcapis__pb2.DbDpEntry.FromString,
)
self.AddPortEntry = channel.unary_unary(
'/fibcapi.FIBCApApi/AddPortEntry',
request_serializer=fibcapis__pb2.DbPortEntry.SerializeToString,
response_deserializer=fibcapis__pb2.ApAddPortEntryReply.FromString,
)
self.AddIDEntry = channel.unary_unary(
'/fibcapi.FIBCApApi/AddIDEntry',
request_serializer=fibcapis__pb2.DbIdEntry.SerializeToString,
response_deserializer=fibcapis__pb2.ApAddIdEntryReply.FromString,
)
self.DelPortEntry = channel.unary_unary(
'/fibcapi.FIBCApApi/DelPortEntry',
request_serializer=fibcapis__pb2.DbPortKey.SerializeToString,
response_deserializer=fibcapis__pb2.ApDelPortEntryReply.FromString,
)
self.DelIDEntry = channel.unary_unary(
'/fibcapi.FIBCApApi/DelIDEntry',
request_serializer=fibcapis__pb2.DbIdEntry.SerializeToString,
response_deserializer=fibcapis__pb2.ApDelIdEntryReply.FromString,
)
self.GetStats = channel.unary_stream(
'/fibcapi.FIBCApApi/GetStats',
request_serializer=fibcapis__pb2.ApGetStatsRequest.SerializeToString,
response_deserializer=fibcapis__pb2.StatsEntry.FromString,
)
self.RunOAM = channel.unary_unary(
'/fibcapi.FIBCApApi/RunOAM',
request_serializer=fibcapi__pb2.OAM.Request.SerializeToString,
response_deserializer=fibcapis__pb2.OAMReplyAck.FromString,
)
class FIBCApApiServicer(object):
# missing associated documentation comment in .proto file
pass
def Monitor(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetPortStats(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ModPortStats(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetPortEntries(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetIDEntries(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetDpEntries(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AddPortEntry(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AddIDEntry(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DelPortEntry(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DelIDEntry(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetStats(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RunOAM(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_FIBCApApiServicer_to_server(servicer, server):
rpc_method_handlers = {
'Monitor': grpc.unary_stream_rpc_method_handler(
servicer.Monitor,
request_deserializer=fibcapis__pb2.ApMonitorRequest.FromString,
response_serializer=fibcapis__pb2.ApMonitorReply.SerializeToString,
),
'GetPortStats': grpc.unary_stream_rpc_method_handler(
servicer.GetPortStats,
request_deserializer=fibcapis__pb2.ApGetPortStatsRequest.FromString,
response_serializer=fibcapi__pb2.FFPortStats.SerializeToString,
),
'ModPortStats': grpc.unary_unary_rpc_method_handler(
servicer.ModPortStats,
request_deserializer=fibcapis__pb2.ApModPortStatsRequest.FromString,
response_serializer=fibcapis__pb2.ApModPortStatsReply.SerializeToString,
),
'GetPortEntries': grpc.unary_stream_rpc_method_handler(
servicer.GetPortEntries,
request_deserializer=fibcapis__pb2.ApGetPortEntriesRequest.FromString,
response_serializer=fibcapis__pb2.DbPortEntry.SerializeToString,
),
'GetIDEntries': grpc.unary_stream_rpc_method_handler(
servicer.GetIDEntries,
request_deserializer=fibcapis__pb2.ApGetIdEntriesRequest.FromString,
response_serializer=fibcapis__pb2.DbIdEntry.SerializeToString,
),
'GetDpEntries': grpc.unary_stream_rpc_method_handler(
servicer.GetDpEntries,
request_deserializer=fibcapis__pb2.ApGetDpEntriesRequest.FromString,
response_serializer=fibcapis__pb2.DbDpEntry.SerializeToString,
),
'AddPortEntry': grpc.unary_unary_rpc_method_handler(
servicer.AddPortEntry,
request_deserializer=fibcapis__pb2.DbPortEntry.FromString,
response_serializer=fibcapis__pb2.ApAddPortEntryReply.SerializeToString,
),
'AddIDEntry': grpc.unary_unary_rpc_method_handler(
servicer.AddIDEntry,
request_deserializer=fibcapis__pb2.DbIdEntry.FromString,
response_serializer=fibcapis__pb2.ApAddIdEntryReply.SerializeToString,
),
'DelPortEntry': grpc.unary_unary_rpc_method_handler(
servicer.DelPortEntry,
request_deserializer=fibcapis__pb2.DbPortKey.FromString,
response_serializer=fibcapis__pb2.ApDelPortEntryReply.SerializeToString,
),
'DelIDEntry': grpc.unary_unary_rpc_method_handler(
servicer.DelIDEntry,
request_deserializer=fibcapis__pb2.DbIdEntry.FromString,
response_serializer=fibcapis__pb2.ApDelIdEntryReply.SerializeToString,
),
'GetStats': grpc.unary_stream_rpc_method_handler(
servicer.GetStats,
request_deserializer=fibcapis__pb2.ApGetStatsRequest.FromString,
response_serializer=fibcapis__pb2.StatsEntry.SerializeToString,
),
'RunOAM': grpc.unary_unary_rpc_method_handler(
servicer.RunOAM,
request_deserializer=fibcapi__pb2.OAM.Request.FromString,
response_serializer=fibcapis__pb2.OAMReplyAck.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'fibcapi.FIBCApApi', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class FIBCVmApiStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.SendHello = channel.unary_unary(
'/fibcapi.FIBCVmApi/SendHello',
request_serializer=fibcapi__pb2.Hello.SerializeToString,
response_deserializer=fibcapis__pb2.HelloReply.FromString,
)
self.SendPortConfig = channel.unary_unary(
'/fibcapi.FIBCVmApi/SendPortConfig',
request_serializer=fibcapi__pb2.PortConfig.SerializeToString,
response_deserializer=fibcapis__pb2.PortConfigReply.FromString,
)
self.SendFlowMod = channel.unary_unary(
'/fibcapi.FIBCVmApi/SendFlowMod',
request_serializer=fibcapi__pb2.FlowMod.SerializeToString,
response_deserializer=fibcapis__pb2.FlowModReply.FromString,
)
self.SendGroupMod = channel.unary_unary(
'/fibcapi.FIBCVmApi/SendGroupMod',
request_serializer=fibcapi__pb2.GroupMod.SerializeToString,
response_deserializer=fibcapis__pb2.GroupModReply.FromString,
)
self.SendOAMReply = channel.unary_unary(
'/fibcapi.FIBCVmApi/SendOAMReply',
request_serializer=fibcapis__pb2.OAMReply.SerializeToString,
response_deserializer=fibcapis__pb2.OAMReplyAck.FromString,
)
self.Monitor = channel.unary_stream(
'/fibcapi.FIBCVmApi/Monitor',
request_serializer=fibcapis__pb2.VmMonitorRequest.SerializeToString,
response_deserializer=fibcapis__pb2.VmMonitorReply.FromString,
)
class FIBCVmApiServicer(object):
# missing associated documentation comment in .proto file
pass
def SendHello(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendPortConfig(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendFlowMod(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendGroupMod(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendOAMReply(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Monitor(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_FIBCVmApiServicer_to_server(servicer, server):
rpc_method_handlers = {
'SendHello': grpc.unary_unary_rpc_method_handler(
servicer.SendHello,
request_deserializer=fibcapi__pb2.Hello.FromString,
response_serializer=fibcapis__pb2.HelloReply.SerializeToString,
),
'SendPortConfig': grpc.unary_unary_rpc_method_handler(
servicer.SendPortConfig,
request_deserializer=fibcapi__pb2.PortConfig.FromString,
response_serializer=fibcapis__pb2.PortConfigReply.SerializeToString,
),
'SendFlowMod': grpc.unary_unary_rpc_method_handler(
servicer.SendFlowMod,
request_deserializer=fibcapi__pb2.FlowMod.FromString,
response_serializer=fibcapis__pb2.FlowModReply.SerializeToString,
),
'SendGroupMod': grpc.unary_unary_rpc_method_handler(
servicer.SendGroupMod,
request_deserializer=fibcapi__pb2.GroupMod.FromString,
response_serializer=fibcapis__pb2.GroupModReply.SerializeToString,
),
'SendOAMReply': grpc.unary_unary_rpc_method_handler(
servicer.SendOAMReply,
request_deserializer=fibcapis__pb2.OAMReply.FromString,
response_serializer=fibcapis__pb2.OAMReplyAck.SerializeToString,
),
'Monitor': grpc.unary_stream_rpc_method_handler(
servicer.Monitor,
request_deserializer=fibcapis__pb2.VmMonitorRequest.FromString,
response_serializer=fibcapis__pb2.VmMonitorReply.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'fibcapi.FIBCVmApi', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class FIBCVsApiStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.SendHello = channel.unary_unary(
'/fibcapi.FIBCVsApi/SendHello',
request_serializer=fibcapi__pb2.FFHello.SerializeToString,
response_deserializer=fibcapis__pb2.FFHelloReply.FromString,
)
self.SendFFPacket = channel.unary_unary(
'/fibcapi.FIBCVsApi/SendFFPacket',
request_serializer=fibcapi__pb2.FFPacket.SerializeToString,
response_deserializer=fibcapis__pb2.FFPacketReply.FromString,
)
self.SendPacketIn = channel.unary_unary(
'/fibcapi.FIBCVsApi/SendPacketIn',
request_serializer=fibcapi__pb2.FFPacketIn.SerializeToString,
response_deserializer=fibcapis__pb2.FFPacketInReply.FromString,
)
self.SendOAMReply = channel.unary_unary(
'/fibcapi.FIBCVsApi/SendOAMReply',
request_serializer=fibcapis__pb2.OAMReply.SerializeToString,
response_deserializer=fibcapis__pb2.OAMReplyAck.FromString,
)
self.Monitor = channel.unary_stream(
'/fibcapi.FIBCVsApi/Monitor',
request_serializer=fibcapis__pb2.VsMonitorRequest.SerializeToString,
response_deserializer=fibcapis__pb2.VsMonitorReply.FromString,
)
class FIBCVsApiServicer(object):
# missing associated documentation comment in .proto file
pass
def SendHello(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendFFPacket(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendPacketIn(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendOAMReply(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Monitor(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_FIBCVsApiServicer_to_server(servicer, server):
rpc_method_handlers = {
'SendHello': grpc.unary_unary_rpc_method_handler(
servicer.SendHello,
request_deserializer=fibcapi__pb2.FFHello.FromString,
response_serializer=fibcapis__pb2.FFHelloReply.SerializeToString,
),
'SendFFPacket': grpc.unary_unary_rpc_method_handler(
servicer.SendFFPacket,
request_deserializer=fibcapi__pb2.FFPacket.FromString,
response_serializer=fibcapis__pb2.FFPacketReply.SerializeToString,
),
'SendPacketIn': grpc.unary_unary_rpc_method_handler(
servicer.SendPacketIn,
request_deserializer=fibcapi__pb2.FFPacketIn.FromString,
response_serializer=fibcapis__pb2.FFPacketInReply.SerializeToString,
),
'SendOAMReply': grpc.unary_unary_rpc_method_handler(
servicer.SendOAMReply,
request_deserializer=fibcapis__pb2.OAMReply.FromString,
response_serializer=fibcapis__pb2.OAMReplyAck.SerializeToString,
),
'Monitor': grpc.unary_stream_rpc_method_handler(
servicer.Monitor,
request_deserializer=fibcapis__pb2.VsMonitorRequest.FromString,
response_serializer=fibcapis__pb2.VsMonitorReply.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'fibcapi.FIBCVsApi', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class FIBCDpApiStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.SendHello = channel.unary_unary(
'/fibcapi.FIBCDpApi/SendHello',
request_serializer=fibcapi__pb2.FFHello.SerializeToString,
response_deserializer=fibcapis__pb2.FFHelloReply.FromString,
)
self.SendPacketIn = channel.unary_unary(
'/fibcapi.FIBCDpApi/SendPacketIn',
request_serializer=fibcapi__pb2.FFPacketIn.SerializeToString,
response_deserializer=fibcapis__pb2.FFPacketInReply.FromString,
)
self.SendPortStatus = channel.unary_unary(
'/fibcapi.FIBCDpApi/SendPortStatus',
request_serializer=fibcapi__pb2.FFPortStatus.SerializeToString,
response_deserializer=fibcapis__pb2.FFPortStatusReply.FromString,
)
self.SendL2AddrStatus = channel.unary_unary(
'/fibcapi.FIBCDpApi/SendL2AddrStatus',
request_serializer=fibcapi__pb2.FFL2AddrStatus.SerializeToString,
response_deserializer=fibcapis__pb2.L2AddrStatusReply.FromString,
)
self.SendMultipartReply = channel.unary_unary(
'/fibcapi.FIBCDpApi/SendMultipartReply',
request_serializer=fibcapis__pb2.DpMultipartReply.SerializeToString,
response_deserializer=fibcapis__pb2.DpMultipartReplyAck.FromString,
)
self.SendOAMReply = channel.unary_unary(
'/fibcapi.FIBCDpApi/SendOAMReply',
request_serializer=fibcapis__pb2.OAMReply.SerializeToString,
response_deserializer=fibcapis__pb2.OAMReplyAck.FromString,
)
self.Monitor = channel.unary_stream(
'/fibcapi.FIBCDpApi/Monitor',
request_serializer=fibcapis__pb2.DpMonitorRequest.SerializeToString,
response_deserializer=fibcapis__pb2.DpMonitorReply.FromString,
)
class FIBCDpApiServicer(object):
# missing associated documentation comment in .proto file
pass
def SendHello(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendPacketIn(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendPortStatus(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendL2AddrStatus(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendMultipartReply(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendOAMReply(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Monitor(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_FIBCDpApiServicer_to_server(servicer, server):
rpc_method_handlers = {
'SendHello': grpc.unary_unary_rpc_method_handler(
servicer.SendHello,
request_deserializer=fibcapi__pb2.FFHello.FromString,
response_serializer=fibcapis__pb2.FFHelloReply.SerializeToString,
),
'SendPacketIn': grpc.unary_unary_rpc_method_handler(
servicer.SendPacketIn,
request_deserializer=fibcapi__pb2.FFPacketIn.FromString,
response_serializer=fibcapis__pb2.FFPacketInReply.SerializeToString,
),
'SendPortStatus': grpc.unary_unary_rpc_method_handler(
servicer.SendPortStatus,
request_deserializer=fibcapi__pb2.FFPortStatus.FromString,
response_serializer=fibcapis__pb2.FFPortStatusReply.SerializeToString,
),
'SendL2AddrStatus': grpc.unary_unary_rpc_method_handler(
servicer.SendL2AddrStatus,
request_deserializer=fibcapi__pb2.FFL2AddrStatus.FromString,
response_serializer=fibcapis__pb2.L2AddrStatusReply.SerializeToString,
),
'SendMultipartReply': grpc.unary_unary_rpc_method_handler(
servicer.SendMultipartReply,
request_deserializer=fibcapis__pb2.DpMultipartReply.FromString,
response_serializer=fibcapis__pb2.DpMultipartReplyAck.SerializeToString,
),
'SendOAMReply': grpc.unary_unary_rpc_method_handler(
servicer.SendOAMReply,
request_deserializer=fibcapis__pb2.OAMReply.FromString,
response_serializer=fibcapis__pb2.OAMReplyAck.SerializeToString,
),
'Monitor': grpc.unary_stream_rpc_method_handler(
servicer.Monitor,
request_deserializer=fibcapis__pb2.DpMonitorRequest.FromString,
response_serializer=fibcapis__pb2.DpMonitorReply.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'fibcapi.FIBCDpApi', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
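# ---------------------------------------------------------------------------
# Hedged usage sketch (editor addition, not emitted by the protoc plugin).
# It shows how the generated FIBCApApi stub and servicer are typically wired
# together; the port number, the servicer subclass and the empty request
# messages are illustrative assumptions, not taken from the original code.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
  from concurrent import futures

  class _DemoApApi(FIBCApApiServicer):
    # Override one RPC so it no longer raises UNIMPLEMENTED.
    def RunOAM(self, request, context):
      return fibcapis__pb2.OAMReplyAck()

  server = grpc.server(futures.ThreadPoolExecutor(max_workers=2))
  add_FIBCApApiServicer_to_server(_DemoApApi(), server)
  server.add_insecure_port('[::]:50091')  # assumed port
  server.start()

  stub = FIBCApApiStub(grpc.insecure_channel('localhost:50091'))
  print(stub.RunOAM(fibcapi__pb2.OAM.Request()))  # unary-unary round trip
  server.stop(0)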
| 1.71875 | 2 |
cymbology/identifiers/__init__.py | pmart123/security_id | 12 | 3018 | <gh_stars>10-100
from cymbology.identifiers.sedol import Sedol
from cymbology.identifiers.cusip import Cusip, cusip_from_isin
from cymbology.identifiers.isin import Isin
__all__ = ('Sedol', 'Cusip', 'cusip_from_isin', 'Isin')
| 1.734375 | 2 |
api/src/error_report/models.py | Noahffiliation/corpus-christi | 35 | 3019 | from marshmallow import Schema, fields
from marshmallow.validate import Range, Length
from sqlalchemy import Column, Integer, Boolean, DateTime
from ..db import Base
from ..shared.models import StringTypes
# ---- Error-report
class ErrorReport(Base):
__tablename__ = 'error_report'
id = Column(Integer, primary_key=True)
description = Column(StringTypes.LONG_STRING, nullable=False)
time_stamp = Column(DateTime)
status_code = Column(Integer)
endpoint = Column(StringTypes.MEDIUM_STRING)
solved = Column(Boolean, default=False)
def __repr__(self):
return f"<Error-report(id={self.id})>"
class ErrorReportSchema(Schema):
id = fields.Integer(dump_only=True, required=True, validate=Range(min=1))
description = fields.String(required=True, validate=Length(min=1))
time_stamp = fields.DateTime()
status_code = fields.Integer()
endpoint = fields.String()
solved = fields.Boolean()
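# Hedged usage sketch (editor addition): serializing a report through the
# schema; the field values and the marshmallow-3 style dict returned by
# `dump()` are assumptions, not taken from the original code.
#
#   report = ErrorReport(description='Something broke', status_code=500,
#                        endpoint='/api/v1/events')
#   ErrorReportSchema().dump(report)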
| 2.421875 | 2 |
Python/Vowel-Substring/solution.py | arpitran/HackerRank_solutions | 0 | 3020 | <reponame>arpitran/HackerRank_solutions
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'findSubstring' function below.
#
# The function is expected to return a STRING.
# The function accepts following parameters:
# 1. STRING s
# 2. INTEGER k
#
def isVowel(x):
if(x=="a" or x=='e' or x=='i' or x=='o' or x=='u'):
return True
return False
def vowelcount(x):
lowercase = x.lower()
vowel_counts = {}
for vowel in "aeiou":
count = lowercase.count(vowel)
vowel_counts[vowel] = count
counts = vowel_counts.values()
total_vowels = sum(counts)
return total_vowels
def findSubstring(s, k):
test_str = s
count = 0
sub_string = {}
res = [test_str[i: j] for i in range(len(test_str)) for j in range(i+1, len(test_str)+1) if len(test_str[i:j])==k]
for i in res:
sub_string[i]=vowelcount(i)
if sub_string.get(max(sub_string,key=sub_string.get))==0:
return "Not found!"
else:
return str(max(sub_string,key=sub_string.get))
# Write your code here
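# Hedged example (editor addition): sample calls with assumed inputs.
if __name__ == '__main__':
    print(findSubstring("caberqiitefg", 5))  # -> "erqii" (3 vowels)
    print(findSubstring("bcdfg", 2))         # -> "Not found!" (no vowels at all)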
| 3.984375 | 4 |
nima/models/productos/constants.py | erichav/NIMA | 0 | 3021 | <reponame>erichav/NIMA
COLLECTION = 'productos' | 0.910156 | 1 |
deepchem/metrics/score_function.py | hsjang001205/deepchem | 1 | 3022 | <reponame>hsjang001205/deepchem<filename>deepchem/metrics/score_function.py
"""Evaluation metrics."""
import numpy as np
from sklearn.metrics import matthews_corrcoef # noqa
from sklearn.metrics import recall_score # noqa
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import r2_score # noqa
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import precision_score # noqa
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import auc
from sklearn.metrics import jaccard_score
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score # noqa
from sklearn.metrics import accuracy_score # noqa
from sklearn.metrics import balanced_accuracy_score # noqa
from scipy.stats import pearsonr
# kappa_score is an alias for `sklearn.metrics.cohen_kappa_score`
kappa_score = cohen_kappa_score
def pearson_r2_score(y: np.ndarray, y_pred: np.ndarray) -> float:
"""Computes Pearson R^2 (square of Pearson correlation).
Parameters
----------
y: np.ndarray
ground truth array
y_pred: np.ndarray
predicted array
Returns
-------
float
The Pearson-R^2 score.
"""
return pearsonr(y, y_pred)[0]**2
def jaccard_index(y: np.ndarray, y_pred: np.ndarray) -> float:
"""Computes Jaccard Index which is the Intersection Over Union metric
which is commonly used in image segmentation tasks.
  DEPRECATED: WILL BE REMOVED IN A FUTURE VERSION OF DEEPCHEM. USE `jaccard_score` instead.
Parameters
----------
y: np.ndarray
ground truth array
y_pred: np.ndarray
predicted array
Returns
-------
score: float
The jaccard index. A number between 0 and 1.
"""
return jaccard_score(y, y_pred)
def pixel_error(y: np.ndarray, y_pred: np.ndarray) -> float:
"""An error metric in case y, y_pred are images.
Defined as 1 - the maximal F-score of pixel similarity, or squared
Euclidean distance between the original and the result labels.
Parameters
----------
y: np.ndarray
ground truth array
y_pred: np.ndarray
predicted array
Returns
-------
score: float
The pixel-error. A number between 0 and 1.
"""
return 1 - f1_score(y, y_pred)
def prc_auc_score(y: np.ndarray, y_pred: np.ndarray) -> float:
"""Compute area under precision-recall curve
Parameters
----------
y: np.ndarray
A numpy array of shape `(N, n_classes)` or `(N,)` with true labels
y_pred: np.ndarray
Of shape `(N, n_classes)` with class probabilities.
Returns
-------
float
The area under the precision-recall curve. A number between 0 and 1.
"""
precision, recall, _ = precision_recall_curve(y[:, 1], y_pred[:, 1])
return auc(recall, precision)
def rms_score(y_true: np.ndarray, y_pred: np.ndarray) -> float:
"""Computes RMS error."""
return np.sqrt(mean_squared_error(y_true, y_pred))
def mae_score(y_true: np.ndarray, y_pred: np.ndarray) -> float:
"""Computes MAE."""
return mean_absolute_error(y_true, y_pred)
def bedroc_score(y_true: np.ndarray, y_pred: np.ndarray, alpha: float = 20.0):
"""Compute BEDROC metric.
BEDROC metric implemented according to Truchon and Bayley that modifies
the ROC score by allowing for a factor of early recognition.
Please confirm details from [1]_.
Parameters
----------
y_true: np.ndarray
Binary class labels. 1 for positive class, 0 otherwise
y_pred: np.ndarray
Predicted labels
alpha: float, default 20.0
Early recognition parameter
Returns
-------
float
Value in [0, 1] that indicates the degree of early recognition
Notes
-----
This function requires RDKit to be installed.
References
----------
.. [1] Truchon et al. "Evaluating virtual screening methods: good and bad metrics
for the “early recognition” problem." Journal of chemical information and modeling
47.2 (2007): 488-508.
"""
try:
from rdkit.ML.Scoring.Scoring import CalcBEDROC
except ModuleNotFoundError:
raise ValueError("This function requires RDKit to be installed.")
# validation
assert len(y_true) == len(y_pred), 'Number of examples do not match'
assert np.array_equal(
np.unique(y_true).astype(int),
[0, 1]), ('Class labels must be binary: %s' % np.unique(y_true))
yt = np.asarray(y_true)
yp = np.asarray(y_pred)
yt = yt.flatten()
yp = yp[:, 1].flatten() # Index 1 because one_hot predictions
scores = list(zip(yt, yp))
scores = sorted(scores, key=lambda pair: pair[1], reverse=True)
return CalcBEDROC(scores, 0, alpha)
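if __name__ == '__main__':
  # Hedged sanity check (editor addition): tiny made-up arrays exercising a
  # few of the metrics above; the values are illustrative only.
  y = np.array([3.0, -0.5, 2.0, 7.0])
  y_hat = np.array([2.5, 0.0, 2.0, 8.0])
  print(pearson_r2_score(y, y_hat))  # close to 1.0 for well-correlated data
  print(rms_score(y, y_hat), mae_score(y, y_hat))

  # prc_auc_score expects one-hot labels and per-class probabilities.
  yb = np.array([[1, 0], [0, 1], [1, 0], [0, 1]])
  yb_hat = np.array([[0.8, 0.2], [0.3, 0.7], [0.6, 0.4], [0.1, 0.9]])
  print(prc_auc_score(yb, yb_hat))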
| 2.234375 | 2 |
hvm/chains/base.py | hyperevo/py-helios-node | 0 | 3023 | <reponame>hyperevo/py-helios-node
from __future__ import absolute_import
import operator
from collections import deque
import functools
from abc import (
ABCMeta,
abstractmethod
)
import rlp_cython as rlp
import time
import math
from uuid import UUID
from typing import ( # noqa: F401
Any,
Optional,
Callable,
cast,
Dict,
Generator,
Iterator,
Tuple,
Type,
TYPE_CHECKING,
Union,
List,
Iterable,
)
import logging
from itertools import groupby
from hvm.rlp.receipts import Receipt
from hvm.types import Timestamp
from eth_typing import (
Address,
BlockNumber,
Hash32,
)
from eth_utils import (
to_tuple,
to_set,
)
from hvm.db.backends.base import BaseDB
from hvm.db.backends.memory import MemoryDB
from hvm.db.chain import (
BaseChainDB,
ChainDB,
)
from hvm.db.journal import (
JournalDB,
)
from hvm.db.read_only import ReadOnlyDB
from hvm.constants import (
BLOCK_GAS_LIMIT,
BLANK_ROOT_HASH,
NUMBER_OF_HEAD_HASH_TO_SAVE,
TIME_BETWEEN_HEAD_HASH_SAVE,
GENESIS_PARENT_HASH,
)
from hvm.db.trie import make_trie_root_and_nodes
from hvm import constants
from hvm.estimators import (
get_gas_estimator,
)
from hvm.exceptions import (
HeaderNotFound,
TransactionNotFound,
ValidationError,
VMNotFound,
BlockOnWrongChain,
CanonicalHeadNotFound,
CannotCalculateStake,
NotEnoughTimeBetweenBlocks,
ReceivableTransactionNotFound,
TriedImportingGenesisBlock,
JournalDbNotActivated,
ReplacingBlocksNotAllowed,
UnprocessedBlockNotAllowed,
AppendHistoricalRootHashTooOld,
HistoricalNetworkTPCMissing,
HistoricalMinGasPriceError,
UnprocessedBlockChildIsProcessed,
ParentNotFound,
NoChronologicalBlocks,
RewardProofSenderBlockMissing,
InvalidHeadRootTimestamp,
RewardAmountRoundsToZero, TriedDeletingGenesisBlock, NoGenesisBlockPresent)
from eth_keys.exceptions import (
BadSignature,
)
from hvm.utils.blocks import reorganize_chronological_block_list_for_correct_chronological_order_at_index
from hvm.validation import (
validate_block_number,
validate_uint256,
validate_word,
validate_vm_configuration,
validate_canonical_address,
validate_is_queue_block,
validate_centisecond_timestamp,
)
from hvm.rlp.blocks import (
BaseBlock,
BaseQueueBlock,
)
from hvm.rlp.headers import (
BlockHeader,
HeaderParams,
)
from hvm.rlp.transactions import (
BaseTransaction,
BaseReceiveTransaction
)
from hvm.utils.db import (
apply_state_dict,
)
from hvm.utils.datatypes import (
Configurable,
)
from hvm.utils.headers import (
compute_gas_limit_bounds,
)
from hvm.utils.hexadecimal import (
encode_hex,
decode_hex
)
from hvm.utils.rlp import (
ensure_imported_block_unchanged,
)
from hvm.db.chain_head import ChainHeadDB
from hvm.db.consensus import ConsensusDB
from eth_keys import keys
from eth_keys.datatypes import(
BaseKey,
PublicKey,
PrivateKey
)
from hvm.utils.numeric import (
effecient_diff,
are_items_in_list_equal,
)
from sortedcontainers import (
SortedList,
SortedDict,
)
from hvm.rlp.consensus import NodeStakingScore, PeerNodeHealth
from hvm.rlp.accounts import TransactionKey
if TYPE_CHECKING:
from hvm.vm.base import BaseVM # noqa: F401
from functools import partial
import asyncio
# Mapping from address to account state.
# 'balance', 'nonce' -> int
# 'code' -> bytes
# 'storage' -> Dict[int, int]
AccountState = Dict[Address, Dict[str, Union[int, bytes, Dict[int, int]]]]
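# Hedged illustration (editor addition): a minimal AccountState value, with an
# assumed 20-byte address and made-up balance/nonce/storage values, e.g.
#
#   {b'\x00' * 20: {'balance': 10**18, 'nonce': 0, 'code': b'', 'storage': {0: 1}}}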
class BaseChain(Configurable, metaclass=ABCMeta):
"""
The base class for all Chain objects
"""
chain_head_db: ChainHeadDB = None
chaindb: ChainDB = None
chaindb_class = None # type: Type[BaseChainDB]
vm_configuration = None # type: Tuple[Tuple[int, Type[BaseVM]], ...]
genesis_wallet_address: Address = None
genesis_block_timestamp: Timestamp = None
min_time_between_blocks: int = None
#
# Helpers
#
@classmethod
@abstractmethod
def get_chaindb_class(cls) -> Type[BaseChainDB]:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_consensus_db(self, header: BlockHeader = None, timestamp: Timestamp = None) -> ConsensusDB:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def enable_read_only_db(self) -> None:
raise NotImplementedError("Chain classes must implement this method")
#
# Chain API
#
@classmethod
@abstractmethod
def from_genesis(cls,
base_db: BaseDB,
genesis_params: Dict[str, HeaderParams],
genesis_state: AccountState=None) -> 'BaseChain':
raise NotImplementedError("Chain classes must implement this method")
@classmethod
@abstractmethod
def from_genesis_header(cls,
base_db: BaseDB,
genesis_header: BlockHeader) -> 'BaseChain':
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_chain_at_block_parent(self, block: BaseBlock) -> 'BaseChain':
raise NotImplementedError("Chain classes must implement this method")
#
# VM API
#
@classmethod
def get_vm_configuration(cls) -> Tuple[Tuple[int, Type['BaseVM']], ...]:
return cls.vm_configuration
@classmethod
def get_vm_class(cls, header: BlockHeader) -> Type['BaseVM']:
"""
        Returns the VM class for the given header, selected by the header's timestamp.
"""
return cls.get_vm_class_for_block_timestamp(header.timestamp)
@abstractmethod
def get_vm(self, header: BlockHeader=None, timestamp: Timestamp = None) -> 'BaseVM':
raise NotImplementedError("Chain classes must implement this method")
@classmethod
def get_vm_class_for_block_timestamp(cls, timestamp: int = None) -> Type['BaseVM']:
"""
        Returns the VM class for the given timestamp, defaulting to the current time when no timestamp is given.
"""
if timestamp is None:
timestamp = int(time.time())
if cls.vm_configuration is None:
raise AttributeError("Chain classes must define the VMs in vm_configuration")
validate_uint256(timestamp)
for start_timestamp, vm_class in reversed(cls.vm_configuration):
if timestamp >= start_timestamp:
return vm_class
else:
raise VMNotFound("No vm available for timestamp #{0}".format(timestamp))
#
# Header API
#
@abstractmethod
def create_header_from_parent(self,
parent_header: BlockHeader,
**header_params: HeaderParams) -> BlockHeader:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_block_header_by_hash(self, block_hash: Hash32) -> BlockHeader:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_canonical_head(self):
raise NotImplementedError("Chain classes must implement this method")
#
# Block API
#
@abstractmethod
def get_ancestors(self, limit: int, header: BlockHeader=None) -> Iterator[BaseBlock]:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_block_by_hash(self, block_hash: Hash32) -> BaseBlock:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_block_by_header(self, block_header: BlockHeader) -> BaseBlock:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_block_by_number(self, block_number: BlockNumber, wallet_address: Address = None) -> BaseBlock:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_blocks_on_chain(self, start: int, end: int, wallet_address: Address = None) -> List[BaseBlock]:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_all_blocks_on_chain(self, wallet_address: Address = None) -> List[BaseBlock]:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_all_blocks_on_chain_by_head_block_hash(self, chain_head_hash: Hash32) -> List[BaseBlock]:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_blocks_on_chain_up_to_block_hash(self, chain_head_hash: Hash32, start_block_number: int = 0, limit: int = float('inf')) -> List[BaseBlock]:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_block(self) -> BaseBlock:
raise NotImplementedError("Chain classes must implement this method")
# @abstractmethod
# def get_canonical_block_by_number(self, block_number: BlockNumber) -> BaseBlock:
# raise NotImplementedError("Chain classes must implement this method")
# @abstractmethod
# def get_canonical_block_hash(self, block_number):
# raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_all_chronological_blocks_for_window(self, window_timestamp: Timestamp) -> List[BaseBlock]:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def import_current_queue_block(self) -> BaseBlock:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def import_current_queue_block_with_reward(self, node_staking_score_list: List[NodeStakingScore]) -> BaseBlock:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def purge_block_and_all_children_and_set_parent_as_chain_head_by_hash(self, block_hash_to_delete: Hash32) -> None:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def purge_block_and_all_children_and_set_parent_as_chain_head(self, existing_block_header: BlockHeader):
raise NotImplementedError("Chain classes must implement this method")
#
# Chronologically consistent blockchain db API
#
@abstractmethod
def check_block_chronological_consistency(self, block: BaseBlock) -> List[Hash32]:
raise NotImplementedError("Chain classes must implement this method")
#
# Transaction API
#
@abstractmethod
def get_transaction_by_block_hash_and_index(self, block_hash: Hash32, transaction_index: int) -> Union[BaseTransaction, BaseReceiveTransaction]:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def create_transaction(self, *args: Any, **kwargs: Any) -> BaseTransaction:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_canonical_transaction(self, transaction_hash: Hash32) -> BaseTransaction:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def populate_queue_block_with_receive_tx(self) -> List[BaseReceiveTransaction]:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_block_receive_transactions_by_hash(
self,
block_hash: Hash32) -> List['BaseReceiveTransaction']:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_receive_tx_from_send_tx(self, tx_hash: Hash32) -> Optional['BaseReceiveTransaction']:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def create_receivable_transactions(self) -> List[BaseReceiveTransaction]:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_receivable_transactions(self, address: Address) -> Tuple[List[BaseReceiveTransaction], List[TransactionKey]]:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_current_queue_block_nonce(self) -> int:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def create_and_sign_transaction_for_queue_block(self, *args: Any, **kwargs: Any) -> BaseTransaction:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def create_and_sign_transaction(self, *args: Any, **kwargs: Any) -> BaseTransaction:
raise NotImplementedError("Chain classes must implement this method")
#
# Chronological Chain API
#
@abstractmethod
def try_to_rebuild_chronological_chain_from_historical_root_hashes(self, historical_root_hash_timestamp: Timestamp) -> None:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_block_hashes_that_are_new_for_this_historical_root_hash_timestamp(self, historical_root_hash_timestamp: Timestamp) -> List[Tuple[Timestamp, Hash32]]:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def initialize_historical_root_hashes_and_chronological_blocks(self) -> None:
raise NotImplementedError("Chain classes must implement this method")
#
# Execution API
#
# @abstractmethod
# def apply_transaction(self, transaction):
# raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def estimate_gas(self, transaction: BaseTransaction, at_header: BlockHeader=None) -> int:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def import_block(self, block: BaseBlock, perform_validation: bool=True) -> BaseBlock:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def import_chain(self, block_list: List[BaseBlock], perform_validation: bool=True, save_block_head_hash_timestamp: bool = True, allow_replacement: bool = True) -> None:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def import_chronological_block_window(self, block_list: List[BaseBlock], window_start_timestamp: Timestamp,
save_block_head_hash_timestamp: bool = True,
allow_unprocessed: bool = False) -> None:
raise NotImplementedError("Chain classes must implement this method")
#
# Validation API
#
@abstractmethod
def get_allowed_time_of_next_block(self, chain_address: Address = None) -> Timestamp:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def validate_block(self, block: BaseBlock) -> None:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def validate_gaslimit(self, header: BlockHeader) -> None:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def validate_block_specification(self, block) -> bool:
raise NotImplementedError("Chain classes must implement this method")
#
# Stake API
#
@abstractmethod
def get_mature_stake(self, wallet_address: Address = None, raise_canonical_head_not_found_error:bool = False) -> int:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_mature_stake_for_chronological_block_window(self, chronological_block_window_timestamp, timestamp_for_stake):
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_new_block_hash_to_test_peer_node_health(self) -> Hash32:
raise NotImplementedError("Chain classes must implement this method")
#
# Min Block Gas API used for throttling the network
#
@abstractmethod
def re_initialize_historical_minimum_gas_price_at_genesis(self) -> None:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def update_current_network_tpc_capability(self, current_network_tpc_cap: int,
update_min_gas_price: bool = True) -> None:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_local_tpc_cap(self) -> int:
raise NotImplementedError("Chain classes must implement this method")
#
# Consensus db passthrough with correct db corresponding to timestamp
#
@abstractmethod
def get_signed_peer_score(self, private_key: PrivateKey,
network_id: int,
peer_wallet_address: Address,
after_block_number: BlockNumber = None,
) -> NodeStakingScore:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_signed_peer_score_string_private_key(self,
private_key_string: bytes,
peer_wallet_address: Address,
after_block_number: BlockNumber = None,
) -> NodeStakingScore:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def validate_node_staking_score(self,
node_staking_score: NodeStakingScore,
since_block_number: BlockNumber) -> None:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def save_health_request(self, peer_wallet_address: Address, response_time_in_micros: int = float('inf')) -> None:
raise NotImplementedError("Chain classes must implement this method")
@abstractmethod
def get_current_peer_node_health(self,peer_wallet_address: Address) -> PeerNodeHealth:
raise NotImplementedError("Chain classes must implement this method")
class Chain(BaseChain):
"""
A Chain is a combination of one or more VM classes. Each VM is associated
with a range of blocks. The Chain class acts as a wrapper around these other
VM classes, delegating operations to the appropriate VM depending on the
current block number.
"""
raise_errors = False
logger = logging.getLogger("hvm.chain.chain.Chain")
header = None # type: BlockHeader
network_id = None # type: int
gas_estimator = None # type: Callable
_journaldb = None
num_journal_records_for_block_import = 0
chaindb_class = ChainDB # type: Type[BaseChainDB]
chain_head_db_class = ChainHeadDB
_queue_block: BaseQueueBlock = None
def __init__(self, base_db: BaseDB, wallet_address: Address, private_key: BaseKey=None) -> None:
if not self.vm_configuration:
raise ValueError(
"The Chain class cannot be instantiated with an empty `vm_configuration`"
)
else:
validate_vm_configuration(self.vm_configuration)
validate_canonical_address(wallet_address, "Wallet Address")
self.db = base_db
self.private_key = private_key
self.wallet_address = wallet_address
self.chaindb = self.get_chaindb_class()(self.db)
self.chain_head_db = self.get_chain_head_db_class().load_from_saved_root_hash(self.db)
try:
self.header = self.create_header_from_parent(self.get_canonical_head())
except CanonicalHeadNotFound:
            # This chain has no canonical head yet, so let's make a genesis block
# self.logger.debug("Creating new genesis block on chain {}".format(self.wallet_address))
self.header = self.get_vm_class_for_block_timestamp().create_genesis_block(self.wallet_address).header
if self.gas_estimator is None:
self.gas_estimator = get_gas_estimator() # type: ignore
def reinitialize(self):
self.__init__(self.db, self.wallet_address, self.private_key)
def set_new_wallet_address(self, wallet_address: Address, private_key: BaseKey=None):
self.logger.debug('setting new wallet address')
self.wallet_address = wallet_address
self.private_key = private_key
self.reinitialize()
@property
def queue_block(self):
if self._queue_block is None:
self._queue_block = self.get_queue_block()
return self._queue_block
@queue_block.setter
def queue_block(self,val:BaseQueueBlock):
self._queue_block = val
@property
def min_time_between_blocks(self):
vm = self.get_vm(timestamp=Timestamp(int(time.time())))
min_allowed_time_between_blocks = vm.min_time_between_blocks
return min_allowed_time_between_blocks
# @property
# def consensus_db(self, header: BlockHeader = None, timestamp: Timestamp = None):
# # gets the consensus db corresponding to the block timestamp
#
# return self.get_vm(header, timestamp).consensus_db
def get_consensus_db(self, header: BlockHeader = None, timestamp: Timestamp = None) -> ConsensusDB:
# gets the consensus db corresponding to the block timestamp
return self.get_vm(header, timestamp).consensus_db
#
# Global Record and discard API
#
def enable_read_only_db(self) -> None:
if not isinstance(self.db, ReadOnlyDB):
self.base_db = self.db
self.db = ReadOnlyDB(self.base_db)
self.reinitialize()
def enable_journal_db(self):
if self._journaldb is None:
self.base_db = self.db
self._journaldb = JournalDB(self.base_db)
#we keep the name self.db so that all of the functions still work, but at this point it is a journaldb.
self.db = self._journaldb
#reinitialize to ensure chain and chain_head_db have the new journaldb
self.reinitialize()
def disable_journal_db(self):
if self._journaldb is not None:
self.db = self.base_db
self._journaldb = None
#reinitialize to ensure chain and chain_head_db have the new journaldb
self.reinitialize()
def record_journal(self) -> UUID:
if self._journaldb is not None:
return (self._journaldb.record())
else:
raise JournalDbNotActivated()
def discard_journal(self, changeset: UUID) -> None:
if self._journaldb is not None:
db_changeset = changeset
self._journaldb.discard(db_changeset)
else:
raise JournalDbNotActivated()
def commit_journal(self, changeset: UUID) -> None:
if self._journaldb is not None:
db_changeset = changeset
self._journaldb.commit(db_changeset)
else:
raise JournalDbNotActivated()
def persist_journal(self) -> None:
if self._journaldb is not None:
self._journaldb.persist()
else:
raise JournalDbNotActivated()
#
# Helpers
#
@classmethod
def get_chaindb_class(cls) -> Type[BaseChainDB]:
if cls.chaindb_class is None:
raise AttributeError("`chaindb_class` not set")
return cls.chaindb_class
@classmethod
def get_chain_head_db_class(cls) -> Type[ChainHeadDB]:
if cls.chain_head_db_class is None:
raise AttributeError("`chain_head_db class` not set")
return cls.chain_head_db_class
@classmethod
def get_genesis_wallet_address(cls) -> Address:
if cls.genesis_wallet_address is None:
raise AttributeError("`genesis_wallet_address` not set")
return cls.genesis_wallet_address
#
# Chain API
#
@classmethod
def create_genesis_header(cls,
base_db: BaseDB,
wallet_address: Address,
private_key: BaseKey,
genesis_params: Dict[str, HeaderParams],
genesis_state: AccountState=None,
) -> 'BaseChain':
genesis_vm_class = cls.get_vm_class_for_block_timestamp()
account_db = genesis_vm_class.get_state_class().get_account_db_class()(base_db)
if genesis_state is None:
genesis_state = {}
# mutation
account_db = apply_state_dict(account_db, genesis_state)
account_db.persist(save_account_hash = True, wallet_address = wallet_address)
genesis_params['account_hash'] = account_db.get_account_hash(wallet_address)
genesis_header = BlockHeader(**genesis_params)
signed_genesis_header = genesis_header.get_signed(private_key, cls.network_id)
chaindb = cls.get_chaindb_class()(base_db)
chaindb.persist_header(signed_genesis_header)
return signed_genesis_header
@classmethod
def from_genesis(cls,
base_db: BaseDB,
wallet_address: Address,
genesis_params: Dict[str, HeaderParams],
genesis_state: AccountState,
private_key: BaseKey = None
) -> 'BaseChain':
"""
Initializes the Chain from a genesis state.
"""
genesis_vm_class = cls.get_vm_class_for_block_timestamp()
account_db = genesis_vm_class.get_state_class().get_account_db_class()(
base_db
)
if genesis_state is None:
genesis_state = {}
# mutation
account_db = apply_state_dict(account_db, genesis_state)
account_db.persist(save_account_hash = True, wallet_address = cls.genesis_wallet_address)
genesis_header = BlockHeader(**genesis_params)
return cls.from_genesis_header(base_db, wallet_address = wallet_address, private_key = private_key, genesis_header = genesis_header)
@classmethod
def from_genesis_header(cls,
base_db: BaseDB,
wallet_address: Address,
genesis_header: BlockHeader,
private_key: BaseKey,
) -> 'BaseChain':
"""
Initializes the chain from the genesis header.
"""
chaindb = cls.get_chaindb_class()(base_db)
chaindb.persist_header(genesis_header)
chain_head_db = cls.get_chain_head_db_class()(base_db)
#window_for_this_block = math.ceil((genesis_header.timestamp+1)/TIME_BETWEEN_HEAD_HASH_SAVE) * TIME_BETWEEN_HEAD_HASH_SAVE
window_for_this_block = int(genesis_header.timestamp / TIME_BETWEEN_HEAD_HASH_SAVE) * TIME_BETWEEN_HEAD_HASH_SAVE + TIME_BETWEEN_HEAD_HASH_SAVE
chain_head_db.set_chain_head_hash(cls.genesis_wallet_address, genesis_header.hash)
chain_head_db.initialize_historical_root_hashes(chain_head_db.root_hash, window_for_this_block)
chain_head_db.persist(save_current_root_hash = True)
#chain_head_db.add_block_hash_to_chronological_window(genesis_header.hash, genesis_header.timestamp)
return cls(base_db, wallet_address = wallet_address, private_key=private_key)
def get_chain_at_block_parent(self, block: BaseBlock) -> BaseChain:
"""
Returns a `Chain` instance with the given block's parent at the chain head.
"""
try:
parent_header = self.get_block_header_by_hash(block.header.parent_hash)
except HeaderNotFound:
raise ValidationError("Parent ({0}) of block {1} not found".format(
block.header.parent_hash,
block.header.hash
))
init_header = self.create_header_from_parent(parent_header)
return type(self)(self.chaindb.db, self.wallet_address, self.private_key, init_header)
#
# VM API
#
def get_vm(self, header: BlockHeader=None, timestamp: Timestamp = None) -> 'BaseVM':
"""
        Returns the VM instance for the given header. If a timestamp is given instead, returns the VM for that timestamp.
"""
if header is not None and timestamp is not None:
raise ValueError("Cannot specify header and timestamp for get_vm(). Only one is allowed.")
if header is None or header == self.header:
header = self.header
if timestamp is not None:
header = header.copy(timestamp = timestamp)
vm_class = self.get_vm_class_for_block_timestamp(header.timestamp)
return vm_class(header=header,
chaindb=self.chaindb,
network_id=self.network_id)
else:
vm_class = self.get_vm_class_for_block_timestamp(header.timestamp)
return vm_class(header=header,
chaindb=self.chaindb,
network_id=self.network_id)
#
# Header API
#
def create_header_from_parent(self, parent_header, **header_params):
"""
Passthrough helper to the VM class of the block descending from the
given header.
"""
return self.get_vm_class_for_block_timestamp().create_header_from_parent(parent_header, **header_params)
def get_block_header_by_hash(self, block_hash: Hash32) -> BlockHeader:
"""
Returns the requested block header as specified by block hash.
        Raises HeaderNotFound if there's no block header with the given hash in the db.
"""
validate_word(block_hash, title="Block Hash")
return self.chaindb.get_block_header_by_hash(block_hash)
def get_canonical_head(self, chain_address = None):
"""
Returns the block header at the canonical chain head.
Raises CanonicalHeadNotFound if there's no head defined for the canonical chain.
"""
if chain_address is not None:
return self.chaindb.get_canonical_head(chain_address)
else:
return self.chaindb.get_canonical_head(self.wallet_address)
#
# Block API
#
def get_genesis_block_hash(self) -> Hash32:
return self.chaindb.get_canonical_block_hash(block_number = BlockNumber(0),
chain_address= self.genesis_wallet_address)
@to_tuple
def get_ancestors(self, limit: int, header: BlockHeader=None) -> Iterator[BaseBlock]:
"""
Return `limit` number of ancestor blocks from the current canonical head.
"""
if header is None:
header = self.header
lower_limit = max(header.block_number - limit, 0)
for n in reversed(range(lower_limit, header.block_number)):
yield self.get_block_by_number(BlockNumber(n), header.chain_address)
def get_block_by_hash(self, block_hash: Hash32) -> BaseBlock:
block_header = self.get_block_header_by_hash(block_hash)
return self.get_block_by_header(block_header)
def get_block_by_header(self, block_header: BlockHeader) -> BaseBlock:
"""
Returns the requested block as specified by the block header.
"""
block_class = self.get_vm_class_for_block_timestamp(block_header.timestamp).get_block_class()
send_transactions = self.chaindb.get_block_transactions(block_header, block_class.transaction_class)
receive_transactions = self.chaindb.get_block_receive_transactions(block_header,block_class.receive_transaction_class)
reward_bundle = self.chaindb.get_reward_bundle(block_header.reward_hash, block_class.reward_bundle_class)
output_block = block_class(block_header, send_transactions, receive_transactions, reward_bundle)
return output_block
def get_block_by_number(self, block_number: BlockNumber, chain_address: Address = None) -> BaseBlock:
if chain_address is None:
chain_address = self.wallet_address
block_hash = self.chaindb.get_canonical_block_hash(block_number, chain_address)
return self.get_block_by_hash(block_hash)
def get_blocks_on_chain(self, start: int, end: int, chain_address: Address = None) -> List[BaseBlock]:
if chain_address is None:
chain_address = self.wallet_address
if end == 0:
canonical_head_header = self.get_canonical_head(chain_address=chain_address)
head_block_number = canonical_head_header.block_number
end = head_block_number + 1
blocks = []
for block_number in range(start, end):
try:
new_block = self.get_block_by_number(BlockNumber(block_number), chain_address)
blocks.append(new_block)
except HeaderNotFound:
break
return blocks
def get_all_blocks_on_chain(self, chain_address: Address = None) -> List[BaseBlock]:
if chain_address is None:
chain_address = self.wallet_address
canonical_head_header = self.get_canonical_head(chain_address=chain_address)
head_block_number = canonical_head_header.block_number
return self.get_blocks_on_chain(0, head_block_number + 1, chain_address=chain_address)
def get_all_blocks_on_chain_by_head_block_hash(self, chain_head_hash: Hash32) -> List[BaseBlock]:
chain_head_header = self.get_block_header_by_hash(chain_head_hash)
chain_address = chain_head_header.chain_address
return self.get_all_blocks_on_chain(chain_address)
def get_blocks_on_chain_up_to_block_hash(self, chain_head_hash: Hash32, start_block_number: int = 0, limit: int = float('inf')) -> List[BaseBlock]:
chain_head_header = self.get_block_header_by_hash(chain_head_hash)
to_block_number = chain_head_header.block_number
if to_block_number > (start_block_number + limit):
to_block_number = (start_block_number + limit)
chain_address = chain_head_header.chain_address
return self.get_blocks_on_chain(start_block_number, to_block_number + 1, chain_address)
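    # Usage sketch (hypothetical names): read a whole chain or a slice of it.
    #
    #     whole_chain = chain.get_all_blocks_on_chain(some_chain_address)
    #     first_ten = chain.get_blocks_on_chain(0, 10, some_chain_address)
    #
    # Passing end=0 to get_blocks_on_chain reads up to and including the canonical
    # head, and iteration stops early if a block header is missing.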
def get_block(self) -> BaseBlock:
"""
Returns the current TIP block.
"""
return self.get_vm().block
def get_queue_block(self) -> BaseBlock:
"""
Returns the current TIP block.
"""
return self.get_vm().queue_block
# def get_block_by_hash(self, block_hash: Hash32) -> BaseBlock:
# """
# Returns the requested block as specified by block hash.
# """
# validate_word(block_hash, title="Block Hash")
# block_header = self.get_block_header_by_hash(block_hash)
# return self.get_block_by_header(block_header)
# def get_canonical_block_by_number(self, block_number: BlockNumber) -> BaseBlock:
# """
# Returns the block with the given number in the canonical chain.
#
# Raises BlockNotFound if there's no block with the given number in the
# canonical chain.
# """
# validate_uint256(block_number, title="Block Number")
# return self.get_block_by_hash(self.chaindb.get_canonical_block_hash(block_number))
#
# def get_canonical_block_hash(self, block_number: BlockNumber) -> Hash32:
# """
# Returns the block hash with the given number in the canonical chain.
#
# Raises BlockNotFound if there's no block with the given number in the
# canonical chain.
# """
# return self.chaindb.get_canonical_block_hash(block_number)
#
# Blockchain Database API
#
def save_chain_head_hash_to_trie_for_time_period(self,block_header):
timestamp = block_header.timestamp
        currently_saving_window = int(time.time() / TIME_BETWEEN_HEAD_HASH_SAVE) * TIME_BETWEEN_HEAD_HASH_SAVE + TIME_BETWEEN_HEAD_HASH_SAVE
if timestamp <= currently_saving_window:
#we have to go back and put it into the correct window, and update all windows after that
#lets only keep the past NUMBER_OF_HEAD_HASH_TO_SAVE block_head_root_hash
window_for_this_block = int(timestamp / TIME_BETWEEN_HEAD_HASH_SAVE) * TIME_BETWEEN_HEAD_HASH_SAVE + TIME_BETWEEN_HEAD_HASH_SAVE
#window_for_this_block = math.ceil((timestamp + 1)/TIME_BETWEEN_HEAD_HASH_SAVE) * TIME_BETWEEN_HEAD_HASH_SAVE
# if propogate_to_present:
self.chain_head_db.add_block_hash_to_timestamp(block_header.chain_address, block_header.hash, window_for_this_block)
# else:
# self.chain_head_db.add_block_hash_to_timestamp_without_propogating_to_present(self.wallet_address, block_header.hash, window_for_this_block)
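    # Worked example of the window arithmetic above (values hypothetical): with
    # TIME_BETWEEN_HEAD_HASH_SAVE = 1000 and a block timestamp of 12345,
    # window_for_this_block = int(12345 / 1000) * 1000 + 1000 = 13000, so the block
    # hash is filed under the boundary that closes the window containing it.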
#
# Queueblock API
#
def add_transaction_to_queue_block(self, transaction) -> None:
validate_is_queue_block(self.queue_block, title='self.queue_block')
if isinstance(transaction, BaseTransaction):
if not self.queue_block.contains_transaction(transaction):
self.queue_block = self.queue_block.add_transaction(transaction)
else:
self.logger.debug("found transaction in queueblock already, not adding again")
else:
if not self.queue_block.contains_receive_transaction(transaction):
self.queue_block = self.queue_block.add_receive_transaction(transaction)
else:
self.logger.debug("found receive transaction in queueblock already, not adding again")
def add_transactions_to_queue_block(self, transactions) -> None:
if not isinstance(transactions, list):
self.add_transaction_to_queue_block(transactions)
#self.logger.debug("tx_nonce after adding transaction = {}".format(self.queue_block.current_tx_nonce))
else:
for tx in transactions:
self.add_transaction_to_queue_block(tx)
def sign_queue_block(self, *args: Any, **kwargs: Any) -> BaseQueueBlock:
"""
Passthrough helper to the current VM class.
"""
return self.get_vm().sign_queue_block(*args, **kwargs)
def sign_header(self, *args: Any, **kwargs: Any) -> BlockHeader:
"""
Passthrough helper to the current VM class.
"""
return self.get_vm().sign_header(*args, **kwargs)
#
# Transaction API
#
def get_canonical_transaction(self, transaction_hash: Hash32) -> BaseTransaction:
"""
Returns the requested transaction as specified by the transaction hash
from the canonical chain.
Raises TransactionNotFound if no transaction with the specified hash is
found in the main chain.
"""
(block_hash, index, is_receive) = self.chaindb.get_transaction_index(transaction_hash)
block_header = self.get_block_header_by_hash(block_hash)
VM = self.get_vm_class_for_block_timestamp(block_header.timestamp)
if is_receive == False:
transaction = self.chaindb.get_transaction_by_index_and_block_hash(
block_hash,
index,
VM.get_transaction_class(),
)
else:
transaction = self.chaindb.get_receive_transaction_by_index_and_block_hash(
block_hash,
index,
VM.get_receive_transaction_class(),
)
if transaction.hash == transaction_hash:
return transaction
else:
raise TransactionNotFound("Found transaction {} instead of {} in block {} at {}".format(
encode_hex(transaction.hash),
encode_hex(transaction_hash),
block_hash,
index,
))
@functools.lru_cache(maxsize=32)
def get_transaction_by_block_hash_and_index(self, block_hash: Hash32, transaction_index: int) -> Union[BaseTransaction, BaseReceiveTransaction]:
num_send_transactions = self.chaindb.get_number_of_send_tx_in_block(block_hash)
header = self.chaindb.get_block_header_by_hash(block_hash)
vm = self.get_vm(header=header)
if transaction_index >= num_send_transactions:
# receive transaction
transaction_index = transaction_index - num_send_transactions
tx = self.chaindb.get_receive_transaction_by_index_and_block_hash(block_hash=block_hash,
transaction_index=transaction_index,
transaction_class=vm.get_receive_transaction_class())
else:
# send transaction
tx = self.chaindb.get_transaction_by_index_and_block_hash(block_hash=block_hash,
transaction_index=transaction_index,
transaction_class=vm.get_transaction_class())
return tx
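    # Usage sketch (hypothetical names): the transaction index counts send
    # transactions first, then receive transactions. For a block with 2 send
    # transactions, indices 0-1 return send transactions and index 2 returns the
    # first receive transaction.
    #
    #     tx = chain.get_transaction_by_block_hash_and_index(block_hash, 0)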
def create_transaction(self, *args: Any, **kwargs: Any) -> BaseTransaction:
"""
Passthrough helper to the current VM class.
"""
return self.get_vm().create_transaction(*args, **kwargs)
def create_and_sign_transaction(self, *args: Any, **kwargs: Any) -> BaseTransaction:
if self.private_key is None:
raise ValueError("Cannot sign transaction because private key not provided for chain instantiation")
transaction = self.create_transaction(*args, **kwargs)
signed_transaction = transaction.get_signed(self.private_key, self.network_id)
return signed_transaction
def create_and_sign_transaction_for_queue_block(self, *args: Any, **kwargs: Any) -> BaseTransaction:
if 'nonce' not in kwargs or kwargs['nonce'] is None:
kwargs['nonce'] = self.get_current_queue_block_nonce()
transaction = self.create_and_sign_transaction(*args, **kwargs)
self.add_transactions_to_queue_block(transaction)
return transaction
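    # Minimal sketch of producing a block locally, assuming the chain was created
    # with a private key. The keyword arguments shown are illustrative; the exact
    # transaction fields come from the VM's transaction class.
    #
    #     chain.create_and_sign_transaction_for_queue_block(
    #         gas_price=1,
    #         gas=21000,
    #         to=recipient_address,   # hypothetical
    #         value=1000,
    #         data=b"",
    #     )
    #     chain.import_current_queue_block()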
def get_current_queue_block_nonce(self) -> int:
if self.queue_block is None or self.queue_block.current_tx_nonce is None:
tx_nonce = self.get_vm().state.account_db.get_nonce(self.wallet_address)
else:
            tx_nonce = self.queue_block.current_tx_nonce
return tx_nonce
def create_receive_transaction(self, *args: Any, **kwargs: Any) -> BaseReceiveTransaction:
"""
Passthrough helper to the current VM class.
"""
return self.get_vm().create_receive_transaction(*args, **kwargs)
def get_receivable_transactions(self, address: Address) -> Tuple[List[BaseReceiveTransaction], List[TransactionKey]]:
#from hvm.rlp_templates.accounts import TransactionKey
tx_keys = self.get_vm().state.account_db.get_receivable_transactions(address)
if len(tx_keys) == 0:
return [], []
transactions = []
for tx_key in tx_keys:
tx = self.get_canonical_transaction(tx_key.transaction_hash)
transactions.append(tx)
return transactions, tx_keys
def create_receivable_transactions(self) -> List[BaseReceiveTransaction]:
tx_keys = self.get_vm().state.account_db.get_receivable_transactions(self.wallet_address)
if len(tx_keys) == 0:
return []
receive_transactions = []
for tx_key in tx_keys:
#find out if it is a receive or a refund
block_hash, index, is_receive = self.chaindb.get_transaction_index(tx_key.transaction_hash)
re_tx = self.get_vm().create_receive_transaction(
sender_block_hash = tx_key.sender_block_hash,
send_transaction_hash=tx_key.transaction_hash,
is_refund=is_receive,
)
receive_transactions.append(re_tx)
return receive_transactions
def populate_queue_block_with_receive_tx(self) -> List[BaseReceiveTransaction]:
receive_tx = self.create_receivable_transactions()
self.add_transactions_to_queue_block(receive_tx)
return receive_tx
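    # Usage sketch (hypothetical `chain`): stage all pending receive transactions
    # for this wallet on the queue block, then import it if there is anything to receive.
    #
    #     receive_txs = chain.populate_queue_block_with_receive_tx()
    #     if len(receive_txs) > 0:
    #         chain.import_current_queue_block()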
def get_block_receive_transactions_by_hash(
self,
block_hash: Hash32) -> List['BaseReceiveTransaction']:
block_header = self.get_block_header_by_hash(block_hash)
vm = self.get_vm(header = block_header)
receive_transaction_class = vm.get_block_class().receive_transaction_class
receive_transactions = self.chaindb.get_block_receive_transactions(header = block_header, transaction_class = receive_transaction_class)
return receive_transactions
def get_receive_tx_from_send_tx(self, tx_hash: Hash32) -> Optional['BaseReceiveTransaction']:
block_hash, index, is_receive = self.chaindb.get_transaction_index(tx_hash)
if is_receive:
raise ValidationError("The provided tx hash is not for a send transaction")
send_transaction = self.get_canonical_transaction(tx_hash)
block_children = self.chaindb.get_block_children(block_hash)
if block_children is not None:
block_children_on_correct_chain = [child_hash for child_hash in block_children
if self.chaindb.get_chain_wallet_address_for_block_hash(child_hash) == send_transaction.to]
for block_hash in block_children_on_correct_chain:
receive_transactions = self.get_block_receive_transactions_by_hash(block_hash)
for receive_tx in receive_transactions:
if receive_tx.send_transaction_hash == tx_hash:
return receive_tx
return None
    def get_transaction_by_index_and_block_hash(self, block_hash: Hash32, transaction_index: int) -> Union[BaseTransaction, BaseReceiveTransaction]:
        header = self.chaindb.get_block_header_by_hash(block_hash)
        vm = self.get_vm(header=header)
        return self.chaindb.get_transaction_by_index_and_block_hash(
            block_hash,
            transaction_index,
            vm.get_transaction_class(),
        )
#
# Chronological Chain api
#
def try_to_rebuild_chronological_chain_from_historical_root_hashes(self, historical_root_hash_timestamp: Timestamp) -> None:
try:
correct_chronological_block_window = self.get_block_hashes_that_are_new_for_this_historical_root_hash_timestamp(historical_root_hash_timestamp)
self.chain_head_db.save_chronological_block_window(correct_chronological_block_window, historical_root_hash_timestamp-TIME_BETWEEN_HEAD_HASH_SAVE)
except InvalidHeadRootTimestamp:
pass
def get_block_hashes_that_are_new_for_this_historical_root_hash_timestamp(self, historical_root_hash_timestamp: Timestamp) -> List[Tuple[Timestamp, Hash32]]:
        '''
        This is a time consuming function that gets all of the blocks that are new in this
        root hash, i.e. the ones that didn't exist in the previous window's root hash.
        :param historical_root_hash_timestamp:
        :return:
        '''
block_window_start = historical_root_hash_timestamp - TIME_BETWEEN_HEAD_HASH_SAVE
base_root_hash = self.chain_head_db.get_historical_root_hash(block_window_start)
new_root_hash = self.chain_head_db.get_historical_root_hash(historical_root_hash_timestamp)
if base_root_hash == new_root_hash:
return None
if base_root_hash is None or new_root_hash is None:
raise InvalidHeadRootTimestamp(
"Could not load block hashes for this historical_root_hash_timestamp because we don't have a root hash for this window or the previous window.")
base_head_block_hashes = set(self.chain_head_db.get_head_block_hashes(base_root_hash))
new_head_block_hashes = set(self.chain_head_db.get_head_block_hashes(new_root_hash))
diff_head_block_hashes = new_head_block_hashes - base_head_block_hashes
chronological_block_hash_timestamps = []
# now we have to run down each chain until we get to a block that is older than block_window_start
for head_block_hash in diff_head_block_hashes:
header = self.chaindb.get_block_header_by_hash(head_block_hash)
chronological_block_hash_timestamps.append([header.timestamp, head_block_hash])
while True:
if header.parent_hash == GENESIS_PARENT_HASH:
break
try:
header = self.chaindb.get_block_header_by_hash(header.parent_hash)
except HeaderNotFound:
break
if header.timestamp < block_window_start:
break
chronological_block_hash_timestamps.append([header.timestamp, header.hash])
assert len(chronological_block_hash_timestamps) > 0
chronological_block_hash_timestamps.sort()
return chronological_block_hash_timestamps
# def initialize_historical_root_hashes_and_chronological_blocks(self) -> None:
# '''
# This function rebuilds all historical root hashes, and chronological blocks, from the blockchain database. It starts with the saved root hash and works backwards.
# This function needs to be run from chain because it requires chain_head_db and chaindb.
# :return:
# '''
#
# self.chain_head_db.load_saved_root_hash()
# current_window = self.chain_head_db.current_window
# earliest_root_hash = self.chain_head_db.earliest_window
# #TIME_BETWEEN_HEAD_HASH_SAVE
#
# # 1) iterate down the root hash times
# # 2) create new chain_head_db with memorydb
# # 3) go through each chain and any blocks newer than the timestamp, save to chronological window.
# # 4) when you reach a block less than the timestamp, set it as chain head in the new memory based chain_head_db
# # 5) get the root hash
# # 6) set this root hash in the real chain_head_db at the correct timestamp.
#
# # A chronological block window holds all of the blocks starting at its timestamp, going to timestamp + TIME_BETWEEN_HEAD_HASH_SAVE
# # A historical root hash is the root hash at the given timestamp, so it includes all blocks earlier than that timestamp.
#
# # us a journaldb so that it doesnt write changes to the database.
# temp_chain_head_db = self.get_chain_head_db_class()(MemoryDB())
# #temp_chain_head_db = self.get_chain_head_db_class().load_from_saved_root_hash(JournalDB(self.db))
# for current_timestamp in range(current_window, earliest_root_hash-TIME_BETWEEN_HEAD_HASH_SAVE, -TIME_BETWEEN_HEAD_HASH_SAVE):
# self.logger.debug("Rebuilding chronological block window {}".format(current_timestamp))
# if current_timestamp < self.genesis_block_timestamp:
# break
#
# if current_timestamp == current_window:
# head_block_hashes = self.chain_head_db.get_head_block_hashes_list()
# else:
# head_block_hashes = temp_chain_head_db.get_head_block_hashes_list()
#
# # iterate over all chains
# for head_block_hash in head_block_hashes:
# current_block_hash = head_block_hash
# # now iterate over blocks in chain
# while True:
# current_header = self.chaindb.get_block_header_by_hash(current_block_hash)
# if current_header.timestamp >= current_timestamp:
# # add it to chronological block window in the real chain head db
# self.chain_head_db.add_block_hash_to_chronological_window(current_header.hash, current_header.timestamp)
# else:
# # The block is older than the timestamp. Set it as the chain head block hash in our temp chain head db
# temp_chain_head_db.set_chain_head_hash(current_header.chain_address, current_header.hash)
# break
# if current_header.parent_hash == GENESIS_PARENT_HASH:
# # we reached the end of the chain
# temp_chain_head_db.delete_chain_head_hash(current_header.chain_address)
# break
# # set the current block to the parent so we move down the chain
# current_block_hash = current_header.parent_hash
#
# # Now that we have gone through all chains, and removed any blocks newer than this timestamp, the root hash in the
# # temp chain head db is the correct one for this historical root hash timestamp.
# self.chain_head_db.save_single_historical_root_hash(temp_chain_head_db.root_hash, Timestamp(current_timestamp))
def initialize_historical_root_hashes_and_chronological_blocks(self) -> None:
'''
This function rebuilds all historical root hashes, and chronological blocks, from the blockchain database. It starts with the saved root hash and works backwards.
This function needs to be run from chain because it requires chain_head_db and chaindb.
:return:
'''
self.chain_head_db.load_saved_root_hash()
current_window = self.chain_head_db.current_window
earliest_root_hash = self.chain_head_db.earliest_window
#TIME_BETWEEN_HEAD_HASH_SAVE
# 1) iterate down the root hash times
# 2) create new chain_head_db with memorydb
# 3) go through each chain and any blocks newer than the timestamp, save to chronological window.
# 4) when you reach a block less than the timestamp, set it as chain head in the new memory based chain_head_db
# 5) get the root hash
# 6) set this root hash in the real chain_head_db at the correct timestamp.
# A chronological block window holds all of the blocks starting at its timestamp, going to timestamp + TIME_BETWEEN_HEAD_HASH_SAVE
# A historical root hash is the root hash at the given timestamp, so it includes all blocks earlier than that timestamp.
self.logger.debug("Rebuilding chronological block windows")
        # use a temporary in-memory chain_head_db so that it doesn't write changes to the real database.
temp_chain_head_db = self.get_chain_head_db_class()(MemoryDB())
#temp_chain_head_db = self.get_chain_head_db_class().load_from_saved_root_hash(JournalDB(self.db))
for current_timestamp in range(current_window, earliest_root_hash-TIME_BETWEEN_HEAD_HASH_SAVE, -TIME_BETWEEN_HEAD_HASH_SAVE):
if current_timestamp < self.genesis_block_timestamp:
break
head_block_hashes = self.chain_head_db.get_head_block_hashes_list()
# iterate over all chains
for head_block_hash in head_block_hashes:
current_block_hash = head_block_hash
# now iterate over blocks in chain
while True:
current_header = self.chaindb.get_block_header_by_hash(current_block_hash)
if current_header.timestamp >= current_timestamp:
# add it to chronological block window in the real chain head db
self.chain_head_db.add_block_hash_to_chronological_window(current_header.hash, current_header.timestamp)
else:
# The block is older than the timestamp. Set it as the chain head block hash in our temp chain head db
self.chain_head_db.set_chain_head_hash(current_header.chain_address, current_header.hash)
break
if current_header.parent_hash == GENESIS_PARENT_HASH:
# we reached the end of the chain
self.chain_head_db.delete_chain_head_hash(current_header.chain_address)
break
# set the current block to the parent so we move down the chain
current_block_hash = current_header.parent_hash
# Now that we have gone through all chains, and removed any blocks newer than this timestamp, the root hash in the
# temp chain head db is the correct one for this historical root hash timestamp.
self.chain_head_db.save_single_historical_root_hash(self.chain_head_db.root_hash, Timestamp(current_timestamp))
self.chain_head_db.persist()
# finally, lets load the saved root hash again so we are up to date.
self.chain_head_db.load_saved_root_hash()
#
# Execution API
#
def estimate_gas(self, transaction: BaseTransaction, at_header: BlockHeader=None) -> int:
"""
Returns an estimation of the amount of gas the given transaction will
use if executed on top of the block specified by the given header.
"""
if at_header is None:
at_header = self.get_canonical_head()
with self.get_vm(at_header).state_in_temp_block() as state:
return self.gas_estimator(state, transaction)
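    # Usage sketch (hypothetical names): estimate gas against the canonical head,
    # or pass at_header to estimate against an older state.
    #
    #     gas_estimate = chain.estimate_gas(signed_transaction)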
def validate_time_from_genesis_block(self,block):
if not block.is_genesis:
#first make sure enough time has passed since genesis. We need at least TIME_BETWEEN_HEAD_HASH_SAVE since genesis so that the
# genesis historical root hash only contains the genesis chain.
if block.header.timestamp < (self.genesis_block_timestamp + TIME_BETWEEN_HEAD_HASH_SAVE):
raise NotEnoughTimeBetweenBlocks("Not enough time has passed since the genesis block. Must wait at least {} seconds after genesis block. "
"This block timestamp is {}, genesis block timestamp is {}.".format(TIME_BETWEEN_HEAD_HASH_SAVE, block.header.timestamp, self.genesis_block_timestamp))
return
#
# Reverting block functions
#
def delete_canonical_chain(self, wallet_address: Address, vm: 'BaseVM', save_block_head_hash_timestamp:bool = True) -> None:
self.logger.debug("delete_canonical_chain. Chain address {}".format(encode_hex(wallet_address)))
self.chain_head_db.delete_chain(wallet_address, save_block_head_hash_timestamp)
self.chaindb.delete_canonical_chain(wallet_address)
vm.state.clear_account_keep_receivable_transactions_and_persist(wallet_address)
def set_parent_as_canonical_head(self, existing_block_header: BlockHeader, vm: 'BaseVM', save_block_head_hash_timestamp:bool = True) -> None:
block_parent_header = self.chaindb.get_block_header_by_hash(existing_block_header.parent_hash)
self.logger.debug("Setting new block as canonical head after reverting blocks. Chain address {}, header hash {}".format(encode_hex(existing_block_header.chain_address), encode_hex(block_parent_header.hash)))
if save_block_head_hash_timestamp:
self.save_chain_head_hash_to_trie_for_time_period(block_parent_header)
self.chain_head_db.set_chain_head_hash(block_parent_header.chain_address, block_parent_header.hash)
self.chaindb._set_as_canonical_chain_head(block_parent_header)
vm.state.revert_account_to_hash_keep_receivable_transactions_and_persist(block_parent_header.account_hash, block_parent_header.chain_address)
def revert_block(self, descendant_block_hash: Hash32) -> None:
self.logger.debug('Reverting block with hash {}'.format(encode_hex(descendant_block_hash)))
descendant_block_header = self.chaindb.get_block_header_by_hash(descendant_block_hash)
vm = self.get_vm(descendant_block_header)
self.chain_head_db.delete_block_hash_from_chronological_window(descendant_block_hash, descendant_block_header.timestamp)
self.chaindb.remove_block_from_all_parent_child_lookups(descendant_block_header, vm.get_block_class().receive_transaction_class)
self.chaindb.delete_all_block_children_lookups(descendant_block_hash)
self.revert_block_chronological_consistency_lookups(descendant_block_hash)
#for every one, re-add pending receive transaction for all receive transactions only if sending block still exists
#make all blocks unprocessed so that receivable transactions are not saved that came from one of the non-canonical blocks.
vm.reverse_pending_transactions(descendant_block_header)
# remove the block from the canonical chain. This must be done last because reversing the pending transactions requires that it
# is still in the canonical chain to look up transactions
self.chaindb.delete_block_from_canonical_chain(descendant_block_hash)
#self.chaindb.save_unprocessed_block_lookup(descendant_block_hash)
vm.state.account_db.persist()
def revert_block_chronological_consistency_lookups(self, block_hash: Hash32) -> None:
# check to see if there are any reward type 2 proofs. Then loop through each one to revert inconsistency lookups
block_header = self.chaindb.get_block_header_by_hash(block_hash)
block_class = self.get_vm_class_for_block_timestamp(block_header.timestamp).get_block_class()
reward_bundle = self.chaindb.get_reward_bundle(block_header.reward_hash, block_class.reward_bundle_class)
chronological_consistency_key = [block_header.timestamp, block_header.hash]
for proof in reward_bundle.reward_type_2.proof:
# timestamp, block hash of block responsible
sender_chain_header = self.chaindb.get_block_header_by_hash(proof.head_hash_of_sender_chain)
# The chronological consistency restrictions are placed on the block on top of the one giving the proof.
block_number_with_restrictions = sender_chain_header.block_number + 1
self.chaindb.delete_block_consistency_key(sender_chain_header.chain_address, block_number_with_restrictions, chronological_consistency_key)
def purge_block_and_all_children_and_set_parent_as_chain_head_by_hash(self, block_hash_to_delete: Hash32, save_block_head_hash_timestamp: bool = True) -> None:
genesis_block_hash = self.chaindb.get_canonical_block_hash(BlockNumber(0), self.genesis_wallet_address)
if block_hash_to_delete == genesis_block_hash:
raise TriedDeletingGenesisBlock("Attempted to delete genesis block. This is not allowed.")
block_header_to_delete = self.chaindb.get_block_header_by_hash(block_hash_to_delete)
self.purge_block_and_all_children_and_set_parent_as_chain_head(block_header_to_delete, save_block_head_hash_timestamp)
def purge_block_and_all_children_and_set_parent_as_chain_head(self, existing_block_header: BlockHeader, save_block_head_hash_timestamp: bool = True) -> None:
# First make sure it is actually in the canonical chain. If not, then we don't have anything to do.
if self.chaindb.is_in_canonical_chain(existing_block_header.hash):
vm = self.get_vm()
if existing_block_header.block_number == 0:
self.delete_canonical_chain(existing_block_header.chain_address, vm, save_block_head_hash_timestamp)
else:
#set the parent block as the new canonical head, and handle all the data for that
self.set_parent_as_canonical_head(existing_block_header, vm, save_block_head_hash_timestamp)
#1) delete chronological transactions, delete everything from chronological root hashes, delete children lookups
all_descendant_block_hashes = self.chaindb.get_all_descendant_block_hashes(existing_block_header.hash)
#first set all of the new chain heads and all the data that goes along with them
if all_descendant_block_hashes is not None:
for descendant_block_hash in all_descendant_block_hashes:
if not self.chaindb.is_block_unprocessed(descendant_block_hash):
descendant_block_header = self.chaindb.get_block_header_by_hash(descendant_block_hash)
if descendant_block_header.parent_hash not in all_descendant_block_hashes:
#this is the new head of a chain. set it as the new head for chronological root hashes
#except for children in this chain, because it will be off by 1 block. we already set this earlier
if descendant_block_header.chain_address != existing_block_header.chain_address:
if descendant_block_header.block_number == 0:
self.delete_canonical_chain(descendant_block_header.chain_address, vm, save_block_head_hash_timestamp)
else:
self.set_parent_as_canonical_head(descendant_block_header, vm, save_block_head_hash_timestamp)
                # Must persist now because revert_block creates new vm's for each block and could overwrite changes if we wait.
vm.state.account_db.persist()
#now we know what the new heads are, so we can deal with the rest of the descendants
for descendant_block_hash in all_descendant_block_hashes:
#here, since we are already going through all children, we don't need this function to purge children as well
if self.chaindb.is_block_unprocessed(descendant_block_hash):
self.purge_unprocessed_block(descendant_block_hash, purge_children_too = False)
else:
self.revert_block(descendant_block_hash)
self.revert_block(existing_block_header.hash)
#persist changes
self.chain_head_db.persist(True)
self.reinitialize()
def purge_unprocessed_block(self, block_hash, purge_children_too = True):
'''
Deletes all unprocessed block lookups, and unprocessed children lookups for this block and all children blocks.
Todo: delete saved block header, and saved transaction tries for each block as well
'''
self.logger.debug("purging unprocessed block")
if purge_children_too:
self.logger.debug("purging unprocessed children")
if self.chaindb.has_unprocessed_children(block_hash):
self.logger.debug("HAS UNPROCESSED CHILDREN BLOCKS")
children_block_hashes = self.chaindb.get_block_children(block_hash)
if children_block_hashes != None:
for child_block_hash in children_block_hashes:
#this includes the child in this actual chain as well as children from send transactions.
if not self.chaindb.is_block_unprocessed(child_block_hash):
raise UnprocessedBlockChildIsProcessed("In process of deleting children of unprocessed block, and found one that is processed. This should never happen")
else:
self.purge_unprocessed_block(child_block_hash)
try:
block = self.get_block_by_hash(block_hash)
chain = encode_hex(block.header.chain_address)
self.logger.debug("deleting unprocessed child block number {} on chain {}".format(block.number, chain))
self.chaindb.remove_block_from_unprocessed(block)
except HeaderNotFound:
pass
def import_chronological_block_window(self, block_list: List[BaseBlock], window_start_timestamp: Timestamp, save_block_head_hash_timestamp:bool = True, allow_unprocessed:bool =False) -> None:
validate_uint256(window_start_timestamp, title='timestamp')
if block_list is None or len(block_list) == 0:
return
#if we are given a block that is not one of the two allowed classes, try converting it.
if len(block_list) > 0 and not isinstance(block_list[0], self.get_vm(timestamp = block_list[0].header.timestamp).get_block_class()):
self.logger.debug("converting chain to correct class")
corrected_block_list = []
for block in block_list:
corrected_block = self.get_vm(timestamp = block.header.timestamp).convert_block_to_correct_class(block)
corrected_block_list.append(corrected_block)
block_list = corrected_block_list
#first we delete any blocks we have in the same window that are not in the new block list
local_chronological_timestamp_block_window = self.chain_head_db.load_chronological_block_window(window_start_timestamp)
if local_chronological_timestamp_block_window is not None:
local_block_hash_list = [x[1] for x in local_chronological_timestamp_block_window]
new_block_hash_list = [block.hash for block in block_list]
block_hashes_to_delete = effecient_diff(new_block_hash_list, local_block_hash_list)
if len(block_hashes_to_delete) > 0:
self.logger.debug("deleting existing blocks in chronological window {}".format(block_hashes_to_delete))
for block_hash_to_delete in block_hashes_to_delete:
self.purge_block_and_all_children_and_set_parent_as_chain_head_by_hash(block_hash_to_delete)
if len(block_list) > 0:
self.logger.debug("starting block import for chronological block window")
#if block list is empty, load the local historical root hashes and delete them all
for i in range(len(block_list)):
# Reset this after each block imports
blocks_that_have_been_reorganized = set()
wallet_address = block_list[i].header.chain_address
while True:
try:
self.import_block(block_list[i], wallet_address = wallet_address, save_block_head_hash_timestamp = save_block_head_hash_timestamp, allow_unprocessed=allow_unprocessed)
break
except (UnprocessedBlockNotAllowed, ParentNotFound) as e:
# Because of the timestamps being in seconds, there may be multiple blocks that depend on each other
# with the same timestamp, and they could be out of order. So we attempt to reorganize the blocks
# and import again. If it fails again we will raise the exception.
if block_list[i].header.hash in blocks_that_have_been_reorganized:
self.logger.debug("Already tried reorganizing this block.")
raise e
self.logger.debug("Attempting to reorganize chronological window for import")
blocks_that_have_been_reorganized.add(block_list[i].header.hash)
block_list = reorganize_chronological_block_list_for_correct_chronological_order_at_index(block_list, i, self.logger)
else:
self.logger.debug("importing an empty chronological window. going to make sure we have a saved historical root hash")
historical_root_hashes = self.chain_head_db.get_historical_root_hashes()
if historical_root_hashes is not None:
#historical_root_hashes_dict = dict(historical_root_hashes)
#if it does exist, make sure it is the same as the last one. if not, then delete all newer
try:
self.chain_head_db.propogate_previous_historical_root_hash_to_timestamp(window_start_timestamp + TIME_BETWEEN_HEAD_HASH_SAVE)
except AppendHistoricalRootHashTooOld:
self.logger.debug("Tried to propogate the previous historical root hash but there was none. This shouldn't happen")
#self.logger.debug("historical root hashes after chronological block import {}".format(self.chain_head_db.get_historical_root_hashes()))
def import_chain(self, block_list: List[BaseBlock], perform_validation: bool=True, save_block_head_hash_timestamp: bool = True, allow_replacement: bool = True) -> None:
if len(block_list) > 0:
self.logger.debug("importing chain")
#if we are given a block that is not one of the two allowed classes, try converting it.
if not isinstance(block_list[0], self.get_vm(timestamp = block_list[0].header.timestamp).get_block_class()):
self.logger.debug("converting chain to correct class")
corrected_block_list = []
for block in block_list:
corrected_block = self.get_vm(timestamp = block.header.timestamp).convert_block_to_correct_class(block)
corrected_block_list.append(corrected_block)
block_list = corrected_block_list
wallet_address = block_list[0].header.chain_address
for block in block_list:
self.import_block(block,
perform_validation = perform_validation,
save_block_head_hash_timestamp = save_block_head_hash_timestamp,
wallet_address = wallet_address,
allow_replacement = allow_replacement)
# If we started with a longer chain, and all the imported blocks match ours, our chain will remain longer even after importing the new one.
            # To fix this, we need to delete any of our blocks that extend beyond the length of the chain we are importing.
# First make sure the whole chain imported correctly. If not, then we don't need to do anything
try:
local_canonical_head = self.chaindb.get_canonical_head(wallet_address)
imported_canonical_head = block_list[-1].header
#self.logger.debug("imported chain head hash {}. actual chain head hash {}".format(encode_hex(imported_canonical_head.hash), encode_hex(local_canonical_head.hash)))
if imported_canonical_head.block_number < local_canonical_head.block_number:
if self.chaindb.is_in_canonical_chain(imported_canonical_head.hash):
# Our chain is the same as the imported one, but we have some extra blocks on top. In this case, we would like to prune our chain
# to match the imported one.
# We only need to purge the next block after the imported chain. The vm will automatically purge all children
self.logger.debug("After importing a chain, our local chain is identical except with additional blocks on top. We will prune the top blocks to bring"
" our chain in line with the imported one.")
block_number_to_purge = imported_canonical_head.block_number + 1
hash_to_purge = self.chaindb.get_canonical_block_hash(BlockNumber(block_number_to_purge), wallet_address)
self.purge_block_and_all_children_and_set_parent_as_chain_head_by_hash(hash_to_purge, save_block_head_hash_timestamp)
except CanonicalHeadNotFound:
pass
from hvm.utils.profile import profile
@profile(sortby='cumulative')
def import_block_with_profiler(self, *args, **kwargs):
self.import_block(*args, **kwargs)
def import_block(self, block: BaseBlock,
perform_validation: bool=True,
save_block_head_hash_timestamp = True,
wallet_address = None,
allow_unprocessed = True,
allow_replacement = True,
ensure_block_unchanged:bool = True,
microblock_origin: bool = False) -> BaseBlock:
#we handle replacing blocks here
#this includes deleting any blocks that it might be replacing
#then we start the journal db
#then within _import_block, it can commit the journal
        #but we won't persist until it gets out here again.
wallet_address = block.header.chain_address
# we need to re-initialize the chain for the new wallet address.
if wallet_address != self.wallet_address:
self.logger.debug("Changing to chain with wallet address {}".format(encode_hex(wallet_address)))
self.set_new_wallet_address(wallet_address=wallet_address)
journal_enabled = False
#if we are given a block that is not one of the two allowed classes, try converting it.
        #There is no reason why this should be a queueblock, because a queueblock would never come over the network;
        #it is always generated locally, and should have the correct class.
if not isinstance(block, self.get_vm(timestamp = block.header.timestamp).get_block_class()):
self.logger.debug("converting block to correct class")
block = self.get_vm(timestamp = block.header.timestamp).convert_block_to_correct_class(block)
if isinstance(block, self.get_vm(timestamp = block.header.timestamp).get_queue_block_class()):
# Set the queue block timestamp to now, when it is being imported.
block = block.copy(header=block.header.copy(timestamp=int(time.time())))
else:
if block.header.chain_address == self.genesis_wallet_address and block.header.block_number == 0:
try:
our_genesis_hash = self.chaindb.get_canonical_block_header_by_number(BlockNumber(0), self.genesis_wallet_address).hash
except HeaderNotFound:
raise NoGenesisBlockPresent("Tried importing a block, but we have no genesis block loaded. Need to load a genesis block first.")
if block.header.hash == our_genesis_hash:
return block
else:
raise ValidationError("Tried to import a new genesis block on the genesis chain. This is not allowed.")
if len(block.transactions) == 0 and len(block.receive_transactions) == 0:
# if block.reward_bundle is None:
# raise ValidationError('The block must have at least 1 transaction, or a non-zero reward bundle. Reward bundle = None')
if (block.reward_bundle.reward_type_1.amount == 0 and block.reward_bundle.reward_type_2.amount == 0):
raise RewardAmountRoundsToZero('The block has no send or receive transactions, and the reward bundle has amount = 0 for all types of rewards. This is not allowed. If this is just a reward block this usually means more time needs to pass before creating reward bundle.')
#if we are adding to the top of the chain, or beyond, we need to check for unprocessed blocks
#handle deleting any unprocessed blocks that will be replaced.
if block.number >= self.header.block_number:
existing_unprocessed_block_hash = self.chaindb.get_unprocessed_block_hash_by_block_number(self.wallet_address, block.number)
if (existing_unprocessed_block_hash != block.hash) and (existing_unprocessed_block_hash is not None):
if not allow_replacement:
raise ReplacingBlocksNotAllowed("Attempted to replace an unprocessed block.")
#check to make sure the parent matches the one we have
if block.number != 0:
# if block.number == self.header.block_number:
# existing_parent_hash = self.chaindb.get_canonical_head_hash(self.wallet_address)
# else:
existing_unprocessed_parent_hash = self.chaindb.get_unprocessed_block_hash_by_block_number(self.wallet_address, block.number-1)
if existing_unprocessed_parent_hash is not None:
if block.header.parent_hash != existing_unprocessed_parent_hash:
raise ParentNotFound("Parent is unprocessed. Parent hash = {}, this hash = {}".format(
encode_hex(existing_unprocessed_parent_hash), encode_hex(block.header.parent_hash)))
else:
try:
                            existing_canonical_parent_header = self.chaindb.get_canonical_block_header_by_number(block.header.block_number-1, block.header.chain_address)
                            if block.header.parent_hash != existing_canonical_parent_header.hash:
                                raise ParentNotFound("Parent is canonical. Parent hash = {}, this hash = {}".format(
                                    encode_hex(existing_canonical_parent_header.hash), encode_hex(block.header.parent_hash)))
except HeaderNotFound:
pass
#lets delete the unprocessed block, and its children, then import
self.enable_journal_db()
journal_record = self.record_journal()
journal_enabled = True
self.purge_unprocessed_block(existing_unprocessed_block_hash)
#check to see if this is the same hash that was already saved as unprocessed
if block.number > self.header.block_number:
#check that the parent hash matches what we have.
existing_parent_hash = self.chaindb.get_unprocessed_block_hash_by_block_number(self.wallet_address, block.number-1)
#we can allow this for unprocessed blocks as long as we have the parent in our database
if existing_parent_hash == block.header.parent_hash:
if block.hash == self.chaindb.get_unprocessed_block_hash_by_block_number(self.wallet_address, block.number):
#we already imported this one
return_block = block
else:
#save as unprocessed
if not allow_unprocessed:
raise UnprocessedBlockNotAllowed()
self.logger.debug("Saving block as unprocessed because parent on this chain is unprocessed")
return_block = self.save_block_as_unprocessed(block)
if journal_enabled:
                    self.logger.debug('committing journal')
self.commit_journal(journal_record)
self.persist_journal()
self.disable_journal_db()
return return_block
else:
raise ParentNotFound('Parent is unprocessed 2')
#now, if it is the head of the chain, lets make sure the parent hash is correct.
if block.number == self.header.block_number and block.number != 0:
if block.header.parent_hash != self.chaindb.get_canonical_head_hash(chain_address= self.wallet_address):
raise ParentNotFound("Block is at the head of the chain")
if block.number < self.header.block_number:
if not allow_replacement:
raise ReplacingBlocksNotAllowed("Attempted to replace a canonical block")
self.logger.debug("went into block replacing mode")
self.logger.debug("block.number = {}, self.header.block_number = {}".format(block.number,self.header.block_number))
self.logger.debug("this chains wallet address = {}, this block's sender = {}".format(encode_hex(self.wallet_address), encode_hex(block.sender)))
#check to see if we can load the existing canonical block
existing_block_header = self.chaindb.get_canonical_block_header_by_number(block.number, self.wallet_address)
if existing_block_header.hash == block.header.hash:
self.logger.debug("tried to import a block that has a hash that matches the local block. no import required.")
return block
else:
if not journal_enabled:
self.enable_journal_db()
journal_record = self.record_journal()
journal_enabled = True
self.purge_block_and_all_children_and_set_parent_as_chain_head(existing_block_header, save_block_head_hash_timestamp = save_block_head_hash_timestamp)
#check to see if this block is chronologically inconsistent - usually due to reward block that used proof from this chain
block_hashes_leading_to_inconsistency = self.check_block_chronological_consistency(block)
if len(block_hashes_leading_to_inconsistency) > 0:
if not allow_replacement:
raise ReplacingBlocksNotAllowed("Attempted to import chronologically inconsistent block. Block hashes leading to inconsistency = {}.".format([encode_hex(x) for x in block_hashes_leading_to_inconsistency]))
else:
# revert all of the blocks leading to the inconsistency.
if not journal_enabled:
self.enable_journal_db()
journal_record = self.record_journal()
journal_enabled = True
for block_hash in block_hashes_leading_to_inconsistency:
self.logger.debug("Purging block {} to preserve chronological consistency".format(encode_hex(block_hash)))
block_header = self.chaindb.get_block_header_by_hash(block_hash)
# This should be impossible, but lets double check that none of these blocks are on the same chain as this block
if block_header.chain_address == block.header.chain_address:
raise Exception("Tried to revert chronologically inconsistent block on this same chain. This should never happen...")
self.purge_block_and_all_children_and_set_parent_as_chain_head(block_header, save_block_head_hash_timestamp = save_block_head_hash_timestamp)
try:
return_block = self._import_block(block = block,
perform_validation = perform_validation,
save_block_head_hash_timestamp = save_block_head_hash_timestamp,
allow_unprocessed = allow_unprocessed,
ensure_block_unchanged= ensure_block_unchanged,
microblock_origin = microblock_origin)
# handle importing unprocessed blocks here because doing it recursively results in maximum recursion depth exceeded error
if not self.chaindb.is_block_unprocessed(return_block.hash):
self.logger.debug("Checking to see if block has unprocessed children")
self.import_all_unprocessed_descendants(return_block.hash,
perform_validation= True,
save_block_head_hash_timestamp = save_block_head_hash_timestamp,
allow_unprocessed = True)
except Exception as e:
if journal_enabled:
self.logger.debug('discarding journal')
self.discard_journal(journal_record)
self.disable_journal_db()
raise e
if journal_enabled:
            self.logger.debug('committing journal')
self.commit_journal(journal_record)
self.persist_journal()
self.disable_journal_db()
return return_block
def _import_block(self, block: BaseBlock,
perform_validation: bool=True,
save_block_head_hash_timestamp = True,
allow_unprocessed = True,
ensure_block_unchanged: bool = True,
microblock_origin: bool = False) -> BaseBlock:
"""
Imports a complete block.
"""
self.logger.debug("importing block {} with number {}".format(block.__repr__(), block.number))
self.validate_time_from_genesis_block(block)
if isinstance(block, self.get_vm(timestamp = block.header.timestamp).get_queue_block_class()):
# If it was a queueblock, then the header will have changed after importing
perform_validation = False
ensure_block_unchanged = False
queue_block = True
else:
queue_block = False
if not self.chaindb.is_block_unprocessed(block.header.parent_hash):
#this part checks to make sure the parent exists
try:
vm = self.get_vm(timestamp = block.header.timestamp)
self.logger.debug("importing block with vm {}".format(vm.__repr__()))
if queue_block:
imported_block = vm.import_block(block, private_key = self.private_key)
else:
imported_block = vm.import_block(block)
# Validate the imported block.
if ensure_block_unchanged:
if microblock_origin:
# this started out as a microblock. So we only ensure the microblock fields are unchanged.
self.logger.debug('ensuring block unchanged. microblock correction')
corrected_micro_block = block.copy(header = block.header.copy(
receipt_root = imported_block.header.receipt_root,
bloom = imported_block.header.bloom,
gas_limit = imported_block.header.gas_limit,
gas_used = imported_block.header.gas_used,
account_hash = imported_block.header.account_hash,
account_balance = imported_block.header.account_balance,
))
ensure_imported_block_unchanged(imported_block, corrected_micro_block)
else:
self.logger.debug('ensuring block unchanged')
ensure_imported_block_unchanged(imported_block, block)
else:
self.logger.debug('Not checking block for changes.')
if perform_validation:
self.validate_block(imported_block)
#self.chain_head_db.set_chain_head_hash(self.wallet_address, imported_block.header.hash)
if save_block_head_hash_timestamp:
self.chain_head_db.add_block_hash_to_chronological_window(imported_block.header.hash, imported_block.header.timestamp)
self.save_chain_head_hash_to_trie_for_time_period(imported_block.header)
self.chain_head_db.set_chain_head_hash(imported_block.header.chain_address, imported_block.header.hash)
self.chain_head_db.persist(True)
self.chaindb.persist_block(imported_block)
vm.state.account_db.persist(save_account_hash = True, wallet_address = self.wallet_address)
#here we must delete the unprocessed lookup before importing children
#because the children cannot be imported if their chain parent is unprocessed.
#but we cannot delete the lookup for unprocessed children yet.
self.chaindb.remove_block_from_unprocessed(imported_block)
# Add chronological consistency lookups
self.save_block_chronological_consistency_lookups(imported_block)
try:
self.header = self.create_header_from_parent(self.get_canonical_head())
except CanonicalHeadNotFound:
self.header = self.get_vm_class_for_block_timestamp().create_genesis_block(self.wallet_address).header
self.queue_block = None
self.logger.debug(
'IMPORTED_BLOCK: number %s | hash %s',
imported_block.number,
encode_hex(imported_block.hash),
)
# Make sure our wallet address hasn't magically changed
if self.wallet_address != imported_block.header.chain_address:
raise ValidationError("Attempted to import a block onto the wrong chain.")
return_block = imported_block
except ReceivableTransactionNotFound as e:
if not allow_unprocessed:
raise UnprocessedBlockNotAllowed()
self.logger.debug("Saving block as unprocessed because of ReceivableTransactionNotFound error: {}".format(e))
return_block = self.save_block_as_unprocessed(block)
if self.raise_errors:
raise e
except RewardProofSenderBlockMissing as e:
if not allow_unprocessed:
raise UnprocessedBlockNotAllowed()
self.logger.debug("Saving block as unprocessed because of RewardProofSenderBlockMissing error: {}".format(e))
return_block = self.save_block_as_unprocessed(block)
else:
if not allow_unprocessed:
raise UnprocessedBlockNotAllowed()
self.logger.debug("Saving block as unprocessed because parent on this chain is unprocessed")
return_block = self.save_block_as_unprocessed(block)
return return_block
def import_all_unprocessed_descendants(self, block_hash, *args, **kwargs):
# 1) get unprocessed children
# 2) loop through and import
# 3) if child imports, add their unprocessed children to list, and delete that block from unprocessed
# 4) if list of unprocessed children has 0 length, break
# need to step one level at a time. We use a queue to achieve this effect. It won't get to the next level
# until it finishes all of the blocks on this level. So it goes one level at a time.
if self.chaindb.has_unprocessed_children(block_hash):
self.logger.debug("HAS UNPROCESSED BLOCKS")
# try to import all children
children_block_hashes = self.chaindb.get_block_children(block_hash)
if children_block_hashes != None:
block_hashes_to_import = deque(children_block_hashes)
# iterate over children
while True:
# remove from right side
current_block_hash_to_import = block_hashes_to_import.pop()
if self.chaindb.is_block_unprocessed(current_block_hash_to_import):
self.logger.debug("importing child block")
try:
child_block = self.get_block_by_hash(current_block_hash_to_import)
if child_block.header.chain_address != self.wallet_address:
#self.logger.debug("Changing to chain with wallet address {}".format(encode_hex(child_block.header.chain_address)))
self.set_new_wallet_address(wallet_address=child_block.header.chain_address)
self._import_block(child_block, *args, **kwargs)
#if the block imported, add its children the the deque
if not self.chaindb.is_block_unprocessed(current_block_hash_to_import):
# it imported successfully
if self.chaindb.has_unprocessed_children(current_block_hash_to_import):
children_block_hashes = self.chaindb.get_block_children(current_block_hash_to_import)
if children_block_hashes != None:
block_hashes_to_import.extendleft(children_block_hashes)
# we have queued up its children to be imported. Assuming exceptions don't occur, we can remove this block from the unprocessed children lookup.
self.chaindb.delete_unprocessed_children_blocks_lookup(current_block_hash_to_import)
except Exception as e:
self.logger.error("Tried to import an unprocessed child block and got this error {}".format(e))
if len(block_hashes_to_import) == 0:
return
self.chaindb.delete_unprocessed_children_blocks_lookup(block_hash)
def save_block_chronological_consistency_lookups(self, block: BaseBlock) -> None:
        '''
        We need to require that the proof sender's chain doesn't add a block between their claimed chain_head_hash
        and the timestamp of the block being imported.
        :param block:
        :return:
        '''
block_header = block.header
reward_bundle = self.chaindb.get_reward_bundle(block_header.reward_hash, block.reward_bundle_class)
chronological_consistency_key = [block_header.timestamp, block_header.hash]
for proof in reward_bundle.reward_type_2.proof:
# timestamp, block hash of block responsible
sender_chain_header = self.chaindb.get_block_header_by_hash(proof.head_hash_of_sender_chain)
# The chronological consistency restrictions are placed on the block on top of the one giving the proof.
block_number_with_restrictions = sender_chain_header.block_number + 1
self.logger.debug("saving chronological consistency lookup for chain {}, block {}, timestamp {}".format(encode_hex(sender_chain_header.chain_address), block_number_with_restrictions, block_header.timestamp))
self.chaindb.add_block_consistency_key(sender_chain_header.chain_address, block_number_with_restrictions, chronological_consistency_key)
def save_block_as_unprocessed(self, block):
        #if it is already saved as unprocessed, do nothing
if self.chaindb.is_block_unprocessed(block.hash):
return block
#before adding to unprocessed blocks, make sure the receive transactions are valid
# for receive_transaction in block.receive_transactions:
# #there must be at least 1 to get this far
# receive_transaction.validate()
#now we add it to unprocessed blocks
self.chaindb.save_block_as_unprocessed(block)
#save the transactions to db
vm = self.get_vm(timestamp = block.header.timestamp)
vm.save_items_to_db_as_trie(block.transactions, block.header.transaction_root)
vm.save_items_to_db_as_trie(block.receive_transactions, block.header.receive_transaction_root)
#we don't want to persist because that will add it to the canonical chain.
        #We just want to save it to the database so we can process it later if need be.
self.chaindb.persist_non_canonical_block(block)
#self.chaindb.persist_block(block)
try:
self.header = self.create_header_from_parent(self.get_canonical_head())
except CanonicalHeadNotFound:
self.header = self.get_vm_class_for_block_timestamp().create_genesis_block(self.wallet_address).header
self.queue_block = None
self.logger.debug(
'SAVED_BLOCK_AS_UNPROCESSED: number %s | hash %s',
block.number,
encode_hex(block.hash),
)
return block
def import_current_queue_block(self) -> BaseBlock:
return self.import_block(self.queue_block)
def import_current_queue_block_with_reward(self, node_staking_score_list: List[NodeStakingScore]) -> BaseBlock:
reward_bundle = self.get_consensus_db().create_reward_bundle_for_block(self.wallet_address, node_staking_score_list, at_timestamp=Timestamp(int(time.time())))
# #testing
# reward_bundle = reward_bundle.copy(reward_type_2 = reward_bundle.reward_type_2.copy(amount=0))
self.queue_block = self.queue_block.copy(reward_bundle = reward_bundle)
return self.import_current_queue_block()
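    # Usage sketch (hypothetical `node_staking_scores` gathered from peers): attach
    # a freshly created reward bundle to the queue block and import it like any
    # other locally produced block.
    #
    #     reward_block = chain.import_current_queue_block_with_reward(node_staking_scores)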
def get_all_chronological_blocks_for_window(self, window_timestamp:Timestamp) -> List[BaseBlock]:
validate_uint256(window_timestamp, title='timestamp')
chronological_blocks = self.chain_head_db.load_chronological_block_window(window_timestamp)
if chronological_blocks is None:
return None
else:
list_of_blocks = []
for chronological_block in chronological_blocks:
block_hash = chronological_block[1]
new_block = self.get_block_by_hash(block_hash)
list_of_blocks.append(new_block)
return list_of_blocks
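    # Usage sketch (hypothetical names): load every block saved in a chronological
    # window. The timestamp is expected to be a window boundary, i.e. a multiple of
    # TIME_BETWEEN_HEAD_HASH_SAVE; None is returned if no window is saved.
    #
    #     blocks = chain.get_all_chronological_blocks_for_window(window_timestamp)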
#
# Chronologically consistent blockchain db API
#
def check_block_chronological_consistency(self, block: BaseBlock) -> List[Hash32]:
'''
Checks to see if the block breaks any chronological consistency. If it does, it will return a list of blocks that need to be reverted for this block to be imported
returns list of block hashes that have to be reverted
:param block:
:return:
'''
consistency_keys = self.chaindb.get_block_chronological_consistency_keys(block.header.chain_address, block.header.block_number)
block_hashes_to_revert = list()
for consistency_key in consistency_keys:
if consistency_key[0] > block.header.timestamp:
block_hashes_to_revert.append(consistency_key[1])
return block_hashes_to_revert
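    # Usage sketch (hypothetical names): before importing a block, find out which
    # existing blocks would have to be reverted to keep reward proofs
    # chronologically consistent.
    #
    #     conflicting = chain.check_block_chronological_consistency(new_block)
    #     if len(conflicting) > 0:
    #         print("import requires reverting {} blocks".format(len(conflicting)))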
#
# Validation API
#
def get_allowed_time_of_next_block(self, chain_address: Address = None) -> Timestamp:
if chain_address is None:
chain_address = self.wallet_address
try:
canonical_head = self.chaindb.get_canonical_head(chain_address=chain_address)
except CanonicalHeadNotFound:
return Timestamp(0)
vm = self.get_vm(timestamp=Timestamp(int(time.time())))
min_allowed_time_between_blocks = vm.min_time_between_blocks
return Timestamp(canonical_head.timestamp + min_allowed_time_between_blocks)
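# Worked example (illustrative only): if the canonical head of the chain was created at
# timestamp 1_600_000_000 and the VM's min_time_between_blocks is 10 seconds, the next
# block on that chain is allowed at Timestamp(1_600_000_010). If the chain has no
# canonical head yet, Timestamp(0) is returned, i.e. a block may be created immediately.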
def validate_block(self, block: BaseBlock) -> None:
"""
Performs validation on a block that is either being mined or imported.
Since block validation (specifically the uncle validation) must have
access to the ancestor blocks, this validation must occur at the Chain
level.
"""
self.validate_gaslimit(block.header)
def validate_gaslimit(self, header: BlockHeader) -> None:
"""
Validate the gas limit on the given header.
"""
#parent_header = self.get_block_header_by_hash(header.parent_hash)
#low_bound, high_bound = compute_gas_limit_bounds(parent_header)
#if header.gas_limit < low_bound:
# raise ValidationError(
# "The gas limit on block {0} is too low: {1}. It must be at least {2}".format(
# encode_hex(header.hash), header.gas_limit, low_bound))
if header.gas_limit > BLOCK_GAS_LIMIT:
raise ValidationError(
"The gas limit on block {0} is too high: {1}. It must be at most {2}".format(
encode_hex(header.hash), header.gas_limit, BLOCK_GAS_LIMIT))
def validate_block_specification(self, block) -> bool:
'''
This validates everything we can without looking at the blockchain database. It doesn't need to assume
that we have the block that sent the transactions.
Checks that this performs:
block signature
send transaction signatures
receive transaction signatures - we don't need to check these; they don't add any security
signatures of the send transactions within the receive transactions
send transaction root matches the transactions
receive transaction root matches the receive transactions
'''
if not isinstance(block, self.get_vm(timestamp = block.header.timestamp).get_block_class()):
self.logger.debug("converting block to correct class")
block = self.get_vm(timestamp = block.header.timestamp).convert_block_to_correct_class(block)
block.header.check_signature_validity()
for transaction in block.transactions:
transaction.validate()
for transaction in block.receive_transactions:
transaction.validate()
send_tx_root_hash, _ = make_trie_root_and_nodes(block.transactions)
if block.header.transaction_root != send_tx_root_hash:
raise ValidationError("Block has invalid transaction root")
receive_tx_root_hash, _ = make_trie_root_and_nodes(block.receive_transactions)
if block.header.receive_transaction_root != receive_tx_root_hash:
raise ValidationError("Block has invalid receive transaction root")
return True
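# Hedged usage sketch (assumption, not from the original module): since this method only
# checks signatures and trie roots, it can run on a block received from a peer before any
# database lookups, e.g.:
#
#   if chain.validate_block_specification(candidate_block):
#       chain.import_block(candidate_block)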
#
# Stake API
#
def get_mature_stake(self, wallet_address: Address = None, raise_canonical_head_not_found_error:bool = False) -> int:
if wallet_address is None:
wallet_address = self.wallet_address
coin_mature_time_for_staking = self.get_vm(timestamp = Timestamp(int(time.time()))).consensus_db.coin_mature_time_for_staking
return self.chaindb.get_mature_stake(wallet_address, coin_mature_time_for_staking, raise_canonical_head_not_found_error = raise_canonical_head_not_found_error)
# Gets the stake for the timestamp corresponding to the chronological block window, i.e. all blocks for the next 1000 seconds.
def get_mature_stake_for_chronological_block_window(self, chronological_block_window_timestamp: Timestamp, timestamp_for_stake: Timestamp = None):
if timestamp_for_stake is not None and timestamp_for_stake < chronological_block_window_timestamp:
raise ValidationError("Cannot get chronological block window stake for a timestamp before the window")
if timestamp_for_stake is None:
timestamp_for_stake = int(time.time())
chronological_block_hash_timestamps = self.chain_head_db.load_chronological_block_window(chronological_block_window_timestamp)
chronological_block_hashes = [x[1] for x in chronological_block_hash_timestamps]
coin_mature_time_for_staking = self.get_vm(timestamp=timestamp_for_stake).consensus_db.coin_mature_time_for_staking
return self.chaindb.get_total_block_stake_of_block_hashes(chronological_block_hashes, coin_mature_time_for_staking, timestamp_for_stake)
def get_new_block_hash_to_test_peer_node_health(self) -> Hash32:
'''
Returns the hash of one of the newest blocks we have seen.
:return:
'''
before_this_timestamp = int(time.time()) - 60 # ask the peer for a block that was received more than 1 minute ago
current_historical_window = int(time.time() / TIME_BETWEEN_HEAD_HASH_SAVE) * TIME_BETWEEN_HEAD_HASH_SAVE
for timestamp in range(current_historical_window,
current_historical_window-NUMBER_OF_HEAD_HASH_TO_SAVE*TIME_BETWEEN_HEAD_HASH_SAVE,
-1* TIME_BETWEEN_HEAD_HASH_SAVE):
chronological_window = self.chain_head_db.load_chronological_block_window(timestamp)
if chronological_window is not None:
chronological_window.sort(key=lambda x: -1*x[0])
for timestamp_hash in chronological_window:
if timestamp_hash[0] < before_this_timestamp:
return timestamp_hash[1]
#if we get to here then we don't have any blocks within all chronological block windows...
raise NoChronologicalBlocks()
#
# Min Block Gas API used for throttling the network
#
def re_initialize_historical_minimum_gas_price_at_genesis(self) -> None:
'''
Re-initializes the system with the last saved minimum gas price and network tpc capability.
'''
hist_min_gas_price = self.chaindb.load_historical_minimum_gas_price()
hist_tpc_cap = self.chaindb.load_historical_network_tpc_capability()
hist_tx_per_centisecond = self.chaindb.load_historical_tx_per_centisecond()
if hist_min_gas_price is not None:
init_min_gas_price = hist_min_gas_price[-1][1]
else:
init_min_gas_price = 1
if hist_tpc_cap is not None:
init_tpc_cap = hist_tpc_cap[-1][1]
else:
init_tpc_cap = self.get_local_tpc_cap()
if hist_tx_per_centisecond is not None:
init_tpc = hist_tx_per_centisecond[-1][1]
else:
init_tpc = None
self.chaindb.initialize_historical_minimum_gas_price_at_genesis(init_min_gas_price, init_tpc_cap, init_tpc)
def update_current_network_tpc_capability(self, current_network_tpc_cap: int, update_min_gas_price:bool = True) -> None:
validate_uint256(current_network_tpc_cap, title="current_network_tpc_cap")
self.chaindb.save_current_historical_network_tpc_capability(current_network_tpc_cap)
if update_min_gas_price:
current_centisecond = int(time.time()/100) * 100
timestamp_min_gas_price_updated = self.update_tpc_from_chronological(update_min_gas_price = True)
if timestamp_min_gas_price_updated > current_centisecond:
self.chaindb._recalculate_historical_mimimum_gas_price(current_centisecond)
def update_tpc_from_chronological(self, update_min_gas_price: bool = True):
#start at the newest window; if the tpc is the same as what is already saved, stop, but if it is different keep going back
self.logger.debug("Updating tpc from chronological")
current_historical_window = int(time.time()/TIME_BETWEEN_HEAD_HASH_SAVE) * TIME_BETWEEN_HEAD_HASH_SAVE
current_centisecond = int(time.time()/100) * 100
#load this once to find out if it's None. If it is None, then the node just started, so let's only go back 50 steps
#hist_tpc = self.chaindb.load_historical_tx_per_centisecond()
end_outer = current_historical_window-20*TIME_BETWEEN_HEAD_HASH_SAVE
for historical_window_timestamp in range(current_historical_window,
end_outer,
-TIME_BETWEEN_HEAD_HASH_SAVE):
tpc_sum_dict = {}
chronological_block_window = self.chain_head_db.load_chronological_block_window(historical_window_timestamp)
self.logger.debug('loading chronological block window for timestamp {}'.format(historical_window_timestamp))
#zero the dictionary
if historical_window_timestamp+TIME_BETWEEN_HEAD_HASH_SAVE < current_centisecond:
end = historical_window_timestamp +TIME_BETWEEN_HEAD_HASH_SAVE
else:
end = current_centisecond+100
for timestamp in range(historical_window_timestamp, end, 100):
tpc_sum_dict[timestamp] = 0
if chronological_block_window is not None:
for timestamp_block_hash in chronological_block_window:
#first count up the tx in the block
#if it is 0, then set it to 1, in case the block contains only receive transactions
num_tx_in_block = self.chaindb.get_number_of_total_tx_in_block(timestamp_block_hash[1])
if num_tx_in_block == 0:
num_tx_in_block = 1
#then add them to the dict
centisecond_window_for_block = int(timestamp_block_hash[0]/100) * 100
if centisecond_window_for_block <= end:
tpc_sum_dict[centisecond_window_for_block] += num_tx_in_block
same_as_database = self._update_tpc_from_chronological(tpc_sum_dict)
if same_as_database == True:
break
if update_min_gas_price:
self.chaindb._recalculate_historical_mimimum_gas_price(historical_window_timestamp + TIME_BETWEEN_HEAD_HASH_SAVE)
return historical_window_timestamp+TIME_BETWEEN_HEAD_HASH_SAVE
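# Worked example of the centisecond bucketing above (illustrative only): a chronological
# entry timestamped 1_600_000_123 for a block containing 4 transactions is counted in
# bucket int(1_600_000_123 / 100) * 100 = 1_600_000_100, i.e.
# tpc_sum_dict[1_600_000_100] += 4. A block reported as containing zero transactions
# still contributes 1 so that it registers in the throughput estimate.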
def _update_tpc_from_chronological(self, new_hist_tpc_dict):
'''
returns True if they are all the same as what we already had in the database, otherwise it returns False
'''
if not isinstance(new_hist_tpc_dict, dict):
raise ValidationError("Expected a dict. Didn't get a dict.")
hist_tpc = self.chaindb.load_historical_tx_per_centisecond()
difference_found = False
if hist_tpc is None:
hist_tpc = list(new_hist_tpc_dict.items())
else:
hist_tpc_dict = dict(hist_tpc)
for timestamp, tpc in new_hist_tpc_dict.items():
if timestamp not in hist_tpc_dict or hist_tpc_dict[timestamp] != tpc:
#if tpc != 0:
difference_found = True
hist_tpc_dict[timestamp] = tpc
hist_tpc = list(hist_tpc_dict.items())
#print(hist_tpc)
#save it to db
self.chaindb.save_historical_tx_per_centisecond(hist_tpc, de_sparse = False)
return not difference_found
def get_local_tpc_cap(self) -> int:
#base it on the time it takes to import a block
from hvm.utils.profile import profile
from hvm.db.backends.memory import MemoryDB
from hvm import MainnetChain
from hvm.chains.mainnet import (
MAINNET_TPC_CAP_TEST_GENESIS_PARAMS,
MAINNET_TPC_CAP_TEST_GENESIS_STATE,
TPC_CAP_TEST_GENESIS_PRIVATE_KEY,
MAINNET_TPC_CAP_TEST_BLOCK_TO_IMPORT,
)
from hvm.constants import random_private_keys
db = MemoryDB()
chain = MainnetChain.from_genesis(db,
TPC_CAP_TEST_GENESIS_PRIVATE_KEY.public_key.to_canonical_address(),
MAINNET_TPC_CAP_TEST_GENESIS_PARAMS,
MAINNET_TPC_CAP_TEST_GENESIS_STATE,
private_key = TPC_CAP_TEST_GENESIS_PRIVATE_KEY)
block_to_import = chain.get_vm(timestamp = MAINNET_TPC_CAP_TEST_BLOCK_TO_IMPORT['header']['timestamp']).get_block_class().from_dict(MAINNET_TPC_CAP_TEST_BLOCK_TO_IMPORT)
chain.genesis_wallet_address = MAINNET_TPC_CAP_TEST_GENESIS_PARAMS['chain_address']
chain.genesis_block_timestamp = MAINNET_TPC_CAP_TEST_GENESIS_PARAMS['timestamp']
#@profile(sortby='cumulative')
def temp():
chain.import_block(block_to_import)
start_time = time.time()
temp()
duration = time.time()-start_time
#self.logger.debug('duration = {} seconds'.format(duration))
tx_per_centisecond = int(100/duration)
return tx_per_centisecond
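# Worked example (illustrative only): if importing the reference block takes 0.25 seconds,
# the function returns int(100 / 0.25) = 400 as this node's local tpc cap.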
#
# Consensus DB passthrough's that depend on block timestamp
#
def get_signed_peer_score(self, private_key: PrivateKey,
network_id: int,
peer_wallet_address: Address,
after_block_number: BlockNumber = None,
) -> NodeStakingScore:
# This function should always use the vm for the current timestamp, so we don't need to ask for a timestamp
return self.get_consensus_db(timestamp=Timestamp(int(time.time()))).get_signed_peer_score(private_key,
network_id,
peer_wallet_address,
after_block_number)
def get_signed_peer_score_string_private_key(self,
private_key_string: bytes,
peer_wallet_address: Address,
after_block_number: BlockNumber = None,
) -> NodeStakingScore:
network_id = self.network_id
# This always occurs at the current time, so we take the current consensus db
return self.get_consensus_db(timestamp=Timestamp(int(time.time()))).get_signed_peer_score_string_private_key(private_key_string,
network_id,
peer_wallet_address,
after_block_number)
def validate_node_staking_score(self,
node_staking_score: NodeStakingScore,
since_block_number: BlockNumber) -> None:
# This depends on when the staking score was created. So get the consensus db given by that timestamp
return self.get_consensus_db(timestamp = node_staking_score.timestamp).validate_node_staking_score(node_staking_score, since_block_number)
def save_health_request(self, peer_wallet_address: Address, response_time_in_micros: int = float('inf')) -> None:
# This always occurs at the current time, so we take the current consensus db
return self.get_consensus_db(timestamp=Timestamp(int(time.time()))).save_health_request(peer_wallet_address,
response_time_in_micros)
def get_current_peer_node_health(self,peer_wallet_address: Address) -> PeerNodeHealth:
return self.get_consensus_db(timestamp=Timestamp(int(time.time()))).get_current_peer_node_health(peer_wallet_address)
| 1.570313 | 2 |
integreat_cms/api/v3/regions.py | Integreat/cms-django | 21 | 3024 | <filename>integreat_cms/api/v3/regions.py
"""
This module includes functions related to the regions API endpoint.
"""
from django.http import JsonResponse
from ...cms.models import Region
from ...cms.constants import region_status
from ..decorators import json_response
def transform_region(region):
"""
Function to create a JSON from a single region object, including information on whether the region is live/active.
:param region: The region object which should be converted
:type region: ~integreat_cms.cms.models.regions.region.Region
:return: data necessary for API
:rtype: dict
"""
return {
"id": region.id,
"name": region.full_name,
"path": region.slug,
"live": region.status == region_status.ACTIVE,
"prefix": region.prefix,
"name_without_prefix": region.name,
"plz": region.postal_code,
"extras": region.offers.exists(),
"events": region.events_enabled,
"pois": region.locations_enabled,
"push_notifications": region.push_notifications_enabled,
"longitude": region.longitude,
"latitude": region.latitude,
"bounding_box": region.bounding_box.api_representation,
"aliases": region.aliases,
"tunews": region.tunews_enabled,
}
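# Illustrative example (all values are made up, not taken from a real database): for a
# region with prefix "Stadt", name "Augsburg", slug "augsburg" and status ACTIVE, this
# returns a dict along the lines of
#   {"id": 1, "name": "Stadt Augsburg", "path": "augsburg", "live": True, ...}
# where "extras" reflects whether the region has any offers configured.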
def transform_region_by_status(region):
"""
Function to create a JSON from a single "active" region object.
:param region: The region object which should be converted
:type region: ~integreat_cms.cms.models.regions.region.Region
:return: data necessary for API
:rtype: dict
"""
result = transform_region(region)
# Remove status
del result["live"]
return result
@json_response
def regions(_):
"""
List all regions that are not archived and transform result into JSON
:return: JSON object according to APIv3 regions endpoint definition
:rtype: ~django.http.JsonResponse
"""
result = list(
map(transform_region, Region.objects.exclude(status=region_status.ARCHIVED))
)
return JsonResponse(
result, safe=False
) # Turn off Safe-Mode to allow serializing arrays
@json_response
def liveregions(_):
"""
List all regions that are active (live) and transform result into JSON
:return: JSON object according to APIv3 live regions endpoint definition
:rtype: ~django.http.JsonResponse
"""
result = list(
map(
transform_region_by_status,
Region.objects.filter(status=region_status.ACTIVE),
)
)
return JsonResponse(
result, safe=False
) # Turn off Safe-Mode to allow serializing arrays
@json_response
def hiddenregions(_):
"""
List all regions that are hidden and transform result into JSON
:return: JSON object according to APIv3 hidden regions endpoint definition
:rtype: ~django.http.JsonResponse
"""
result = list(
map(
transform_region_by_status,
Region.objects.filter(status=region_status.HIDDEN),
)
)
return JsonResponse(
result, safe=False
) # Turn off Safe-Mode to allow serializing arrays
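# Hedged wiring sketch (assumption, not part of this module): these views could be exposed
# through a Django urlconf roughly like the following; the URL patterns and names are
# illustrative only.
#
#   from django.urls import path
#   from . import regions as regions_api
#
#   urlpatterns = [
#       path("regions/", regions_api.regions, name="api_regions"),
#       path("regions/live/", regions_api.liveregions, name="api_liveregions"),
#       path("regions/hidden/", regions_api.hiddenregions, name="api_hiddenregions"),
#   ]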
| 2.578125 | 3 |
cli/src/ansible/AnsibleVarsGenerator.py | romsok24/epiphany | 0 | 3025 | <filename>cli/src/ansible/AnsibleVarsGenerator.py
import copy
import os
from cli.src.Config import Config
from cli.src.helpers.build_io import (get_ansible_path,
get_ansible_path_for_build,
get_ansible_vault_path)
from cli.src.helpers.data_loader import (load_all_schema_objs_from_directory,
load_schema_obj, types)
from cli.src.helpers.doc_list_helpers import (ExpectedSingleResultException,
select_first, select_single)
from cli.src.helpers.naming_helpers import to_feature_name, to_role_name
from cli.src.helpers.ObjDict import ObjDict
from cli.src.helpers.yaml_helpers import dump
from cli.src.schema.DefaultMerger import DefaultMerger
from cli.src.Step import Step
from cli.version import VERSION
class AnsibleVarsGenerator(Step):
def __init__(self, inventory_creator=None, inventory_upgrade=None):
super().__init__(__name__)
self.inventory_creator = inventory_creator
self.inventory_upgrade = inventory_upgrade
self.roles_with_generated_vars = []
self.manifest_docs = []
if inventory_creator is not None and inventory_upgrade is None:
self.cluster_model = inventory_creator.cluster_model
self.config_docs = [self.cluster_model] + inventory_creator.config_docs
elif inventory_upgrade is not None and inventory_creator is None:
self.cluster_model = inventory_upgrade.cluster_model
self.config_docs = []
defaults = load_all_schema_objs_from_directory(types.DEFAULT, 'common', 'configuration')
for default in defaults:
config_doc = select_first(inventory_upgrade.config_docs, lambda x: x.kind == default.kind)
if config_doc is None:
self.config_docs.append(default)
else:
self.config_docs.append(config_doc)
self.manifest_docs = inventory_upgrade.manifest_docs
else:
raise Exception('Invalid AnsibleVarsGenerator configuration')
def __enter__(self):
super().__enter__()
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
def generate(self):
self.logger.info('Generate Ansible vars')
self.is_upgrade_run = self.inventory_creator is None
if self.is_upgrade_run:
ansible_dir = get_ansible_path_for_build(self.inventory_upgrade.build_dir)
else:
ansible_dir = get_ansible_path(self.cluster_model.specification.name)
cluster_config_file_path = os.path.join(ansible_dir, 'roles', 'common', 'vars', 'main.yml')
clean_cluster_model = self.get_clean_cluster_model()
with open(cluster_config_file_path, 'w') as stream:
if 'name' in clean_cluster_model:
del clean_cluster_model['name'] # reserved word in ansible!
dump(clean_cluster_model, stream)
if self.is_upgrade_run:
# For upgrade we always need common, repository, image_registry, node_exporter and postgresql. Common is
# already provisioned from the cluster model constructed from the inventory. As PostgreSQL configuration
# is changed between versions (e.g. wal_keep_segments -> wal_keep_size) and sometimes previous parameters
# are not compatible with the new ones, defaults are used for template processing
roles_with_defaults = [
'haproxy', 'image_registry', 'jmx_exporter', 'kafka_exporter',
'node_exporter', 'postgres_exporter', 'postgresql', 'repository'
]
# now let's add any external configs we want to load
roles_with_defaults = [*roles_with_defaults, *self.inventory_upgrade.get_new_config_roles()]
# In special cases (like haproxy), where user specifies majority of the config, it's easier (and less
# awkward) to re-render config templates instead of modifying (for example with regular expressions)
# no-longer-compatible config files.
roles_with_manifest = ['filebeat', 'postgresql', 'repository']
else:
roles_with_defaults = self.inventory_creator.get_enabled_roles()
roles_with_manifest = [] # applies only to upgrades
for role in roles_with_defaults:
kind = 'configuration/' + to_feature_name(role)
document = select_first(self.config_docs, lambda x: x.kind == kind)
if document is None:
self.logger.warn('No config document for enabled role: ' + role)
continue
document.specification['provider'] = self.cluster_model.provider
self.write_role_vars(ansible_dir, role, document)
for role in roles_with_manifest:
kind = 'configuration/' + to_feature_name(role)
self.write_role_manifest_vars(ansible_dir, role, kind)
self.populate_group_vars(ansible_dir)
def write_role_vars(self, ansible_dir, role, document, vars_file_name='main.yml'):
vars_dir = os.path.join(ansible_dir, 'roles', to_role_name(role), 'vars')
if not os.path.exists(vars_dir):
os.makedirs(vars_dir)
vars_file_path = os.path.join(vars_dir, vars_file_name)
with open(vars_file_path, 'w') as stream:
if 'name' in document:
del document['name'] # reserved word in ansible!
dump(document, stream)
if vars_file_name == 'main.yml':
self.roles_with_generated_vars.append(to_role_name(role))
def write_role_manifest_vars(self, ansible_dir, role, kind):
try:
cluster_model = select_single(self.manifest_docs, lambda x: x.kind == 'epiphany-cluster')
except ExpectedSingleResultException:
return # skip
document = select_first(self.manifest_docs, lambda x: x.kind == kind)
if document is None:
# If there is no document provided by the user, then fallback to defaults
document = load_schema_obj(types.DEFAULT, 'common', kind)
# Inject the required "version" attribute
document['version'] = VERSION
# Copy the "provider" value from the cluster model
document['provider'] = cluster_model['provider']
# Merge the document with defaults
with DefaultMerger([document]) as doc_merger:
document = doc_merger.run()[0]
self.write_role_vars(ansible_dir, role, document, vars_file_name='manifest.yml')
def populate_group_vars(self, ansible_dir):
main_vars = ObjDict()
main_vars['admin_user'] = self.cluster_model.specification.admin_user
main_vars['validate_certs'] = Config().validate_certs
main_vars['offline_requirements'] = Config().offline_requirements
main_vars['wait_for_pods'] = Config().wait_for_pods
main_vars['is_upgrade_run'] = self.is_upgrade_run
main_vars['roles_with_generated_vars'] = sorted(self.roles_with_generated_vars)
main_vars['upgrade_components'] = Config().upgrade_components
main_vars['epiphany_version'] = VERSION
# Consider moving this to the provider level.
if self.cluster_model.provider != 'any':
main_vars['k8s_as_cloud_service'] = self.cluster_model.specification.cloud.k8s_as_cloud_service
else:
main_vars['k8s_as_cloud_service'] = False
if self.is_upgrade_run:
shared_config_doc = self.get_shared_config_from_manifest()
else:
shared_config_doc = select_first(self.config_docs, lambda x: x.kind == 'configuration/shared-config')
# Fallback if there is no trace at all of the shared-config doc
if shared_config_doc is None:
shared_config_doc = load_schema_obj(types.DEFAULT, 'common', 'configuration/shared-config')
self.set_vault_path(shared_config_doc)
main_vars.update(shared_config_doc.specification)
vars_dir = os.path.join(ansible_dir, 'group_vars')
if not os.path.exists(vars_dir):
os.makedirs(vars_dir)
vars_file_name = 'all.yml'
vars_file_path = os.path.join(vars_dir, vars_file_name)
with open(vars_file_path, 'a') as stream:
dump(main_vars, stream)
def set_vault_path(self, shared_config):
if shared_config.specification.vault_location == '':
shared_config.specification.vault_tmp_file_location = Config().vault_password_location
cluster_name = self.get_cluster_name()
shared_config.specification.vault_location = get_ansible_vault_path(cluster_name)
def get_cluster_name(self):
if 'name' in self.cluster_model.specification.keys():
return self.cluster_model.specification.name
elif self.inventory_upgrade is not None:
return os.path.basename(self.inventory_upgrade.build_dir)
return 'default'
def get_clean_cluster_model(self):
cluster_model = copy.copy(self.cluster_model)
self.clear_object(cluster_model, 'credentials')
return cluster_model
def get_shared_config_from_manifest(self):
# Reuse shared config from existing manifest
# Shared config contains the use_ha_control_plane flag which is required during upgrades
cluster_model = select_single(self.manifest_docs, lambda x: x.kind == 'epiphany-cluster')
try:
shared_config_doc = select_single(self.manifest_docs, lambda x: x.kind == 'configuration/shared-config')
shared_config_doc['provider'] = cluster_model['provider']
except ExpectedSingleResultException:
# If there is no shared-config doc inside the manifest file, this is probably a v0.3 cluster
# Returning None here (there is nothing to merge at this point) and
# hoping that the shared-config doc from defaults will be enough
return None
# Remove the unused supported_os list, if present, from the manifest's shared-config so we avoid namedlist merging errors.
# This was carried over from Epicli 1.0.x during refactoring and is no longer needed at this stage.
if hasattr(shared_config_doc.specification, 'supported_os'):
del shared_config_doc.specification['supported_os']
# Merge the shared config doc with defaults
with DefaultMerger([shared_config_doc]) as doc_merger:
shared_config_doc = doc_merger.run()[0]
del shared_config_doc['provider']
return shared_config_doc
def clear_object(self, obj_to_clean, key_to_clean):
for key, val in obj_to_clean.items():
if key == key_to_clean:
obj_to_clean[key] = ''
continue
if isinstance(obj_to_clean[key], ObjDict):
self.clear_object(obj_to_clean[key], key_to_clean)
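# Hedged usage sketch (assumption, not part of the original module): the generator is a
# context manager, so a caller that already built an inventory creator could drive it like:
#
#   with AnsibleVarsGenerator(inventory_creator=inventory_creator) as vars_generator:
#       vars_generator.generate()
#
# Here `inventory_creator` is assumed to expose `cluster_model`, `config_docs` and
# `get_enabled_roles()`, as referenced by this class.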
| 1.945313 | 2 |
Python/4 kyu/Snail/test_snail.py | newtonsspawn/codewars_challenges | 3 | 3026 | <filename>Python/4 kyu/Snail/test_snail.py<gh_stars>1-10
from unittest import TestCase
from snail import snail
class TestSnail(TestCase):
def test_snail_001(self):
self.assertEqual(snail([[]]), [])
def test_snail_002(self):
self.assertEqual(snail([[1]]), [1])
def test_snail_003(self):
self.assertEqual(snail([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
[1, 2, 3, 6, 9, 8, 7, 4, 5])
def test_snail_004(self):
self.assertEqual(snail(
[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15],
[16, 17, 18, 19, 20], [21, 22, 23, 24, 25]]),
[1, 2, 3, 4, 5, 10, 15, 20, 25, 24, 23, 22, 21, 16, 11,
6, 7, 8, 9, 14, 19, 18, 17, 12, 13])
def test_snail_005(self):
self.assertEqual(snail([[1, 2, 3, 4, 5, 6], [20, 21, 22, 23, 24, 7],
[19, 32, 33, 34, 25, 8],
[18, 31, 36, 35, 26, 9],
[17, 30, 29, 28, 27, 10],
[16, 15, 14, 13, 12, 11]]),
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36])
def test_snail_006(self):
self.assertEqual(snail([[350]]), [350])
def test_snail_007(self):
self.assertEqual(snail([[545, 588, 42, 119, 791, 866, 142, 699, 611,
400, 465, 373, 30, 71, 950, 813, 850, 652],
[664, 853, 1000, 561, 102, 363, 807, 553, 973,
643, 142, 433, 378, 702, 250, 641, 967, 172],
[908, 928, 776, 82, 547, 224, 730, 158, 169, 8,
111, 847, 891, 142, 906, 609, 443, 211],
[417, 35, 192, 167, 579, 885, 160, 755, 522,
360, 382, 783, 986, 474, 761, 416, 564, 561],
[506, 160, 540, 575, 374, 854, 313, 656, 546,
924, 838, 831, 23, 146, 116, 136, 47, 889],
[932, 515, 627, 982, 886, 609, 67, 966, 262,
953, 299, 246, 488, 526, 524, 855, 954, 752],
[182, 310, 522, 423, 907, 743, 440, 827, 548,
162, 397, 494, 592, 629, 856, 288, 601, 188],
[963, 862, 9, 812, 947, 721, 37, 170, 69, 326,
661, 829, 69, 341, 100, 751, 951, 844],
[252, 831, 293, 346, 830, 639, 657, 425, 294,
47, 477, 786, 852, 821, 858, 438, 251, 296],
[136, 891, 795, 298, 144, 686, 845, 781, 737,
850, 413, 88, 333, 311, 628, 962, 785, 548],
[307, 294, 706, 298, 825, 108, 508, 358, 978,
707, 26, 774, 332, 252, 407, 466, 77, 141],
[803, 134, 246, 768, 431, 724, 448, 362, 875,
983, 188, 254, 332, 249, 162, 167, 911, 639],
[266, 399, 765, 878, 433, 414, 178, 225, 507,
112, 510, 124, 88, 969, 728, 18, 813, 763],
[714, 580, 290, 935, 331, 813, 781, 114, 183,
437, 287, 870, 719, 690, 880, 319, 939, 715],
[283, 165, 518, 34, 109, 638, 327, 3, 369, 979,
696, 845, 34, 498, 736, 372, 166, 931],
[728, 490, 910, 342, 460, 955, 876, 935, 976,
887, 190, 517, 362, 15, 486, 138, 681, 996],
[585, 139, 62, 485, 628, 667, 213, 29, 910, 333,
854, 201, 613, 27, 552, 244, 251, 177],
[222, 791, 454, 246, 525, 626, 58, 512, 642,
561, 309, 674, 607, 441, 728, 782, 375, 113]]),
[545, 588, 42, 119, 791, 866, 142, 699, 611, 400, 465,
373, 30, 71, 950, 813, 850, 652, 172, 211, 561, 889,
752, 188, 844, 296, 548, 141, 639, 763, 715, 931, 996,
177, 113, 375, 782, 728, 441, 607, 674, 309, 561, 642,
512, 58, 626, 525, 246, 454, 791, 222, 585, 728, 283,
714, 266, 803, 307, 136, 252, 963, 182, 932, 506, 417,
908, 664, 853, 1000, 561, 102, 363, 807, 553, 973,
643, 142, 433, 378, 702, 250, 641, 967, 443, 564, 47,
954, 601, 951, 251, 785, 77, 911, 813, 939, 166, 681,
251, 244, 552, 27, 613, 201, 854, 333, 910, 29, 213,
667, 628, 485, 62, 139, 490, 165, 580, 399, 134, 294,
891, 831, 862, 310, 515, 160, 35, 928, 776, 82, 547,
224, 730, 158, 169, 8, 111, 847, 891, 142, 906, 609,
416, 136, 855, 288, 751, 438, 962, 466, 167, 18, 319,
372, 138, 486, 15, 362, 517, 190, 887, 976, 935, 876,
955, 460, 342, 910, 518, 290, 765, 246, 706, 795, 293,
9, 522, 627, 540, 192, 167, 579, 885, 160, 755, 522,
360, 382, 783, 986, 474, 761, 116, 524, 856, 100, 858,
628, 407, 162, 728, 880, 736, 498, 34, 845, 696, 979,
369, 3, 327, 638, 109, 34, 935, 878, 768, 298, 298,
346, 812, 423, 982, 575, 374, 854, 313, 656, 546, 924,
838, 831, 23, 146, 526, 629, 341, 821, 311, 252, 249,
969, 690, 719, 870, 287, 437, 183, 114, 781, 813, 331,
433, 431, 825, 144, 830, 947, 907, 886, 609, 67, 966,
262, 953, 299, 246, 488, 592, 69, 852, 333, 332, 332,
88, 124, 510, 112, 507, 225, 178, 414, 724, 108, 686,
639, 721, 743, 440, 827, 548, 162, 397, 494, 829, 786,
88, 774, 254, 188, 983, 875, 362, 448, 508, 845, 657,
37, 170, 69, 326, 661, 477, 413, 26, 707, 978, 358,
781, 425, 294, 47, 850, 737])
def test_snail_008(self):
self.assertEqual(snail([[844, 865, 787, 987, 255, 928, 812],
[533, 376, 869, 60, 824, 527, 355],
[238, 330, 215, 201, 335, 29, 225],
[828, 63, 172, 620, 315, 361, 758],
[14, 964, 210, 530, 997, 568, 288],
[855, 152, 486, 856, 360, 545, 564],
[549, 259, 544, 508, 793, 934, 567]]),
[844, 865, 787, 987, 255, 928, 812, 355, 225, 758, 288,
564, 567, 934, 793, 508, 544, 259, 549, 855, 14, 828,
238, 533, 376, 869, 60, 824, 527, 29, 361, 568, 545,
360, 856, 486, 152, 964, 63, 330, 215, 201, 335, 315,
997, 530, 210, 172, 620])
def test_snail_009(self):
self.assertEqual(snail([[277, 149, 76, 473, 385, 633, 41, 517, 918, 462,
769, 726, 161, 694, 26, 717, 309, 484],
[822, 156, 851, 683, 303, 638, 818, 714, 303,
509, 353, 557, 51, 592, 663, 475, 725, 40],
[40, 155, 345, 977, 600, 812, 851, 559, 152,
256, 965, 586, 591, 966, 146, 868, 262, 931],
[855, 170, 534, 89, 73, 910, 741, 195, 4, 547,
916, 887, 912, 610, 815, 619, 508, 196],
[600, 735, 378, 713, 511, 639, 703, 269, 326,
650, 223, 993, 760, 894, 430, 705, 896, 814],
[444, 223, 939, 289, 624, 837, 541, 975, 608,
446, 787, 963, 647, 660, 827, 544, 894, 634],
[643, 836, 653, 921, 77, 574, 411, 242, 52, 242,
411, 827, 875, 617, 653, 180, 85, 390],
[592, 287, 28, 699, 663, 170, 548, 812, 792, 68,
376, 733, 147, 475, 803, 513, 815, 515],
[366, 76, 557, 607, 661, 516, 434, 136, 41, 551,
670, 662, 248, 205, 485, 509, 59, 833],
[394, 608, 437, 669, 92, 194, 441, 444, 68, 269,
512, 104, 121, 176, 422, 278, 953, 69],
[187, 714, 933, 50, 576, 276, 594, 283, 258,
268, 95, 111, 353, 139, 342, 274, 141, 69],
[588, 50, 105, 400, 470, 733, 51, 342, 193, 6,
909, 690, 697, 215, 612, 27, 629, 861],
[784, 253, 98, 563, 118, 138, 610, 486, 602,
779, 153, 478, 956, 107, 460, 850, 447, 21],
[690, 48, 219, 72, 384, 261, 474, 383, 632, 868,
922, 826, 651, 612, 684, 339, 418, 743],
[955, 462, 403, 996, 131, 70, 485, 523, 407,
932, 100, 688, 240, 970, 98, 681, 356, 609],
[376, 795, 982, 482, 813, 496, 635, 618, 728,
96, 982, 884, 362, 168, 470, 919, 672, 921],
[327, 201, 195, 628, 731, 453, 778, 719, 751,
115, 429, 675, 983, 281, 389, 396, 876, 484],
[867, 449, 958, 381, 640, 749, 216, 358, 226,
155, 568, 795, 584, 220, 900, 207, 12, 440]]),
[277, 149, 76, 473, 385, 633, 41, 517, 918, 462, 769,
726, 161, 694, 26, 717, 309, 484, 40, 931, 196, 814,
634, 390, 515, 833, 69, 69, 861, 21, 743, 609, 921,
484, 440, 12, 207, 900, 220, 584, 795, 568, 155, 226,
358, 216, 749, 640, 381, 958, 449, 867, 327, 376, 955,
690, 784, 588, 187, 394, 366, 592, 643, 444, 600, 855,
40, 822, 156, 851, 683, 303, 638, 818, 714, 303, 509,
353, 557, 51, 592, 663, 475, 725, 262, 508, 896, 894,
85, 815, 59, 953, 141, 629, 447, 418, 356, 672, 876,
396, 389, 281, 983, 675, 429, 115, 751, 719, 778, 453,
731, 628, 195, 201, 795, 462, 48, 253, 50, 714, 608,
76, 287, 836, 223, 735, 170, 155, 345, 977, 600, 812,
851, 559, 152, 256, 965, 586, 591, 966, 146, 868, 619,
705, 544, 180, 513, 509, 278, 274, 27, 850, 339, 681,
919, 470, 168, 362, 884, 982, 96, 728, 618, 635, 496,
813, 482, 982, 403, 219, 98, 105, 933, 437, 557, 28,
653, 939, 378, 534, 89, 73, 910, 741, 195, 4, 547,
916, 887, 912, 610, 815, 430, 827, 653, 803, 485, 422,
342, 612, 460, 684, 98, 970, 240, 688, 100, 932, 407,
523, 485, 70, 131, 996, 72, 563, 400, 50, 669, 607,
699, 921, 289, 713, 511, 639, 703, 269, 326, 650, 223,
993, 760, 894, 660, 617, 475, 205, 176, 139, 215, 107,
612, 651, 826, 922, 868, 632, 383, 474, 261, 384, 118,
470, 576, 92, 661, 663, 77, 624, 837, 541, 975, 608,
446, 787, 963, 647, 875, 147, 248, 121, 353, 697, 956,
478, 153, 779, 602, 486, 610, 138, 733, 276, 194, 516,
170, 574, 411, 242, 52, 242, 411, 827, 733, 662, 104,
111, 690, 909, 6, 193, 342, 51, 594, 441, 434, 548,
812, 792, 68, 376, 670, 512, 95, 268, 258, 283, 444,
136, 41, 551, 269, 68])
def test_snail_010(self):
self.assertEqual(snail([[831, 609, 235, 391, 645, 469, 352, 982, 96,
596, 79, 460, 438, 280, 390],
[639, 19, 257, 411, 862, 508, 652, 265, 609,
188, 443, 425, 584, 11, 329],
[616, 731, 442, 315, 530, 954, 306, 455, 808,
921, 604, 282, 695, 778, 711],
[205, 735, 423, 803, 480, 736, 47, 13, 478, 960,
268, 844, 611, 102, 489],
[271, 314, 134, 650, 634, 984, 925, 565, 67,
651, 139, 697, 735, 616, 83],
[124, 381, 202, 355, 488, 99, 269, 486, 900,
601, 449, 777, 607, 702, 504],
[259, 357, 104, 126, 784, 649, 30, 243, 716,
436, 917, 272, 629, 864, 131],
[333, 402, 81, 766, 352, 14, 227, 796, 572, 623,
176, 196, 870, 5, 822],
[469, 67, 286, 430, 711, 336, 78, 384, 71, 783,
832, 458, 940, 511, 160],
[783, 286, 352, 679, 233, 493, 549, 83, 137,
498, 450, 214, 856, 925, 585],
[360, 663, 80, 307, 411, 97, 42, 857, 865, 954,
30, 778, 691, 880, 898],
[354, 373, 818, 619, 465, 957, 268, 876, 19, 58,
163, 138, 283, 970, 267],
[773, 79, 892, 808, 810, 35, 147, 377, 502, 400,
742, 345, 35, 120, 859],
[933, 643, 548, 241, 817, 661, 936, 837, 571,
596, 177, 296, 531, 836, 805],
[915, 268, 534, 369, 791, 90, 843, 104, 293, 92,
270, 306, 226, 797, 903]]),
[831, 609, 235, 391, 645, 469, 352, 982, 96, 596, 79,
460, 438, 280, 390, 329, 711, 489, 83, 504, 131, 822,
160, 585, 898, 267, 859, 805, 903, 797, 226, 306, 270,
92, 293, 104, 843, 90, 791, 369, 534, 268, 915, 933,
773, 354, 360, 783, 469, 333, 259, 124, 271, 205, 616,
639, 19, 257, 411, 862, 508, 652, 265, 609, 188, 443,
425, 584, 11, 778, 102, 616, 702, 864, 5, 511, 925,
880, 970, 120, 836, 531, 296, 177, 596, 571, 837, 936,
661, 817, 241, 548, 643, 79, 373, 663, 286, 67, 402,
357, 381, 314, 735, 731, 442, 315, 530, 954, 306, 455,
808, 921, 604, 282, 695, 611, 735, 607, 629, 870, 940,
856, 691, 283, 35, 345, 742, 400, 502, 377, 147, 35,
810, 808, 892, 818, 80, 352, 286, 81, 104, 202, 134,
423, 803, 480, 736, 47, 13, 478, 960, 268, 844, 697,
777, 272, 196, 458, 214, 778, 138, 163, 58, 19, 876,
268, 957, 465, 619, 307, 679, 430, 766, 126, 355, 650,
634, 984, 925, 565, 67, 651, 139, 449, 917, 176, 832,
450, 30, 954, 865, 857, 42, 97, 411, 233, 711, 352,
784, 488, 99, 269, 486, 900, 601, 436, 623, 783, 498,
137, 83, 549, 493, 336, 14, 649, 30, 243, 716, 572,
71, 384, 78, 227, 796])
def test_snail_011(self):
self.assertEqual(snail([[900, 61, 525, 325, 420, 389, 718, 967, 116,
156, 877, 301, 815],
[325, 921, 851, 66, 226, 759, 166, 754, 972,
199, 26, 673, 81],
[953, 211, 277, 170, 498, 206, 11, 766, 742,
101, 661, 674, 501],
[613, 645, 897, 883, 24, 499, 408, 404, 93, 464,
815, 546, 830],
[103, 374, 494, 259, 597, 463, 83, 658, 867,
321, 311, 942, 265],
[279, 214, 989, 896, 644, 152, 130, 439, 917,
664, 293, 835, 469],
[114, 212, 935, 146, 589, 399, 128, 61, 242,
1000, 695, 340, 119],
[67, 258, 342, 377, 207, 186, 296, 249, 902,
607, 168, 151, 890],
[331, 274, 68, 643, 694, 918, 141, 718, 26, 659,
786, 247, 685],
[760, 128, 36, 115, 509, 292, 665, 755, 426,
380, 813, 1000, 366],
[459, 285, 200, 835, 851, 925, 217, 506, 749,
313, 546, 588, 902],
[475, 556, 67, 602, 323, 842, 248, 103, 413,
276, 513, 254, 478],
[478, 749, 519, 165, 158, 393, 952, 614, 291,
781, 344, 774, 42]]),
[900, 61, 525, 325, 420, 389, 718, 967, 116, 156, 877,
301, 815, 81, 501, 830, 265, 469, 119, 890, 685, 366,
902, 478, 42, 774, 344, 781, 291, 614, 952, 393, 158,
165, 519, 749, 478, 475, 459, 760, 331, 67, 114, 279,
103, 613, 953, 325, 921, 851, 66, 226, 759, 166, 754,
972, 199, 26, 673, 674, 546, 942, 835, 340, 151, 247,
1000, 588, 254, 513, 276, 413, 103, 248, 842, 323,
602, 67, 556, 285, 128, 274, 258, 212, 214, 374, 645,
211, 277, 170, 498, 206, 11, 766, 742, 101, 661, 815,
311, 293, 695, 168, 786, 813, 546, 313, 749, 506, 217,
925, 851, 835, 200, 36, 68, 342, 935, 989, 494, 897,
883, 24, 499, 408, 404, 93, 464, 321, 664, 1000, 607,
659, 380, 426, 755, 665, 292, 509, 115, 643, 377, 146,
896, 259, 597, 463, 83, 658, 867, 917, 242, 902, 26,
718, 141, 918, 694, 207, 589, 644, 152, 130, 439, 61,
249, 296, 186, 399, 128])
def test_snail_012(self):
self.assertEqual(snail([[743, 389, 404, 786, 6, 509, 887, 481, 858, 117,
671, 344, 7, 855, 551, 838, 500, 736, 981,
342],
[823, 940, 897, 877, 616, 425, 425, 300, 769,
780, 755, 505, 48, 339, 987, 285, 118, 949,
245, 644],
[68, 37, 515, 914, 885, 247, 552, 998, 53, 782,
913, 34, 413, 744, 462, 794, 589, 405, 233,
850],
[905, 208, 712, 995, 261, 154, 768, 118, 908,
452, 706, 612, 584, 638, 480, 969, 345, 780,
435, 898],
[714, 11, 654, 957, 564, 362, 231, 41, 721, 254,
202, 137, 126, 174, 832, 661, 382, 654, 516,
300],
[218, 667, 767, 610, 339, 531, 335, 234, 53,
735, 742, 818, 233, 26, 634, 229, 316, 436,
999, 348],
[943, 451, 142, 545, 186, 542, 934, 22, 287,
166, 63, 495, 13, 433, 739, 270, 535, 305, 272,
254],
[322, 892, 751, 856, 280, 706, 632, 796, 507,
633, 52, 86, 116, 753, 489, 294, 869, 135, 565,
102],
[691, 412, 615, 389, 973, 462, 624, 172, 170,
56, 744, 558, 339, 871, 878, 495, 810, 454,
349, 261],
[545, 378, 844, 494, 172, 465, 897, 608, 755,
74, 367, 853, 407, 865, 168, 644, 477, 73, 166,
822],
[428, 985, 729, 790, 40, 290, 549, 491, 160,
429, 113, 379, 704, 432, 225, 713, 634, 879,
837, 958],
[803, 796, 762, 778, 917, 794, 792, 752, 325,
953, 986, 867, 35, 957, 623, 662, 916, 513,
324, 185],
[190, 680, 689, 189, 78, 591, 532, 174, 927,
376, 635, 12, 908, 253, 569, 267, 396, 112,
180, 22],
[797, 925, 775, 831, 921, 87, 748, 141, 33, 45,
194, 270, 661, 78, 968, 333, 132, 976, 788,
137],
[854, 147, 902, 213, 365, 342, 962, 662, 491,
86, 701, 493, 736, 705, 115, 472, 354, 815,
240, 24],
[595, 473, 899, 866, 157, 973, 725, 107, 417,
989, 205, 921, 98, 632, 234, 938, 112, 181,
604, 707],
[889, 203, 968, 727, 409, 651, 695, 854, 423,
968, 745, 680, 560, 935, 54, 862, 790, 343,
884, 627],
[135, 694, 358, 75, 237, 924, 493, 758, 998,
279, 80, 86, 174, 991, 585, 251, 99, 718, 611,
462],
[109, 360, 882, 794, 631, 587, 73, 10, 696, 582,
352, 797, 897, 58, 246, 277, 690, 511, 495,
437],
[99, 898, 540, 657, 563, 267, 39, 556, 819, 738,
888, 8, 140, 400, 619, 267, 859, 919, 301,
265]]),
[743, 389, 404, 786, 6, 509, 887, 481, 858, 117, 671,
344, 7, 855, 551, 838, 500, 736, 981, 342, 644, 850,
898, 300, 348, 254, 102, 261, 822, 958, 185, 22, 137,
24, 707, 627, 462, 437, 265, 301, 919, 859, 267, 619,
400, 140, 8, 888, 738, 819, 556, 39, 267, 563, 657,
540, 898, 99, 109, 135, 889, 595, 854, 797, 190, 803,
428, 545, 691, 322, 943, 218, 714, 905, 68, 823, 940,
897, 877, 616, 425, 425, 300, 769, 780, 755, 505, 48,
339, 987, 285, 118, 949, 245, 233, 435, 516, 999, 272,
565, 349, 166, 837, 324, 180, 788, 240, 604, 884, 611,
495, 511, 690, 277, 246, 58, 897, 797, 352, 582, 696,
10, 73, 587, 631, 794, 882, 360, 694, 203, 473, 147,
925, 680, 796, 985, 378, 412, 892, 451, 667, 11, 208,
37, 515, 914, 885, 247, 552, 998, 53, 782, 913, 34,
413, 744, 462, 794, 589, 405, 780, 654, 436, 305, 135,
454, 73, 879, 513, 112, 976, 815, 181, 343, 718, 99,
251, 585, 991, 174, 86, 80, 279, 998, 758, 493, 924,
237, 75, 358, 968, 899, 902, 775, 689, 762, 729, 844,
615, 751, 142, 767, 654, 712, 995, 261, 154, 768, 118,
908, 452, 706, 612, 584, 638, 480, 969, 345, 382, 316,
535, 869, 810, 477, 634, 916, 396, 132, 354, 112, 790,
862, 54, 935, 560, 680, 745, 968, 423, 854, 695, 651,
409, 727, 866, 213, 831, 189, 778, 790, 494, 389, 856,
545, 610, 957, 564, 362, 231, 41, 721, 254, 202, 137,
126, 174, 832, 661, 229, 270, 294, 495, 644, 713, 662,
267, 333, 472, 938, 234, 632, 98, 921, 205, 989, 417,
107, 725, 973, 157, 365, 921, 78, 917, 40, 172, 973,
280, 186, 339, 531, 335, 234, 53, 735, 742, 818, 233,
26, 634, 739, 489, 878, 168, 225, 623, 569, 968, 115,
705, 736, 493, 701, 86, 491, 662, 962, 342, 87, 591,
794, 290, 465, 462, 706, 542, 934, 22, 287, 166, 63,
495, 13, 433, 753, 871, 865, 432, 957, 253, 78, 661,
270, 194, 45, 33, 141, 748, 532, 792, 549, 897, 624,
632, 796, 507, 633, 52, 86, 116, 339, 407, 704, 35,
908, 12, 635, 376, 927, 174, 752, 491, 608, 172, 170,
56, 744, 558, 853, 379, 867, 986, 953, 325, 160, 755,
74, 367, 113, 429])
def test_snail_013(self):
self.assertEqual(snail([[567]]), [567])
def test_snail_014(self):
self.assertEqual(snail(
[[724, 455, 919, 922, 779, 711, 36, 791, 104, 490, 480],
[95, 252, 691, 12, 786, 563, 890, 446, 275, 899, 930],
[359, 844, 866, 827, 758, 81, 441, 768, 499, 983, 438],
[860, 385, 512, 34, 351, 822, 501, 130, 111, 249, 25],
[638, 30, 5, 102, 998, 888, 800, 651, 402, 40, 236],
[872, 21, 731, 902, 613, 442, 437, 581, 710, 453, 877],
[236, 661, 418, 662, 869, 936, 251, 302, 474, 578, 18],
[413, 451, 241, 737, 641, 677, 263, 617, 9, 172, 246],
[628, 390, 511, 786, 219, 833, 722, 419, 743, 695, 400],
[711, 986, 834, 181, 855, 780, 869, 238, 931, 993, 203],
[625, 896, 172, 269, 273, 722, 528, 434, 211, 278, 645]]),
[724, 455, 919, 922, 779, 711, 36, 791, 104, 490, 480,
930, 438, 25, 236, 877, 18, 246, 400, 203, 645, 278,
211, 434, 528, 722, 273, 269, 172, 896, 625, 711, 628,
413, 236, 872, 638, 860, 359, 95, 252, 691, 12, 786,
563, 890, 446, 275, 899, 983, 249, 40, 453, 578, 172,
695, 993, 931, 238, 869, 780, 855, 181, 834, 986, 390,
451, 661, 21, 30, 385, 844, 866, 827, 758, 81, 441,
768, 499, 111, 402, 710, 474, 9, 743, 419, 722, 833,
219, 786, 511, 241, 418, 731, 5, 512, 34, 351, 822,
501, 130, 651, 581, 302, 617, 263, 677, 641, 737, 662,
902, 102, 998, 888, 800, 437, 251, 936, 869, 613,
442])
def test_snail_015(self):
self.assertEqual(snail([[547, 471, 740, 487, 758, 466, 885, 554, 312,
618, 76, 890, 416, 621, 45, 33, 14],
[935, 878, 428, 721, 79, 762, 116, 797, 676,
755, 7, 142, 464, 861, 192, 943, 822],
[100, 325, 962, 434, 413, 313, 908, 842, 366,
618, 803, 480, 391, 263, 122, 148, 582],
[281, 741, 182, 236, 351, 611, 588, 857, 354,
837, 867, 258, 508, 882, 305, 396, 796],
[127, 795, 960, 196, 93, 41, 113, 949, 999, 880,
215, 844, 86, 229, 436, 746, 224],
[709, 283, 219, 254, 913, 900, 537, 617, 80, 18,
944, 372, 805, 981, 798, 380, 868],
[741, 7, 686, 690, 987, 382, 38, 560, 89, 889,
243, 684, 951, 686, 795, 711, 304],
[309, 225, 691, 223, 245, 377, 786, 669, 126,
835, 245, 62, 803, 234, 486, 580, 192],
[895, 172, 347, 645, 113, 700, 419, 573, 987,
403, 527, 893, 348, 508, 530, 558, 477],
[307, 725, 128, 975, 498, 796, 359, 268, 204,
165, 349, 942, 527, 73, 815, 372, 749],
[848, 950, 312, 560, 350, 943, 576, 873, 230, 4,
807, 561, 944, 629, 422, 342, 678],
[275, 41, 349, 925, 579, 139, 836, 777, 256,
422, 884, 587, 126, 836, 347, 692, 87],
[351, 100, 739, 316, 666, 372, 441, 858, 25,
747, 474, 234, 943, 393, 530, 336, 185],
[784, 847, 392, 698, 866, 494, 370, 12, 221,
689, 428, 491, 15, 677, 118, 496, 941],
[748, 782, 298, 359, 981, 334, 520, 809, 253,
69, 70, 909, 7, 662, 574, 128, 125],
[570, 682, 863, 589, 421, 147, 262, 647, 749,
76, 468, 740, 107, 277, 484, 905, 399],
[936, 382, 383, 764, 679, 634, 999, 371, 34,
581, 336, 178, 137, 860, 269, 341, 166]]),
[547, 471, 740, 487, 758, 466, 885, 554, 312, 618, 76,
890, 416, 621, 45, 33, 14, 822, 582, 796, 224, 868,
304, 192, 477, 749, 678, 87, 185, 941, 125, 399, 166,
341, 269, 860, 137, 178, 336, 581, 34, 371, 999, 634,
679, 764, 383, 382, 936, 570, 748, 784, 351, 275, 848,
307, 895, 309, 741, 709, 127, 281, 100, 935, 878, 428,
721, 79, 762, 116, 797, 676, 755, 7, 142, 464, 861,
192, 943, 148, 396, 746, 380, 711, 580, 558, 372, 342,
692, 336, 496, 128, 905, 484, 277, 107, 740, 468, 76,
749, 647, 262, 147, 421, 589, 863, 682, 782, 847, 100,
41, 950, 725, 172, 225, 7, 283, 795, 741, 325, 962,
434, 413, 313, 908, 842, 366, 618, 803, 480, 391, 263,
122, 305, 436, 798, 795, 486, 530, 815, 422, 347, 530,
118, 574, 662, 7, 909, 70, 69, 253, 809, 520, 334,
981, 359, 298, 392, 739, 349, 312, 128, 347, 691, 686,
219, 960, 182, 236, 351, 611, 588, 857, 354, 837, 867,
258, 508, 882, 229, 981, 686, 234, 508, 73, 629, 836,
393, 677, 15, 491, 428, 689, 221, 12, 370, 494, 866,
698, 316, 925, 560, 975, 645, 223, 690, 254, 196, 93,
41, 113, 949, 999, 880, 215, 844, 86, 805, 951, 803,
348, 527, 944, 126, 943, 234, 474, 747, 25, 858, 441,
372, 666, 579, 350, 498, 113, 245, 987, 913, 900, 537,
617, 80, 18, 944, 372, 684, 62, 893, 942, 561, 587,
884, 422, 256, 777, 836, 139, 943, 796, 700, 377, 382,
38, 560, 89, 889, 243, 245, 527, 349, 807, 4, 230,
873, 576, 359, 419, 786, 669, 126, 835, 403, 165, 204,
268, 573, 987])
def test_snail_016(self):
self.assertEqual(snail([[665, 175], [31, 103]]), [665, 175, 103, 31])
def test_snail_017(self):
self.assertEqual(snail([[755]]), [755])
def test_snail_018(self):
self.assertEqual(snail([[126]]), [126])
def test_snail_019(self):
self.assertEqual(snail([[636, 479, 441, 159, 593, 904, 31, 21, 198],
[558, 377, 166, 504, 919, 20, 495, 71, 899],
[955, 466, 168, 459, 223, 535, 369, 881, 709],
[814, 54, 762, 941, 804, 810, 498, 583, 828],
[678, 489, 88, 976, 967, 218, 494, 1000, 550],
[501, 310, 668, 403, 558, 697, 247, 393, 990],
[346, 220, 92, 707, 460, 106, 187, 606, 447],
[589, 900, 867, 818, 647, 180, 878, 809, 191],
[278, 820, 427, 859, 985, 594, 218, 851, 286]]),
[636, 479, 441, 159, 593, 904, 31, 21, 198, 899, 709,
828, 550, 990, 447, 191, 286, 851, 218, 594, 985, 859,
427, 820, 278, 589, 346, 501, 678, 814, 955, 558, 377,
166, 504, 919, 20, 495, 71, 881, 583, 1000, 393, 606,
809, 878, 180, 647, 818, 867, 900, 220, 310, 489, 54,
466, 168, 459, 223, 535, 369, 498, 494, 247, 187, 106,
460, 707, 92, 668, 88, 762, 941, 804, 810, 218, 697,
558, 403, 976, 967])
def test_snail_020(self):
self.assertEqual(snail([[34, 174, 567, 523, 884, 681, 348, 879],
[860, 127, 97, 983, 245, 516, 214, 358],
[812, 405, 787, 630, 856, 384, 973, 803],
[452, 925, 253, 481, 678, 517, 246, 855],
[471, 121, 342, 671, 92, 770, 690, 538],
[706, 207, 63, 874, 366, 336, 848, 708],
[771, 637, 708, 977, 977, 3, 562, 324],
[453, 816, 461, 143, 874, 992, 346, 923]]),
[34, 174, 567, 523, 884, 681, 348, 879, 358, 803, 855,
538, 708, 324, 923, 346, 992, 874, 143, 461, 816, 453,
771, 706, 471, 452, 812, 860, 127, 97, 983, 245, 516,
214, 973, 246, 690, 848, 562, 3, 977, 977, 708, 637,
207, 121, 925, 405, 787, 630, 856, 384, 517, 770, 336,
366, 874, 63, 342, 253, 481, 678, 92, 671])
def test_snail_021(self):
self.assertEqual(snail([[950, 222, 988, 710, 321, 798, 51],
[640, 844, 782, 506, 155, 308, 384],
[703, 52, 197, 723, 690, 468, 962],
[326, 195, 134, 216, 302, 503, 212],
[718, 323, 17, 449, 601, 380, 396],
[985, 698, 502, 864, 257, 804, 942],
[888, 418, 187, 880, 152, 432, 651]]),
[950, 222, 988, 710, 321, 798, 51, 384, 962, 212, 396,
942, 651, 432, 152, 880, 187, 418, 888, 985, 718, 326,
703, 640, 844, 782, 506, 155, 308, 468, 503, 380, 804,
257, 864, 502, 698, 323, 195, 52, 197, 723, 690, 302,
601, 449, 17, 134, 216])
def test_snail_022(self):
self.assertEqual(snail([[188, 383, 11, 265, 829, 552, 184, 587, 149,
839, 640, 638, 292, 990],
[523, 992, 378, 958, 526, 735, 753, 216, 781,
183, 273, 433, 458, 900],
[645, 764, 450, 273, 769, 871, 125, 983, 864,
318, 160, 300, 677, 990],
[245, 169, 676, 300, 81, 19, 481, 549, 922, 13,
798, 37, 785, 831],
[202, 912, 399, 946, 877, 577, 211, 149, 515, 7,
783, 194, 903, 458],
[241, 530, 605, 143, 110, 318, 450, 365, 300,
901, 863, 973, 997, 46],
[217, 471, 358, 537, 270, 529, 512, 306, 402,
11, 275, 228, 737, 751],
[231, 344, 693, 847, 723, 898, 87, 700, 558,
116, 927, 425, 220, 505],
[119, 851, 664, 891, 32, 670, 224, 37, 428, 45,
679, 170, 522, 181],
[506, 264, 274, 87, 567, 324, 203, 715, 628,
288, 836, 353, 367, 458],
[377, 859, 308, 788, 792, 211, 738, 314, 972,
557, 583, 789, 132, 271],
[483, 158, 749, 560, 743, 592, 710, 442, 650,
896, 323, 221, 309, 299],
[858, 549, 118, 588, 674, 975, 799, 910, 465,
453, 139, 448, 537, 680],
[713, 851, 964, 542, 64, 296, 923, 440, 225,
479, 744, 119, 144, 399]]),
[188, 383, 11, 265, 829, 552, 184, 587, 149, 839, 640,
638, 292, 990, 900, 990, 831, 458, 46, 751, 505, 181,
458, 271, 299, 680, 399, 144, 119, 744, 479, 225, 440,
923, 296, 64, 542, 964, 851, 713, 858, 483, 377, 506,
119, 231, 217, 241, 202, 245, 645, 523, 992, 378, 958,
526, 735, 753, 216, 781, 183, 273, 433, 458, 677, 785,
903, 997, 737, 220, 522, 367, 132, 309, 537, 448, 139,
453, 465, 910, 799, 975, 674, 588, 118, 549, 158, 859,
264, 851, 344, 471, 530, 912, 169, 764, 450, 273, 769,
871, 125, 983, 864, 318, 160, 300, 37, 194, 973, 228,
425, 170, 353, 789, 221, 323, 896, 650, 442, 710, 592,
743, 560, 749, 308, 274, 664, 693, 358, 605, 399, 676,
300, 81, 19, 481, 549, 922, 13, 798, 783, 863, 275,
927, 679, 836, 583, 557, 972, 314, 738, 211, 792, 788,
87, 891, 847, 537, 143, 946, 877, 577, 211, 149, 515,
7, 901, 11, 116, 45, 288, 628, 715, 203, 324, 567, 32,
723, 270, 110, 318, 450, 365, 300, 402, 558, 428, 37,
224, 670, 898, 529, 512, 306, 700, 87])
def test_snail_023(self):
self.assertEqual(snail([[903, 852, 365, 142, 106, 848, 913, 461, 732,
281, 800, 952, 711, 122],
[805, 299, 188, 853, 984, 79, 432, 280, 510,
925, 155, 124, 736, 567],
[793, 219, 758, 522, 833, 232, 24, 494, 164,
365, 205, 548, 145, 603],
[711, 113, 979, 976, 706, 457, 185, 895, 310,
106, 142, 270, 209, 577],
[866, 160, 28, 737, 871, 900, 799, 516, 203,
294, 45, 256, 242, 397],
[901, 606, 892, 620, 61, 398, 300, 14, 365, 616,
230, 82, 352, 98],
[441, 320, 684, 572, 254, 331, 401, 375, 970,
223, 65, 26, 167, 858],
[915, 104, 113, 774, 436, 832, 181, 939, 238,
90, 67, 227, 426, 55],
[846, 135, 332, 105, 110, 301, 794, 431, 860,
715, 201, 69, 744, 657],
[341, 691, 666, 61, 827, 814, 82, 276, 274, 888,
738, 387, 429, 69],
[706, 204, 421, 382, 258, 466, 97, 189, 893,
523, 910, 633, 510, 351],
[560, 109, 533, 541, 825, 571, 608, 542, 92,
385, 694, 762, 465, 620],
[369, 509, 928, 286, 860, 142, 4, 926, 657, 697,
743, 858, 430, 638],
[812, 243, 974, 854, 283, 573, 121, 48, 71, 536,
561, 687, 375, 884]]),
[903, 852, 365, 142, 106, 848, 913, 461, 732, 281, 800,
952, 711, 122, 567, 603, 577, 397, 98, 858, 55, 657,
69, 351, 620, 638, 884, 375, 687, 561, 536, 71, 48,
121, 573, 283, 854, 974, 243, 812, 369, 560, 706, 341,
846, 915, 441, 901, 866, 711, 793, 805, 299, 188, 853,
984, 79, 432, 280, 510, 925, 155, 124, 736, 145, 209,
242, 352, 167, 426, 744, 429, 510, 465, 430, 858, 743,
697, 657, 926, 4, 142, 860, 286, 928, 509, 109, 204,
691, 135, 104, 320, 606, 160, 113, 219, 758, 522, 833,
232, 24, 494, 164, 365, 205, 548, 270, 256, 82, 26,
227, 69, 387, 633, 762, 694, 385, 92, 542, 608, 571,
825, 541, 533, 421, 666, 332, 113, 684, 892, 28, 979,
976, 706, 457, 185, 895, 310, 106, 142, 45, 230, 65,
67, 201, 738, 910, 523, 893, 189, 97, 466, 258, 382,
61, 105, 774, 572, 620, 737, 871, 900, 799, 516, 203,
294, 616, 223, 90, 715, 888, 274, 276, 82, 814, 827,
110, 436, 254, 61, 398, 300, 14, 365, 970, 238, 860,
431, 794, 301, 832, 331, 401, 375, 939, 181])
def test_snail_024(self):
self.assertEqual(snail([[733]]), [733])
def test_snail_025(self):
self.assertEqual(snail([[776, 298, 262, 318, 957, 178, 428, 566, 345,
169, 434, 817, 494, 398, 648, 512, 314, 465],
[843, 563, 885, 994, 556, 571, 786, 143, 731,
828, 992, 701, 211, 989, 361, 904, 168, 175],
[153, 906, 802, 413, 532, 445, 864, 275, 891,
169, 899, 36, 278, 126, 691, 437, 199, 30],
[449, 454, 466, 728, 660, 493, 312, 492, 198,
771, 359, 787, 302, 121, 292, 282, 739, 958],
[798, 332, 106, 365, 874, 905, 831, 462, 88,
380, 443, 602, 925, 421, 564, 986, 446, 580],
[78, 187, 603, 551, 283, 789, 262, 542, 551,
422, 581, 100, 108, 574, 249, 473, 606, 83],
[359, 14, 876, 400, 826, 868, 779, 67, 946, 568,
826, 561, 582, 815, 72, 771, 851, 21],
[41, 860, 746, 556, 979, 831, 335, 126, 212,
701, 18, 318, 725, 944, 65, 802, 182, 433],
[746, 66, 844, 140, 842, 49, 547, 451, 436, 434,
72, 973, 2, 212, 311, 691, 546, 176],
[630, 510, 740, 7, 888, 439, 231, 788, 524, 270,
126, 558, 969, 576, 166, 393, 856, 548],
[538, 867, 432, 194, 149, 678, 379, 801, 182,
738, 209, 161, 950, 810, 869, 627, 395, 1000],
[523, 863, 18, 340, 416, 658, 734, 699, 538, 62,
740, 808, 202, 69, 895, 785, 882, 368],
[997, 453, 658, 870, 438, 799, 870, 257, 681,
887, 109, 40, 178, 475, 550, 283, 90, 167],
[243, 774, 470, 223, 518, 660, 730, 117, 885,
377, 305, 744, 622, 484, 789, 498, 464, 837],
[753, 492, 372, 529, 47, 461, 160, 259, 282,
983, 73, 192, 366, 101, 307, 257, 89, 968],
[135, 25, 644, 83, 479, 794, 845, 60, 310, 821,
239, 247, 713, 343, 405, 407, 308, 63],
[297, 590, 149, 649, 317, 843, 23, 652, 69, 819,
886, 381, 411, 781, 477, 672, 822, 185],
[642, 274, 676, 957, 888, 269, 954, 78, 8, 944,
730, 846, 83, 218, 865, 327, 705, 629]]),
[776, 298, 262, 318, 957, 178, 428, 566, 345, 169, 434,
817, 494, 398, 648, 512, 314, 465, 175, 30, 958, 580,
83, 21, 433, 176, 548, 1000, 368, 167, 837, 968, 63,
185, 629, 705, 327, 865, 218, 83, 846, 730, 944, 8,
78, 954, 269, 888, 957, 676, 274, 642, 297, 135, 753,
243, 997, 523, 538, 630, 746, 41, 359, 78, 798, 449,
153, 843, 563, 885, 994, 556, 571, 786, 143, 731, 828,
992, 701, 211, 989, 361, 904, 168, 199, 739, 446, 606,
851, 182, 546, 856, 395, 882, 90, 464, 89, 308, 822,
672, 477, 781, 411, 381, 886, 819, 69, 652, 23, 843,
317, 649, 149, 590, 25, 492, 774, 453, 863, 867, 510,
66, 860, 14, 187, 332, 454, 906, 802, 413, 532, 445,
864, 275, 891, 169, 899, 36, 278, 126, 691, 437, 282,
986, 473, 771, 802, 691, 393, 627, 785, 283, 498, 257,
407, 405, 343, 713, 247, 239, 821, 310, 60, 845, 794,
479, 83, 644, 372, 470, 658, 18, 432, 740, 844, 746,
876, 603, 106, 466, 728, 660, 493, 312, 492, 198, 771,
359, 787, 302, 121, 292, 564, 249, 72, 65, 311, 166,
869, 895, 550, 789, 307, 101, 366, 192, 73, 983, 282,
259, 160, 461, 47, 529, 223, 870, 340, 194, 7, 140,
556, 400, 551, 365, 874, 905, 831, 462, 88, 380, 443,
602, 925, 421, 574, 815, 944, 212, 576, 810, 69, 475,
484, 622, 744, 305, 377, 885, 117, 730, 660, 518, 438,
416, 149, 888, 842, 979, 826, 283, 789, 262, 542, 551,
422, 581, 100, 108, 582, 725, 2, 969, 950, 202, 178,
40, 109, 887, 681, 257, 870, 799, 658, 678, 439, 49,
831, 868, 779, 67, 946, 568, 826, 561, 318, 973, 558,
161, 808, 740, 62, 538, 699, 734, 379, 231, 547, 335,
126, 212, 701, 18, 72, 126, 209, 738, 182, 801, 788,
451, 436, 434, 270, 524])
def test_snail_026(self):
self.assertEqual(snail(
[[348, 421, 186, 172, 681, 428, 955, 583, 1000, 631, 543],
[751, 963, 968, 739, 248, 380, 307, 61, 874, 248, 908],
[803, 186, 336, 83, 196, 775, 898, 148, 43, 24, 993],
[274, 904, 695, 140, 582, 766, 810, 824, 717, 591, 136],
[632, 95, 397, 516, 457, 937, 220, 150, 971, 391, 283],
[157, 543, 946, 629, 703, 392, 816, 292, 935, 107, 289],
[794, 824, 923, 134, 486, 165, 956, 714, 775, 265, 654],
[261, 551, 238, 976, 460, 921, 501, 439, 811, 202, 916],
[817, 671, 357, 391, 181, 639, 191, 534, 945, 204, 249],
[761, 208, 763, 142, 330, 832, 998, 706, 301, 117, 615],
[977, 386, 105, 274, 166, 993, 248, 316, 340, 378, 886]]),
[348, 421, 186, 172, 681, 428, 955, 583, 1000, 631,
543, 908, 993, 136, 283, 289, 654, 916, 249, 615, 886,
378, 340, 316, 248, 993, 166, 274, 105, 386, 977, 761,
817, 261, 794, 157, 632, 274, 803, 751, 963, 968, 739,
248, 380, 307, 61, 874, 248, 24, 591, 391, 107, 265,
202, 204, 117, 301, 706, 998, 832, 330, 142, 763, 208,
671, 551, 824, 543, 95, 904, 186, 336, 83, 196, 775,
898, 148, 43, 717, 971, 935, 775, 811, 945, 534, 191,
639, 181, 391, 357, 238, 923, 946, 397, 695, 140, 582,
766, 810, 824, 150, 292, 714, 439, 501, 921, 460, 976,
134, 629, 516, 457, 937, 220, 816, 956, 165, 486, 703,
392])
def test_snail_027(self):
self.assertEqual(snail([[279, 149, 635, 162, 437, 751, 73, 382, 918,
994, 660, 832, 818, 312, 381, 306, 375, 87,
245],
[54, 599, 406, 599, 951, 888, 231, 723, 287,
692, 617, 275, 719, 445, 361, 954, 583, 951,
162],
[966, 522, 282, 502, 739, 889, 323, 635, 486,
477, 231, 502, 471, 524, 566, 189, 91, 694,
768],
[164, 463, 961, 850, 665, 898, 53, 331, 507, 69,
164, 99, 435, 418, 104, 868, 998, 186, 161],
[138, 179, 498, 106, 803, 338, 361, 631, 370,
805, 156, 583, 102, 486, 989, 468, 772, 491,
656],
[450, 129, 723, 662, 665, 9, 227, 23, 222, 199,
111, 556, 897, 4, 81, 665, 108, 906, 457],
[442, 235, 249, 838, 26, 861, 927, 55, 260, 9,
140, 495, 478, 544, 693, 849, 727, 448, 421],
[812, 736, 968, 113, 205, 680, 936, 699, 733,
830, 760, 301, 891, 701, 530, 34, 234, 764,
136],
[191, 591, 992, 189, 987, 162, 784, 566, 788,
983, 584, 919, 410, 408, 225, 778, 200, 854,
852],
[424, 5, 610, 711, 796, 952, 899, 192, 643, 399,
953, 720, 406, 324, 706, 943, 139, 87, 668],
[412, 431, 428, 777, 880, 971, 931, 966, 281,
510, 63, 1000, 115, 833, 746, 390, 333, 636,
671],
[249, 695, 992, 731, 15, 843, 567, 332, 762,
942, 804, 601, 83, 738, 165, 517, 258, 171,
227],
[976, 808, 967, 898, 78, 231, 563, 182, 696,
611, 421, 809, 6, 954, 656, 338, 422, 777,
172],
[839, 795, 83, 698, 557, 584, 452, 382, 89, 858,
886, 514, 671, 669, 827, 78, 160, 694, 784],
[1000, 249, 558, 794, 891, 668, 564, 399, 18,
452, 938, 516, 359, 2, 140, 31, 16, 876, 532],
[706, 99, 684, 613, 93, 504, 584, 599, 513, 638,
645, 334, 448, 148, 802, 805, 255, 759, 176],
[262, 671, 68, 389, 36, 561, 104, 285, 968, 896,
20, 912, 215, 161, 564, 476, 828, 815, 331],
[74, 29, 857, 758, 382, 578, 150, 745, 684, 558,
384, 439, 118, 599, 779, 378, 816, 996, 206],
[83, 545, 645, 856, 457, 736, 454, 105, 282,
587, 180, 436, 188, 477, 503, 377, 696, 918,
592]]),
[279, 149, 635, 162, 437, 751, 73, 382, 918, 994, 660,
832, 818, 312, 381, 306, 375, 87, 245, 162, 768, 161,
656, 457, 421, 136, 852, 668, 671, 227, 172, 784, 532,
176, 331, 206, 592, 918, 696, 377, 503, 477, 188, 436,
180, 587, 282, 105, 454, 736, 457, 856, 645, 545, 83,
74, 262, 706, 1000, 839, 976, 249, 412, 424, 191, 812,
442, 450, 138, 164, 966, 54, 599, 406, 599, 951, 888,
231, 723, 287, 692, 617, 275, 719, 445, 361, 954, 583,
951, 694, 186, 491, 906, 448, 764, 854, 87, 636, 171,
777, 694, 876, 759, 815, 996, 816, 378, 779, 599, 118,
439, 384, 558, 684, 745, 150, 578, 382, 758, 857, 29,
671, 99, 249, 795, 808, 695, 431, 5, 591, 736, 235,
129, 179, 463, 522, 282, 502, 739, 889, 323, 635, 486,
477, 231, 502, 471, 524, 566, 189, 91, 998, 772, 108,
727, 234, 200, 139, 333, 258, 422, 160, 16, 255, 828,
476, 564, 161, 215, 912, 20, 896, 968, 285, 104, 561,
36, 389, 68, 684, 558, 83, 967, 992, 428, 610, 992,
968, 249, 723, 498, 961, 850, 665, 898, 53, 331, 507,
69, 164, 99, 435, 418, 104, 868, 468, 665, 849, 34,
778, 943, 390, 517, 338, 78, 31, 805, 802, 148, 448,
334, 645, 638, 513, 599, 584, 504, 93, 613, 794, 698,
898, 731, 777, 711, 189, 113, 838, 662, 106, 803, 338,
361, 631, 370, 805, 156, 583, 102, 486, 989, 81, 693,
530, 225, 706, 746, 165, 656, 827, 140, 2, 359, 516,
938, 452, 18, 399, 564, 668, 891, 557, 78, 15, 880,
796, 987, 205, 26, 665, 9, 227, 23, 222, 199, 111,
556, 897, 4, 544, 701, 408, 324, 833, 738, 954, 669,
671, 514, 886, 858, 89, 382, 452, 584, 231, 843, 971,
952, 162, 680, 861, 927, 55, 260, 9, 140, 495, 478,
891, 410, 406, 115, 83, 6, 809, 421, 611, 696, 182,
563, 567, 931, 899, 784, 936, 699, 733, 830, 760, 301,
919, 720, 1000, 601, 804, 942, 762, 332, 966, 192,
566, 788, 983, 584, 953, 63, 510, 281, 643, 399])
def test_snail_028(self):
self.assertEqual(snail([[694, 584, 826, 873, 217, 367, 668, 234, 472,
306, 498, 94, 613, 797],
[712, 162, 246, 54, 330, 345, 797, 656, 949,
377, 907, 79, 246, 655],
[393, 162, 490, 233, 843, 794, 437, 391, 266,
639, 553, 518, 364, 569],
[844, 274, 883, 549, 545, 431, 169, 974, 129,
186, 605, 391, 354, 562],
[439, 363, 626, 800, 507, 849, 391, 701, 310,
374, 946, 329, 720, 188],
[110, 517, 124, 454, 546, 362, 238, 717, 444,
560, 620, 885, 732, 631],
[849, 531, 960, 464, 448, 802, 101, 755, 69,
843, 256, 543, 728, 839],
[538, 525, 681, 672, 849, 637, 688, 939, 393,
184, 675, 434, 361, 557],
[483, 832, 588, 542, 124, 605, 146, 492, 359,
465, 278, 352, 815, 884],
[837, 448, 77, 252, 291, 313, 816, 79, 919, 188,
845, 26, 918, 190],
[994, 349, 148, 613, 557, 269, 695, 471, 944,
90, 2, 167, 136, 926],
[596, 304, 727, 835, 858, 635, 727, 136, 179,
266, 171, 679, 985, 945],
[152, 294, 615, 139, 465, 165, 578, 914, 232,
953, 268, 143, 847, 663],
[355, 96, 458, 217, 834, 690, 302, 691, 470,
344, 567, 66, 479, 144]]),
[694, 584, 826, 873, 217, 367, 668, 234, 472, 306, 498,
94, 613, 797, 655, 569, 562, 188, 631, 839, 557, 884,
190, 926, 945, 663, 144, 479, 66, 567, 344, 470, 691,
302, 690, 834, 217, 458, 96, 355, 152, 596, 994, 837,
483, 538, 849, 110, 439, 844, 393, 712, 162, 246, 54,
330, 345, 797, 656, 949, 377, 907, 79, 246, 364, 354,
720, 732, 728, 361, 815, 918, 136, 985, 847, 143, 268,
953, 232, 914, 578, 165, 465, 139, 615, 294, 304, 349,
448, 832, 525, 531, 517, 363, 274, 162, 490, 233, 843,
794, 437, 391, 266, 639, 553, 518, 391, 329, 885, 543,
434, 352, 26, 167, 679, 171, 266, 179, 136, 727, 635,
858, 835, 727, 148, 77, 588, 681, 960, 124, 626, 883,
549, 545, 431, 169, 974, 129, 186, 605, 946, 620, 256,
675, 278, 845, 2, 90, 944, 471, 695, 269, 557, 613,
252, 542, 672, 464, 454, 800, 507, 849, 391, 701, 310,
374, 560, 843, 184, 465, 188, 919, 79, 816, 313, 291,
124, 849, 448, 546, 362, 238, 717, 444, 69, 393, 359,
492, 146, 605, 637, 802, 101, 755, 939, 688])
def test_snail_029(self):
self.assertEqual(snail([[823, 448, 897, 244, 584, 461, 96],
[645, 751, 213, 852, 812, 16, 617],
[341, 284, 208, 458, 28, 238, 767],
[773, 348, 159, 197, 957, 501, 818],
[932, 118, 964, 418, 423, 847, 430],
[545, 667, 931, 75, 818, 645, 45],
[923, 151, 732, 63, 520, 681, 627]]),
[823, 448, 897, 244, 584, 461, 96, 617, 767, 818, 430,
45, 627, 681, 520, 63, 732, 151, 923, 545, 932, 773,
341, 645, 751, 213, 852, 812, 16, 238, 501, 847, 645,
818, 75, 931, 667, 118, 348, 284, 208, 458, 28, 957,
423, 418, 964, 159, 197])
def test_snail_030(self):
self.assertEqual(snail([[491, 432, 751, 729, 722, 964, 386, 710, 130,
369, 227, 487, 395, 914, 468, 885, 81, 569,
868, 900],
[925, 992, 601, 188, 204, 640, 239, 6, 26, 451,
26, 630, 429, 830, 38, 905, 555, 630, 296,
840],
[401, 86, 682, 405, 960, 499, 290, 765, 513,
376, 331, 78, 471, 999, 3, 328, 896, 758, 56,
75],
[542, 905, 880, 788, 546, 879, 658, 836, 787,
912, 968, 988, 98, 461, 973, 469, 371, 178,
984, 431],
[584, 627, 404, 160, 875, 721, 409, 163, 30,
127, 499, 300, 869, 690, 69, 260, 751, 151,
288, 319],
[748, 508, 826, 682, 70, 215, 89, 186, 418, 386,
474, 42, 389, 599, 872, 534, 181, 496, 186,
21],
[546, 745, 446, 346, 449, 807, 863, 996, 605,
427, 845, 182, 932, 282, 544, 650, 123, 188,
505, 745],
[107, 963, 507, 886, 162, 321, 597, 90, 576,
101, 818, 394, 542, 276, 578, 417, 797, 89,
366, 771],
[904, 230, 474, 400, 921, 749, 277, 826, 638,
294, 520, 617, 405, 983, 437, 87, 940, 492,
561, 407],
[877, 195, 809, 714, 64, 362, 585, 4, 995, 949,
383, 172, 55, 468, 637, 229, 746, 208, 91,
708],
[663, 758, 330, 359, 996, 67, 409, 169, 660,
688, 11, 50, 191, 88, 802, 834, 559, 139, 490,
412],
[310, 464, 204, 408, 801, 352, 18, 167, 815,
753, 758, 833, 85, 731, 253, 655, 290, 493,
356, 396],
[424, 931, 222, 6, 67, 347, 450, 528, 353, 444,
283, 971, 925, 76, 208, 101, 989, 64, 209,
875],
[903, 651, 952, 356, 647, 99, 895, 868, 203,
620, 147, 200, 657, 839, 745, 260, 916, 552,
896, 209],
[721, 17, 825, 638, 691, 971, 95, 844, 75, 203,
692, 210, 618, 113, 518, 82, 493, 463, 647,
122],
[335, 97, 438, 636, 568, 329, 681, 998, 316,
679, 597, 547, 505, 283, 748, 299, 800, 828,
521, 139],
[209, 110, 325, 990, 706, 379, 897, 133, 457,
573, 653, 863, 452, 819, 801, 756, 590, 925,
583, 731],
[816, 946, 134, 587, 645, 751, 780, 140, 731,
208, 504, 939, 401, 724, 140, 1000, 575, 15,
966, 719],
[929, 121, 255, 511, 401, 94, 7, 656, 871, 52,
589, 504, 456, 524, 492, 4, 513, 673, 536,
877],
[828, 402, 44, 162, 805, 675, 391, 875, 955,
410, 385, 625, 250, 837, 153, 922, 105, 279,
91, 121]]),
[491, 432, 751, 729, 722, 964, 386, 710, 130, 369, 227,
487, 395, 914, 468, 885, 81, 569, 868, 900, 840, 75,
431, 319, 21, 745, 771, 407, 708, 412, 396, 875, 209,
122, 139, 731, 719, 877, 121, 91, 279, 105, 922, 153,
837, 250, 625, 385, 410, 955, 875, 391, 675, 805, 162,
44, 402, 828, 929, 816, 209, 335, 721, 903, 424, 310,
663, 877, 904, 107, 546, 748, 584, 542, 401, 925, 992,
601, 188, 204, 640, 239, 6, 26, 451, 26, 630, 429,
830, 38, 905, 555, 630, 296, 56, 984, 288, 186, 505,
366, 561, 91, 490, 356, 209, 896, 647, 521, 583, 966,
536, 673, 513, 4, 492, 524, 456, 504, 589, 52, 871,
656, 7, 94, 401, 511, 255, 121, 946, 110, 97, 17, 651,
931, 464, 758, 195, 230, 963, 745, 508, 627, 905, 86,
682, 405, 960, 499, 290, 765, 513, 376, 331, 78, 471,
999, 3, 328, 896, 758, 178, 151, 496, 188, 89, 492,
208, 139, 493, 64, 552, 463, 828, 925, 15, 575, 1000,
140, 724, 401, 939, 504, 208, 731, 140, 780, 751, 645,
587, 134, 325, 438, 825, 952, 222, 204, 330, 809, 474,
507, 446, 826, 404, 880, 788, 546, 879, 658, 836, 787,
912, 968, 988, 98, 461, 973, 469, 371, 751, 181, 123,
797, 940, 746, 559, 290, 989, 916, 493, 800, 590, 756,
801, 819, 452, 863, 653, 573, 457, 133, 897, 379, 706,
990, 636, 638, 356, 6, 408, 359, 714, 400, 886, 346,
682, 160, 875, 721, 409, 163, 30, 127, 499, 300, 869,
690, 69, 260, 534, 650, 417, 87, 229, 834, 655, 101,
260, 82, 299, 748, 283, 505, 547, 597, 679, 316, 998,
681, 329, 568, 691, 647, 67, 801, 996, 64, 921, 162,
449, 70, 215, 89, 186, 418, 386, 474, 42, 389, 599,
872, 544, 578, 437, 637, 802, 253, 208, 745, 518, 113,
618, 210, 692, 203, 75, 844, 95, 971, 99, 347, 352,
67, 362, 749, 321, 807, 863, 996, 605, 427, 845, 182,
932, 282, 276, 983, 468, 88, 731, 76, 839, 657, 200,
147, 620, 203, 868, 895, 450, 18, 409, 585, 277, 597,
90, 576, 101, 818, 394, 542, 405, 55, 191, 85, 925,
971, 283, 444, 353, 528, 167, 169, 4, 826, 638, 294,
520, 617, 172, 50, 833, 758, 753, 815, 660, 995, 949,
383, 11, 688])
def test_snail_031(self):
self.assertEqual(snail(
[[751, 521, 950, 82], [455, 888, 335, 526], [105, 724, 129, 53],
[380, 655, 725, 828]]),
[751, 521, 950, 82, 526, 53, 828, 725, 655, 380, 105,
455, 888, 335, 129, 724])
def test_snail_032(self):
self.assertEqual(snail([[543]]), [543])
def test_snail_033(self):
self.assertEqual(snail([[229, 998, 713, 612, 345, 412, 73, 287, 921, 44,
509, 147, 815, 84],
[202, 726, 739, 170, 976, 345, 944, 506, 848,
942, 98, 297, 75, 807],
[893, 82, 958, 458, 916, 954, 418, 436, 492, 86,
792, 226, 925, 268],
[370, 388, 588, 171, 945, 358, 281, 657, 577,
147, 44, 352, 899, 119],
[63, 834, 521, 924, 276, 174, 483, 414, 999,
932, 97, 492, 833, 363],
[983, 187, 828, 23, 387, 853, 203, 130, 187,
820, 569, 974, 494, 870],
[265, 162, 207, 733, 32, 925, 259, 761, 166,
231, 504, 503, 64, 851],
[434, 330, 43, 791, 846, 790, 566, 474, 702,
462, 693, 826, 682, 881],
[752, 68, 291, 180, 294, 674, 433, 486, 768,
743, 498, 98, 61, 154],
[52, 47, 323, 362, 247, 135, 716, 566, 713, 977,
78, 222, 300, 909],
[265, 17, 534, 221, 142, 430, 935, 948, 600, 79,
898, 229, 949, 656],
[850, 639, 989, 941, 84, 62, 850, 437, 25, 538,
670, 868, 406, 755],
[370, 978, 377, 131, 102, 929, 459, 201, 14,
981, 461, 153, 665, 352],
[374, 581, 593, 665, 922, 259, 899, 586, 405,
812, 645, 820, 321, 535]]),
[229, 998, 713, 612, 345, 412, 73, 287, 921, 44, 509,
147, 815, 84, 807, 268, 119, 363, 870, 851, 881, 154,
909, 656, 755, 352, 535, 321, 820, 645, 812, 405, 586,
899, 259, 922, 665, 593, 581, 374, 370, 850, 265, 52,
752, 434, 265, 983, 63, 370, 893, 202, 726, 739, 170,
976, 345, 944, 506, 848, 942, 98, 297, 75, 925, 899,
833, 494, 64, 682, 61, 300, 949, 406, 665, 153, 461,
981, 14, 201, 459, 929, 102, 131, 377, 978, 639, 17,
47, 68, 330, 162, 187, 834, 388, 82, 958, 458, 916,
954, 418, 436, 492, 86, 792, 226, 352, 492, 974, 503,
826, 98, 222, 229, 868, 670, 538, 25, 437, 850, 62,
84, 941, 989, 534, 323, 291, 43, 207, 828, 521, 588,
171, 945, 358, 281, 657, 577, 147, 44, 97, 569, 504,
693, 498, 78, 898, 79, 600, 948, 935, 430, 142, 221,
362, 180, 791, 733, 23, 924, 276, 174, 483, 414, 999,
932, 820, 231, 462, 743, 977, 713, 566, 716, 135, 247,
294, 846, 32, 387, 853, 203, 130, 187, 166, 702, 768,
486, 433, 674, 790, 925, 259, 761, 474, 566])
def test_snail_034(self):
self.assertEqual(snail([[543, 159, 630, 512, 408, 22, 659, 938, 716,
955, 142, 6, 273, 723],
[899, 659, 592, 655, 57, 191, 321, 795, 226,
317, 372, 190, 368, 804],
[214, 369, 514, 853, 25, 423, 744, 462, 181,
663, 863, 747, 152, 353],
[117, 9, 923, 420, 253, 550, 729, 881, 696, 208,
269, 362, 242, 177],
[625, 547, 37, 512, 130, 542, 853, 646, 551,
801, 257, 306, 206, 361],
[271, 719, 731, 679, 306, 529, 531, 846, 891,
420, 871, 537, 514, 117],
[350, 890, 866, 614, 496, 485, 88, 13, 488, 842,
197, 891, 854, 554],
[278, 713, 485, 671, 556, 687, 246, 19, 293,
906, 1000, 375, 531, 126],
[641, 531, 586, 598, 991, 366, 229, 169, 644,
562, 847, 724, 546, 904],
[859, 329, 116, 455, 986, 255, 334, 156, 188,
438, 112, 409, 283, 653],
[844, 612, 215, 684, 518, 422, 922, 741, 33,
196, 272, 51, 604, 951],
[457, 68, 327, 589, 617, 942, 5, 200, 722, 725,
971, 886, 972, 961],
[817, 172, 829, 438, 738, 639, 453, 565, 270,
683, 405, 829, 664, 749],
[347, 518, 664, 43, 591, 52, 685, 427, 716, 578,
854, 88, 673, 458]]),
[543, 159, 630, 512, 408, 22, 659, 938, 716, 955, 142,
6, 273, 723, 804, 353, 177, 361, 117, 554, 126, 904,
653, 951, 961, 749, 458, 673, 88, 854, 578, 716, 427,
685, 52, 591, 43, 664, 518, 347, 817, 457, 844, 859,
641, 278, 350, 271, 625, 117, 214, 899, 659, 592, 655,
57, 191, 321, 795, 226, 317, 372, 190, 368, 152, 242,
206, 514, 854, 531, 546, 283, 604, 972, 664, 829, 405,
683, 270, 565, 453, 639, 738, 438, 829, 172, 68, 612,
329, 531, 713, 890, 719, 547, 9, 369, 514, 853, 25,
423, 744, 462, 181, 663, 863, 747, 362, 306, 537, 891,
375, 724, 409, 51, 886, 971, 725, 722, 200, 5, 942,
617, 589, 327, 215, 116, 586, 485, 866, 731, 37, 923,
420, 253, 550, 729, 881, 696, 208, 269, 257, 871, 197,
1000, 847, 112, 272, 196, 33, 741, 922, 422, 518, 684,
455, 598, 671, 614, 679, 512, 130, 542, 853, 646, 551,
801, 420, 842, 906, 562, 438, 188, 156, 334, 255, 986,
991, 556, 496, 306, 529, 531, 846, 891, 488, 293, 644,
169, 229, 366, 687, 485, 88, 13, 19, 246])
def test_snail_035(self):
self.assertEqual(snail(
[[805, 737, 255, 944, 227, 940, 373, 877, 581, 787, 278, 332],
[64, 412, 532, 342, 955, 252, 339, 890, 26, 793, 124, 394],
[814, 764, 949, 785, 415, 832, 711, 188, 65, 623, 255, 469],
[110, 743, 29, 583, 871, 275, 878, 329, 107, 698, 107, 523],
[212, 73, 731, 628, 188, 215, 22, 479, 650, 523, 1000, 926],
[383, 241, 377, 580, 798, 363, 103, 802, 427, 943, 877, 919],
[387, 291, 796, 951, 13, 601, 617, 451, 340, 203, 336, 42],
[412, 654, 456, 885, 799, 937, 971, 608, 17, 481, 383, 748],
[39, 178, 45, 684, 995, 672, 707, 397, 999, 98, 373, 396],
[62, 984, 818, 343, 914, 165, 470, 510, 86, 545, 993, 448],
[105, 178, 404, 878, 906, 445, 706, 798, 613, 433, 492, 518],
[744, 254, 817, 85, 813, 574, 193, 588, 505, 162, 819, 636]]),
[805, 737, 255, 944, 227, 940, 373, 877, 581, 787, 278,
332, 394, 469, 523, 926, 919, 42, 748, 396, 448, 518,
636, 819, 162, 505, 588, 193, 574, 813, 85, 817, 254,
744, 105, 62, 39, 412, 387, 383, 212, 110, 814, 64,
412, 532, 342, 955, 252, 339, 890, 26, 793, 124, 255,
107, 1000, 877, 336, 383, 373, 993, 492, 433, 613,
798, 706, 445, 906, 878, 404, 178, 984, 178, 654, 291,
241, 73, 743, 764, 949, 785, 415, 832, 711, 188, 65,
623, 698, 523, 943, 203, 481, 98, 545, 86, 510, 470,
165, 914, 343, 818, 45, 456, 796, 377, 731, 29, 583,
871, 275, 878, 329, 107, 650, 427, 340, 17, 999, 397,
707, 672, 995, 684, 885, 951, 580, 628, 188, 215, 22,
479, 802, 451, 608, 971, 937, 799, 13, 798, 363, 103,
617, 601])
def test_snail_036(self):
self.assertEqual(snail(
[[997, 44, 256, 241, 586, 435, 204, 852, 283, 678],
[536, 493, 608, 713, 378, 476, 645, 242, 657, 560],
[609, 310, 407, 973, 835, 59, 771, 982, 985, 55],
[948, 389, 927, 772, 391, 672, 254, 120, 915, 655],
[993, 544, 661, 167, 875, 343, 129, 64, 471, 611],
[186, 216, 598, 814, 94, 694, 135, 7, 374, 60],
[487, 528, 461, 860, 913, 283, 276, 354, 679, 778],
[636, 627, 996, 319, 813, 600, 548, 491, 948, 178],
[995, 381, 855, 47, 403, 250, 912, 709, 322, 993],
[604, 150, 814, 285, 749, 84, 752, 680, 900, 222]]),
[997, 44, 256, 241, 586, 435, 204, 852, 283, 678, 560,
55, 655, 611, 60, 778, 178, 993, 222, 900, 680, 752,
84, 749, 285, 814, 150, 604, 995, 636, 487, 186, 993,
948, 609, 536, 493, 608, 713, 378, 476, 645, 242, 657,
985, 915, 471, 374, 679, 948, 322, 709, 912, 250, 403,
47, 855, 381, 627, 528, 216, 544, 389, 310, 407, 973,
835, 59, 771, 982, 120, 64, 7, 354, 491, 548, 600,
813, 319, 996, 461, 598, 661, 927, 772, 391, 672, 254,
129, 135, 276, 283, 913, 860, 814, 167, 875, 343, 694,
94])
def test_snail_037(self):
self.assertEqual(snail([[924, 474, 327, 244, 69, 575, 52, 587, 477, 521,
871, 701, 236],
[521, 643, 870, 149, 368, 896, 185, 164, 142,
419, 686, 209, 67],
[161, 18, 876, 414, 245, 830, 900, 985, 627,
760, 366, 872, 85],
[885, 784, 859, 378, 232, 55, 455, 716, 558, 68,
430, 331, 35],
[859, 82, 149, 721, 581, 743, 272, 68, 600, 363,
433, 350, 62],
[435, 913, 330, 343, 219, 649, 84, 442, 282,
315, 368, 567, 33],
[756, 543, 726, 158, 116, 526, 43, 351, 731,
966, 190, 494, 396],
[991, 673, 736, 193, 693, 113, 21, 298, 699,
837, 141, 997, 872],
[589, 658, 79, 77, 493, 79, 163, 484, 631, 547,
53, 991, 387],
[536, 709, 286, 817, 344, 230, 460, 648, 13, 13,
268, 604, 512],
[107, 41, 145, 882, 103, 149, 377, 919, 188,
631, 686, 965, 945],
[983, 912, 408, 29, 227, 783, 589, 629, 432,
119, 498, 481, 652],
[470, 415, 9, 285, 695, 290, 688, 88, 702, 962,
280, 589, 7]]),
[924, 474, 327, 244, 69, 575, 52, 587, 477, 521, 871,
701, 236, 67, 85, 35, 62, 33, 396, 872, 387, 512, 945,
652, 7, 589, 280, 962, 702, 88, 688, 290, 695, 285, 9,
415, 470, 983, 107, 536, 589, 991, 756, 435, 859, 885,
161, 521, 643, 870, 149, 368, 896, 185, 164, 142, 419,
686, 209, 872, 331, 350, 567, 494, 997, 991, 604, 965,
481, 498, 119, 432, 629, 589, 783, 227, 29, 408, 912,
41, 709, 658, 673, 543, 913, 82, 784, 18, 876, 414,
245, 830, 900, 985, 627, 760, 366, 430, 433, 368, 190,
141, 53, 268, 686, 631, 188, 919, 377, 149, 103, 882,
145, 286, 79, 736, 726, 330, 149, 859, 378, 232, 55,
455, 716, 558, 68, 363, 315, 966, 837, 547, 13, 13,
648, 460, 230, 344, 817, 77, 193, 158, 343, 721, 581,
743, 272, 68, 600, 282, 731, 699, 631, 484, 163, 79,
493, 693, 116, 219, 649, 84, 442, 351, 298, 21, 113,
526, 43])
def test_snail_038(self):
self.assertEqual(snail([[332, 189, 638, 117, 858, 164, 701, 784, 749,
950, 707, 293, 233, 576],
[380, 752, 798, 298, 597, 470, 623, 773, 953,
86, 251, 504, 126, 633],
[337, 241, 413, 616, 605, 278, 289, 366, 162,
83, 632, 601, 771, 812],
[814, 497, 196, 480, 388, 471, 689, 147, 436,
568, 298, 36, 503, 120],
[867, 706, 472, 178, 529, 333, 885, 252, 864,
324, 288, 246, 463, 478],
[702, 781, 720, 927, 185, 781, 841, 175, 822,
170, 77, 144, 909, 301],
[779, 325, 154, 452, 539, 389, 191, 453, 664,
920, 216, 383, 873, 917],
[859, 868, 29, 729, 640, 104, 731, 668, 816,
335, 907, 242, 563, 950],
[230, 53, 485, 405, 276, 592, 563, 860, 770,
124, 501, 431, 370, 908],
[355, 994, 912, 644, 789, 852, 140, 693, 256,
677, 136, 488, 337, 317],
[346, 323, 9, 399, 577, 991, 9, 152, 271, 188,
222, 851, 696, 985],
[481, 705, 515, 680, 129, 670, 380, 894, 951,
245, 577, 654, 109, 754],
[889, 295, 885, 544, 579, 931, 693, 95, 772,
865, 210, 62, 232, 361],
[743, 942, 729, 57, 879, 664, 20, 779, 401, 449,
973, 521, 380, 393]]),
[332, 189, 638, 117, 858, 164, 701, 784, 749, 950, 707,
293, 233, 576, 633, 812, 120, 478, 301, 917, 950, 908,
317, 985, 754, 361, 393, 380, 521, 973, 449, 401, 779,
20, 664, 879, 57, 729, 942, 743, 889, 481, 346, 355,
230, 859, 779, 702, 867, 814, 337, 380, 752, 798, 298,
597, 470, 623, 773, 953, 86, 251, 504, 126, 771, 503,
463, 909, 873, 563, 370, 337, 696, 109, 232, 62, 210,
865, 772, 95, 693, 931, 579, 544, 885, 295, 705, 323,
994, 53, 868, 325, 781, 706, 497, 241, 413, 616, 605,
278, 289, 366, 162, 83, 632, 601, 36, 246, 144, 383,
242, 431, 488, 851, 654, 577, 245, 951, 894, 380, 670,
129, 680, 515, 9, 912, 485, 29, 154, 720, 472, 196,
480, 388, 471, 689, 147, 436, 568, 298, 288, 77, 216,
907, 501, 136, 222, 188, 271, 152, 9, 991, 577, 399,
644, 405, 729, 452, 927, 178, 529, 333, 885, 252, 864,
324, 170, 920, 335, 124, 677, 256, 693, 140, 852, 789,
276, 640, 539, 185, 781, 841, 175, 822, 664, 816, 770,
860, 563, 592, 104, 389, 191, 453, 668, 731])
def test_snail_039(self):
self.assertEqual(snail([[686, 345, 940, 678, 562, 159, 206, 990, 927,
298, 539, 662, 265, 951, 400],
[528, 940, 929, 898, 770, 67, 357, 491, 61, 867,
425, 746, 957, 613, 720],
[116, 943, 331, 211, 933, 899, 736, 195, 147,
366, 181, 973, 59, 873, 379],
[161, 879, 580, 471, 865, 871, 542, 206, 816,
807, 436, 387, 893, 970, 145],
[367, 686, 933, 883, 434, 565, 652, 528, 199,
346, 378, 377, 911, 746, 288],
[844, 843, 849, 486, 101, 957, 940, 223, 930,
113, 359, 782, 652, 783, 559],
[56, 652, 242, 424, 531, 187, 16, 752, 168, 603,
702, 435, 237, 814, 398],
[10, 230, 815, 547, 434, 244, 56, 947, 758, 155,
407, 213, 366, 418, 518],
[438, 950, 214, 575, 809, 811, 370, 916, 57,
964, 918, 461, 428, 971, 456],
[190, 751, 7, 549, 101, 648, 636, 735, 371, 122,
316, 848, 463, 552, 41],
[82, 332, 595, 889, 290, 652, 211, 874, 249,
740, 352, 870, 517, 810, 422],
[248, 681, 64, 600, 6, 399, 108, 991, 123, 413,
862, 309, 28, 957, 861],
[603, 104, 908, 12, 827, 54, 796, 166, 701, 933,
180, 308, 604, 374, 950],
[495, 877, 743, 460, 546, 160, 966, 712, 708,
606, 52, 445, 957, 762, 950],
[39, 185, 527, 228, 972, 273, 584, 336, 352,
376, 681, 554, 34, 322, 125]]),
[686, 345, 940, 678, 562, 159, 206, 990, 927, 298, 539,
662, 265, 951, 400, 720, 379, 145, 288, 559, 398, 518,
456, 41, 422, 861, 950, 950, 125, 322, 34, 554, 681,
376, 352, 336, 584, 273, 972, 228, 527, 185, 39, 495,
603, 248, 82, 190, 438, 10, 56, 844, 367, 161, 116,
528, 940, 929, 898, 770, 67, 357, 491, 61, 867, 425,
746, 957, 613, 873, 970, 746, 783, 814, 418, 971, 552,
810, 957, 374, 762, 957, 445, 52, 606, 708, 712, 966,
160, 546, 460, 743, 877, 104, 681, 332, 751, 950, 230,
652, 843, 686, 879, 943, 331, 211, 933, 899, 736, 195,
147, 366, 181, 973, 59, 893, 911, 652, 237, 366, 428,
463, 517, 28, 604, 308, 180, 933, 701, 166, 796, 54,
827, 12, 908, 64, 595, 7, 214, 815, 242, 849, 933,
580, 471, 865, 871, 542, 206, 816, 807, 436, 387, 377,
782, 435, 213, 461, 848, 870, 309, 862, 413, 123, 991,
108, 399, 6, 600, 889, 549, 575, 547, 424, 486, 883,
434, 565, 652, 528, 199, 346, 378, 359, 702, 407, 918,
316, 352, 740, 249, 874, 211, 652, 290, 101, 809, 434,
531, 101, 957, 940, 223, 930, 113, 603, 155, 964, 122,
371, 735, 636, 648, 811, 244, 187, 16, 752, 168, 758,
57, 916, 370, 56, 947])
def test_snail_040(self):
self.assertEqual(snail([[234, 459, 8, 740, 18, 612, 971, 482, 105, 70],
[725, 582, 552, 166, 909, 83, 323, 842, 901,
479],
[139, 880, 685, 560, 197, 820, 458, 261, 491,
930],
[917, 677, 674, 610, 470, 744, 893, 604, 310,
818],
[826, 470, 627, 391, 222, 544, 687, 939, 544,
952],
[68, 614, 803, 517, 852, 251, 87, 88, 838, 229],
[269, 848, 520, 498, 486, 567, 575, 779, 706,
74],
[567, 438, 209, 639, 573, 640, 885, 830, 665,
130],
[183, 483, 877, 703, 75, 515, 323, 482, 901,
562],
[426, 570, 572, 144, 924, 285, 48, 976, 282,
802]]),
[234, 459, 8, 740, 18, 612, 971, 482, 105, 70, 479,
930, 818, 952, 229, 74, 130, 562, 802, 282, 976, 48,
285, 924, 144, 572, 570, 426, 183, 567, 269, 68, 826,
917, 139, 725, 582, 552, 166, 909, 83, 323, 842, 901,
491, 310, 544, 838, 706, 665, 901, 482, 323, 515, 75,
703, 877, 483, 438, 848, 614, 470, 677, 880, 685, 560,
197, 820, 458, 261, 604, 939, 88, 779, 830, 885, 640,
573, 639, 209, 520, 803, 627, 674, 610, 470, 744, 893,
687, 87, 575, 567, 486, 498, 517, 391, 222, 544, 251,
852])
def test_snail_041(self):
self.assertEqual(snail(
[[966, 770, 415, 443, 591], [733, 239, 637, 938, 246],
[567, 292, 816, 631, 702], [315, 312, 771, 408, 474],
[275, 740, 146, 719, 961]]),
[966, 770, 415, 443, 591, 246, 702, 474, 961, 719, 146,
740, 275, 315, 567, 733, 239, 637, 938, 631, 408, 771,
312, 292, 816])
def test_snail_042(self):
self.assertEqual(snail([[928, 128, 90, 593, 147, 757, 325, 206, 400,
949, 633, 558, 879],
[190, 99, 708, 968, 665, 847, 159, 388, 584,
547, 469, 788, 586],
[684, 65, 832, 834, 651, 891, 458, 712, 596,
377, 465, 789, 44],
[653, 136, 125, 990, 21, 351, 405, 771, 910,
922, 213, 998, 75],
[165, 220, 334, 367, 603, 930, 821, 232, 624,
209, 353, 156, 271],
[437, 145, 802, 747, 716, 565, 784, 364, 524,
475, 283, 81, 501],
[821, 590, 652, 948, 704, 922, 334, 102, 905,
13, 335, 462, 425],
[118, 633, 924, 637, 123, 245, 432, 807, 579,
480, 828, 79, 942],
[805, 592, 718, 356, 790, 549, 125, 844, 691,
71, 835, 150, 747],
[87, 541, 24, 922, 952, 881, 463, 192, 319, 765,
771, 368, 432],
[149, 859, 949, 368, 342, 942, 337, 598, 490,
889, 50, 794, 786],
[868, 167, 392, 93, 126, 521, 922, 941, 210,
170, 982, 94, 43],
[583, 931, 24, 750, 990, 453, 518, 9, 657, 789,
678, 676, 756]]),
[928, 128, 90, 593, 147, 757, 325, 206, 400, 949, 633,
558, 879, 586, 44, 75, 271, 501, 425, 942, 747, 432,
786, 43, 756, 676, 678, 789, 657, 9, 518, 453, 990,
750, 24, 931, 583, 868, 149, 87, 805, 118, 821, 437,
165, 653, 684, 190, 99, 708, 968, 665, 847, 159, 388,
584, 547, 469, 788, 789, 998, 156, 81, 462, 79, 150,
368, 794, 94, 982, 170, 210, 941, 922, 521, 126, 93,
392, 167, 859, 541, 592, 633, 590, 145, 220, 136, 65,
832, 834, 651, 891, 458, 712, 596, 377, 465, 213, 353,
283, 335, 828, 835, 771, 50, 889, 490, 598, 337, 942,
342, 368, 949, 24, 718, 924, 652, 802, 334, 125, 990,
21, 351, 405, 771, 910, 922, 209, 475, 13, 480, 71,
765, 319, 192, 463, 881, 952, 922, 356, 637, 948, 747,
367, 603, 930, 821, 232, 624, 524, 905, 579, 691, 844,
125, 549, 790, 123, 704, 716, 565, 784, 364, 102, 807,
432, 245, 922, 334])
def test_snail_043(self):
self.assertEqual(snail(
[[87, 462, 110, 33, 41, 613, 234, 971, 424, 490, 399],
[489, 50, 350, 304, 182, 24, 614, 707, 935, 678, 706],
[363, 94, 140, 854, 757, 467, 369, 903, 629, 342, 144],
[838, 301, 145, 18, 841, 484, 374, 723, 136, 333, 757],
[316, 713, 514, 19, 847, 337, 830, 358, 313, 138, 270],
[869, 803, 76, 126, 424, 80, 383, 117, 180, 519, 534],
[663, 709, 774, 866, 180, 59, 780, 653, 290, 958, 920],
[931, 926, 174, 65, 301, 51, 255, 19, 439, 910, 474],
[229, 84, 159, 158, 470, 597, 842, 83, 794, 285, 20],
[248, 938, 591, 246, 529, 506, 869, 146, 600, 738, 931],
[391, 267, 55, 182, 281, 344, 431, 338, 792, 443, 687]]),
[87, 462, 110, 33, 41, 613, 234, 971, 424, 490, 399,
706, 144, 757, 270, 534, 920, 474, 20, 931, 687, 443,
792, 338, 431, 344, 281, 182, 55, 267, 391, 248, 229,
931, 663, 869, 316, 838, 363, 489, 50, 350, 304, 182,
24, 614, 707, 935, 678, 342, 333, 138, 519, 958, 910,
285, 738, 600, 146, 869, 506, 529, 246, 591, 938, 84,
926, 709, 803, 713, 301, 94, 140, 854, 757, 467, 369,
903, 629, 136, 313, 180, 290, 439, 794, 83, 842, 597,
470, 158, 159, 174, 774, 76, 514, 145, 18, 841, 484,
374, 723, 358, 117, 653, 19, 255, 51, 301, 65, 866,
126, 19, 847, 337, 830, 383, 780, 59, 180, 424, 80])
def test_snail_044(self):
self.assertEqual(snail(
[[64, 644, 694, 5, 163, 760, 568, 84, 67, 517, 872],
[933, 412, 172, 162, 97, 626, 830, 969, 809, 522, 539],
[910, 121, 228, 803, 443, 4, 341, 64, 60, 438, 964],
[320, 135, 26, 700, 58, 741, 111, 944, 580, 855, 195],
[2, 802, 971, 42, 232, 432, 910, 803, 694, 46, 826],
[612, 974, 539, 639, 21, 878, 809, 246, 218, 331, 974],
[804, 448, 962, 406, 439, 556, 826, 109, 798, 609, 867],
[260, 335, 33, 122, 577, 639, 88, 887, 760, 705, 784],
[893, 908, 88, 16, 905, 923, 220, 690, 648, 747, 591],
[276, 217, 551, 996, 879, 575, 154, 724, 468, 856, 317],
[427, 269, 210, 221, 352, 980, 952, 189, 573, 520, 383]]),
[64, 644, 694, 5, 163, 760, 568, 84, 67, 517, 872, 539,
964, 195, 826, 974, 867, 784, 591, 317, 383, 520, 573,
189, 952, 980, 352, 221, 210, 269, 427, 276, 893, 260,
804, 612, 2, 320, 910, 933, 412, 172, 162, 97, 626,
830, 969, 809, 522, 438, 855, 46, 331, 609, 705, 747,
856, 468, 724, 154, 575, 879, 996, 551, 217, 908, 335,
448, 974, 802, 135, 121, 228, 803, 443, 4, 341, 64,
60, 580, 694, 218, 798, 760, 648, 690, 220, 923, 905,
16, 88, 33, 962, 539, 971, 26, 700, 58, 741, 111, 944,
803, 246, 109, 887, 88, 639, 577, 122, 406, 639, 42,
232, 432, 910, 809, 826, 556, 439, 21, 878])
def test_snail_045(self):
self.assertEqual(snail([[631, 374, 877, 595, 738, 324, 704, 280, 468,
923, 505, 471, 786],
[725, 339, 191, 381, 268, 471, 998, 922, 108,
118, 197, 889, 647],
[422, 442, 5, 197, 843, 702, 57, 58, 593, 76,
159, 773, 840],
[166, 158, 990, 841, 117, 450, 765, 455, 254,
99, 224, 624, 608],
[129, 252, 729, 219, 199, 309, 229, 855, 542,
972, 470, 850, 286],
[255, 368, 57, 890, 572, 308, 655, 779, 134,
580, 335, 387, 888],
[27, 281, 301, 15, 780, 318, 425, 931, 277, 972,
499, 622, 692],
[466, 70, 405, 84, 594, 63, 228, 946, 440, 354,
720, 480, 259],
[257, 323, 934, 503, 258, 510, 921, 254, 430,
508, 484, 353, 949],
[321, 168, 497, 248, 670, 628, 258, 877, 585,
965, 796, 567, 233],
[190, 127, 715, 552, 621, 806, 212, 367, 420,
826, 534, 428, 604],
[908, 504, 880, 691, 117, 289, 731, 232, 629,
161, 417, 942, 52],
[341, 721, 127, 728, 46, 763, 884, 431, 905,
951, 338, 775, 868]]),
[631, 374, 877, 595, 738, 324, 704, 280, 468, 923, 505,
471, 786, 647, 840, 608, 286, 888, 692, 259, 949, 233,
604, 52, 868, 775, 338, 951, 905, 431, 884, 763, 46,
728, 127, 721, 341, 908, 190, 321, 257, 466, 27, 255,
129, 166, 422, 725, 339, 191, 381, 268, 471, 998, 922,
108, 118, 197, 889, 773, 624, 850, 387, 622, 480, 353,
567, 428, 942, 417, 161, 629, 232, 731, 289, 117, 691,
880, 504, 127, 168, 323, 70, 281, 368, 252, 158, 442,
5, 197, 843, 702, 57, 58, 593, 76, 159, 224, 470, 335,
499, 720, 484, 796, 534, 826, 420, 367, 212, 806, 621,
552, 715, 497, 934, 405, 301, 57, 729, 990, 841, 117,
450, 765, 455, 254, 99, 972, 580, 972, 354, 508, 965,
585, 877, 258, 628, 670, 248, 503, 84, 15, 890, 219,
199, 309, 229, 855, 542, 134, 277, 440, 430, 254, 921,
510, 258, 594, 780, 572, 308, 655, 779, 931, 946, 228,
63, 318, 425])
def test_snail_046(self):
self.assertEqual(snail(
[[448, 727, 434, 177, 987], [288, 839, 372, 379, 326],
[266, 287, 407, 590, 327], [782, 941, 470, 580, 365],
[823, 313, 939, 776, 834]]),
[448, 727, 434, 177, 987, 326, 327, 365, 834, 776, 939,
313, 823, 782, 266, 288, 839, 372, 379, 590, 580, 470,
941, 287, 407])
def test_snail_047(self):
self.assertEqual(snail([[134, 625, 697, 457, 3, 817, 998, 303, 562, 680,
864, 613, 483, 648, 569, 37],
[328, 426, 402, 699, 409, 971, 63, 339, 238,
759, 392, 835, 574, 349, 949, 842],
[491, 104, 329, 958, 321, 561, 47, 185, 759,
121, 608, 163, 746, 268, 114, 96],
[166, 374, 830, 603, 171, 472, 891, 395, 650,
879, 219, 441, 151, 672, 331, 202],
[763, 122, 903, 770, 555, 406, 876, 126, 509,
564, 333, 937, 863, 163, 970, 818],
[736, 749, 999, 758, 110, 809, 701, 861, 153,
823, 721, 107, 944, 830, 750, 333],
[750, 454, 398, 921, 852, 451, 774, 157, 715,
578, 474, 135, 955, 838, 386, 887],
[140, 935, 474, 862, 292, 785, 433, 271, 153,
908, 426, 686, 694, 206, 251, 533],
[13, 708, 970, 604, 773, 469, 663, 311, 734,
930, 528, 284, 558, 278, 112, 796],
[737, 293, 588, 611, 94, 821, 436, 105, 464,
543, 35, 623, 3, 33, 611, 809],
[812, 394, 490, 319, 385, 300, 47, 217, 181,
839, 527, 229, 889, 212, 754, 34],
[615, 169, 501, 732, 472, 298, 728, 494, 639,
582, 167, 79, 679, 66, 116, 445],
[307, 688, 864, 469, 119, 374, 338, 182, 396,
651, 77, 319, 744, 499, 95, 599],
[684, 884, 412, 446, 154, 747, 892, 34, 875,
845, 609, 455, 551, 940, 151, 932],
[949, 14, 409, 86, 966, 430, 157, 919, 875, 783,
268, 184, 420, 162, 970, 673],
[65, 50, 700, 314, 348, 547, 655, 313, 165, 573,
789, 164, 219, 216, 353, 975]]),
[134, 625, 697, 457, 3, 817, 998, 303, 562, 680, 864,
613, 483, 648, 569, 37, 842, 96, 202, 818, 333, 887,
533, 796, 809, 34, 445, 599, 932, 673, 975, 353, 216,
219, 164, 789, 573, 165, 313, 655, 547, 348, 314, 700,
50, 65, 949, 684, 307, 615, 812, 737, 13, 140, 750,
736, 763, 166, 491, 328, 426, 402, 699, 409, 971, 63,
339, 238, 759, 392, 835, 574, 349, 949, 114, 331, 970,
750, 386, 251, 112, 611, 754, 116, 95, 151, 970, 162,
420, 184, 268, 783, 875, 919, 157, 430, 966, 86, 409,
14, 884, 688, 169, 394, 293, 708, 935, 454, 749, 122,
374, 104, 329, 958, 321, 561, 47, 185, 759, 121, 608,
163, 746, 268, 672, 163, 830, 838, 206, 278, 33, 212,
66, 499, 940, 551, 455, 609, 845, 875, 34, 892, 747,
154, 446, 412, 864, 501, 490, 588, 970, 474, 398, 999,
903, 830, 603, 171, 472, 891, 395, 650, 879, 219, 441,
151, 863, 944, 955, 694, 558, 3, 889, 679, 744, 319,
77, 651, 396, 182, 338, 374, 119, 469, 732, 319, 611,
604, 862, 921, 758, 770, 555, 406, 876, 126, 509, 564,
333, 937, 107, 135, 686, 284, 623, 229, 79, 167, 582,
639, 494, 728, 298, 472, 385, 94, 773, 292, 852, 110,
809, 701, 861, 153, 823, 721, 474, 426, 528, 35, 527,
839, 181, 217, 47, 300, 821, 469, 785, 451, 774, 157,
715, 578, 908, 930, 543, 464, 105, 436, 663, 433, 271,
153, 734, 311])
def test_snail_048(self):
self.assertEqual(snail(
[[148, 131, 809, 558, 988], [226, 872, 217, 699, 709],
[326, 703, 976, 559, 826], [749, 582, 891, 321, 58],
[773, 142, 687, 234, 325]]),
[148, 131, 809, 558, 988, 709, 826, 58, 325, 234, 687,
142, 773, 749, 326, 226, 872, 217, 699, 559, 321, 891,
582, 703, 976])
def test_snail_049(self):
self.assertEqual(snail([[705, 149, 326, 506, 792, 406, 65, 525, 996,
158, 592, 282, 643, 696, 31, 520, 757, 275,
276],
[395, 704, 227, 598, 163, 173, 844, 171, 882,
571, 228, 161, 943, 43, 278, 3, 680, 719, 746],
[871, 369, 979, 617, 840, 771, 315, 81, 751,
543, 799, 516, 452, 899, 115, 102, 262, 234,
751],
[667, 55, 885, 708, 943, 586, 330, 992, 663, 19,
180, 786, 89, 208, 486, 706, 742, 854, 883],
[103, 917, 812, 629, 301, 326, 783, 757, 747,
217, 464, 220, 562, 390, 45, 883, 755, 278,
96],
[58, 584, 52, 378, 774, 536, 631, 392, 592, 219,
897, 685, 895, 23, 749, 884, 417, 365, 463],
[664, 410, 795, 818, 377, 952, 133, 43, 696,
342, 423, 412, 93, 337, 963, 722, 730, 824,
748],
[274, 831, 339, 463, 30, 408, 981, 958, 816,
799, 771, 659, 368, 494, 854, 878, 811, 16,
433],
[55, 449, 296, 203, 66, 988, 124, 753, 17, 600,
108, 79, 710, 973, 4, 847, 137, 725, 579],
[180, 356, 532, 283, 330, 882, 65, 248, 621,
572, 310, 119, 394, 624, 463, 326, 872, 983,
335],
[125, 938, 860, 228, 485, 110, 914, 335, 985,
788, 16, 492, 118, 658, 83, 673, 310, 463,
128],
[896, 593, 150, 280, 186, 824, 408, 2, 842, 388,
750, 674, 634, 221, 435, 728, 183, 685, 119],
[478, 785, 827, 612, 727, 240, 864, 732, 557,
136, 936, 280, 12, 953, 210, 158, 70, 762, 97],
[209, 645, 785, 915, 570, 703, 602, 696, 480,
206, 360, 662, 900, 39, 967, 709, 439, 5, 933],
[441, 925, 978, 564, 488, 326, 796, 781, 197,
696, 81, 630, 144, 317, 215, 987, 154, 30,
142],
[112, 745, 428, 350, 952, 650, 589, 463, 204,
57, 648, 273, 336, 497, 280, 697, 991, 997,
700],
[731, 537, 58, 463, 528, 662, 243, 565, 934,
490, 476, 244, 23, 859, 237, 545, 623, 6, 478],
[54, 910, 609, 160, 253, 282, 264, 395, 951,
466, 832, 888, 589, 309, 698, 27, 242, 647,
506],
[954, 699, 371, 224, 674, 22, 408, 24, 461, 574,
344, 658, 634, 902, 534, 85, 9, 177, 809]]),
[705, 149, 326, 506, 792, 406, 65, 525, 996, 158, 592,
282, 643, 696, 31, 520, 757, 275, 276, 746, 751, 883,
96, 463, 748, 433, 579, 335, 128, 119, 97, 933, 142,
700, 478, 506, 809, 177, 9, 85, 534, 902, 634, 658,
344, 574, 461, 24, 408, 22, 674, 224, 371, 699, 954,
54, 731, 112, 441, 209, 478, 896, 125, 180, 55, 274,
664, 58, 103, 667, 871, 395, 704, 227, 598, 163, 173,
844, 171, 882, 571, 228, 161, 943, 43, 278, 3, 680,
719, 234, 854, 278, 365, 824, 16, 725, 983, 463, 685,
762, 5, 30, 997, 6, 647, 242, 27, 698, 309, 589, 888,
832, 466, 951, 395, 264, 282, 253, 160, 609, 910, 537,
745, 925, 645, 785, 593, 938, 356, 449, 831, 410, 584,
917, 55, 369, 979, 617, 840, 771, 315, 81, 751, 543,
799, 516, 452, 899, 115, 102, 262, 742, 755, 417, 730,
811, 137, 872, 310, 183, 70, 439, 154, 991, 623, 545,
237, 859, 23, 244, 476, 490, 934, 565, 243, 662, 528,
463, 58, 428, 978, 785, 827, 150, 860, 532, 296, 339,
795, 52, 812, 885, 708, 943, 586, 330, 992, 663, 19,
180, 786, 89, 208, 486, 706, 883, 884, 722, 878, 847,
326, 673, 728, 158, 709, 987, 697, 280, 497, 336, 273,
648, 57, 204, 463, 589, 650, 952, 350, 564, 915, 612,
280, 228, 283, 203, 463, 818, 378, 629, 301, 326, 783,
757, 747, 217, 464, 220, 562, 390, 45, 749, 963, 854,
4, 463, 83, 435, 210, 967, 215, 317, 144, 630, 81,
696, 197, 781, 796, 326, 488, 570, 727, 186, 485, 330,
66, 30, 377, 774, 536, 631, 392, 592, 219, 897, 685,
895, 23, 337, 494, 973, 624, 658, 221, 953, 39, 900,
662, 360, 206, 480, 696, 602, 703, 240, 824, 110, 882,
988, 408, 952, 133, 43, 696, 342, 423, 412, 93, 368,
710, 394, 118, 634, 12, 280, 936, 136, 557, 732, 864,
408, 914, 65, 124, 981, 958, 816, 799, 771, 659, 79,
119, 492, 674, 750, 388, 842, 2, 335, 248, 753, 17,
600, 108, 310, 16, 788, 985, 621, 572])
def test_snail_050(self):
self.assertEqual(snail([[772, 352, 920, 451, 295, 883, 38, 33, 562, 598,
383, 190, 999, 918, 657, 173, 310, 243, 749,
460],
[868, 561, 653, 879, 805, 379, 61, 961, 64, 493,
854, 923, 284, 452, 702, 792, 656, 695, 771,
962],
[575, 398, 964, 954, 690, 209, 562, 164, 793,
976, 475, 330, 814, 655, 710, 811, 570, 20,
373, 610],
[799, 80, 212, 607, 883, 605, 697, 849, 982,
661, 68, 1000, 250, 950, 796, 122, 601, 798,
50, 473],
[648, 224, 554, 600, 467, 884, 2, 280, 425, 105,
226, 557, 661, 814, 881, 477, 370, 872, 659,
927],
[50, 371, 90, 503, 987, 116, 255, 374, 300, 948,
323, 898, 296, 361, 455, 546, 622, 633, 987,
34],
[579, 674, 687, 200, 919, 843, 315, 736, 450,
74, 420, 214, 736, 519, 696, 801, 571, 265, 46,
237],
[563, 899, 518, 645, 484, 597, 183, 589, 888,
826, 717, 73, 576, 799, 888, 231, 304, 510,
167, 831],
[724, 228, 499, 474, 241, 346, 119, 150, 200,
443, 641, 64, 147, 137, 161, 378, 536, 46, 176,
711],
[516, 527, 767, 644, 171, 340, 400, 775, 654,
690, 324, 130, 873, 579, 34, 550, 42, 834, 905,
174],
[26, 59, 337, 14, 944, 322, 490, 974, 40, 995,
912, 636, 919, 123, 190, 482, 123, 182, 178,
289],
[730, 222, 970, 185, 610, 987, 177, 447, 885,
117, 172, 22, 795, 119, 487, 673, 245, 819,
515, 318],
[962, 540, 368, 132, 532, 594, 446, 193, 252,
326, 872, 981, 622, 416, 624, 320, 71, 891, 65,
983],
[566, 302, 822, 651, 557, 542, 962, 668, 813,
564, 474, 351, 757, 454, 748, 63, 925, 612,
389, 297],
[946, 119, 214, 810, 762, 218, 502, 3, 429, 607,
197, 465, 126, 778, 887, 847, 487, 519, 304,
84],
[442, 477, 56, 239, 116, 18, 277, 482, 595, 442,
203, 989, 474, 240, 792, 795, 57, 710, 537,
814],
[53, 528, 760, 968, 553, 920, 449, 642, 913,
164, 142, 430, 374, 756, 484, 778, 935, 732,
652, 532],
[372, 903, 178, 880, 456, 257, 221, 977, 746,
807, 729, 207, 281, 9, 208, 994, 701, 260, 811,
925],
[377, 548, 971, 375, 954, 496, 764, 731, 346,
439, 38, 33, 65, 533, 830, 448, 465, 934, 44,
687],
[213, 763, 43, 223, 196, 717, 119, 184, 247,
740, 909, 305, 646, 450, 666, 173, 25, 546,
108, 783]]),
[772, 352, 920, 451, 295, 883, 38, 33, 562, 598, 383,
190, 999, 918, 657, 173, 310, 243, 749, 460, 962, 610,
473, 927, 34, 237, 831, 711, 174, 289, 318, 983, 297,
84, 814, 532, 925, 687, 783, 108, 546, 25, 173, 666,
450, 646, 305, 909, 740, 247, 184, 119, 717, 196, 223,
43, 763, 213, 377, 372, 53, 442, 946, 566, 962, 730,
26, 516, 724, 563, 579, 50, 648, 799, 575, 868, 561,
653, 879, 805, 379, 61, 961, 64, 493, 854, 923, 284,
452, 702, 792, 656, 695, 771, 373, 50, 659, 987, 46,
167, 176, 905, 178, 515, 65, 389, 304, 537, 652, 811,
44, 934, 465, 448, 830, 533, 65, 33, 38, 439, 346,
731, 764, 496, 954, 375, 971, 548, 903, 528, 477, 119,
302, 540, 222, 59, 527, 228, 899, 674, 371, 224, 80,
398, 964, 954, 690, 209, 562, 164, 793, 976, 475, 330,
814, 655, 710, 811, 570, 20, 798, 872, 633, 265, 510,
46, 834, 182, 819, 891, 612, 519, 710, 732, 260, 701,
994, 208, 9, 281, 207, 729, 807, 746, 977, 221, 257,
456, 880, 178, 760, 56, 214, 822, 368, 970, 337, 767,
499, 518, 687, 90, 554, 212, 607, 883, 605, 697, 849,
982, 661, 68, 1000, 250, 950, 796, 122, 601, 370, 622,
571, 304, 536, 42, 123, 245, 71, 925, 487, 57, 935,
778, 484, 756, 374, 430, 142, 164, 913, 642, 449, 920,
553, 968, 239, 810, 651, 132, 185, 14, 644, 474, 645,
200, 503, 600, 467, 884, 2, 280, 425, 105, 226, 557,
661, 814, 881, 477, 546, 801, 231, 378, 550, 482, 673,
320, 63, 847, 795, 792, 240, 474, 989, 203, 442, 595,
482, 277, 18, 116, 762, 557, 532, 610, 944, 171, 241,
484, 919, 987, 116, 255, 374, 300, 948, 323, 898, 296,
361, 455, 696, 888, 161, 34, 190, 487, 624, 748, 887,
778, 126, 465, 197, 607, 429, 3, 502, 218, 542, 594,
987, 322, 340, 346, 597, 843, 315, 736, 450, 74, 420,
214, 736, 519, 799, 137, 579, 123, 119, 416, 454, 757,
351, 474, 564, 813, 668, 962, 446, 177, 490, 400, 119,
183, 589, 888, 826, 717, 73, 576, 147, 873, 919, 795,
622, 981, 872, 326, 252, 193, 447, 974, 775, 150, 200,
443, 641, 64, 130, 636, 22, 172, 117, 885, 40, 654,
690, 324, 912, 995])
def test_snail_051(self):
self.assertEqual(snail([[697, 690, 45, 97, 974, 564, 828, 482, 459, 457,
247, 709, 849, 755, 636, 252, 174],
[878, 182, 418, 18, 296, 541, 463, 226, 390,
399, 86, 57, 352, 505, 880, 822, 596],
[312, 932, 870, 982, 37, 485, 327, 970, 614,
352, 485, 832, 443, 243, 116, 468, 437],
[283, 947, 1000, 474, 878, 672, 130, 269, 601,
862, 608, 896, 683, 65, 5, 7, 854],
[103, 886, 322, 406, 644, 252, 162, 590, 859,
997, 222, 316, 188, 581, 796, 969, 58],
[229, 54, 972, 517, 133, 800, 959, 577, 62, 954,
234, 40, 491, 22, 580, 862, 428],
[853, 197, 664, 207, 581, 868, 982, 935, 2, 818,
51, 950, 425, 673, 513, 507, 992],
[917, 788, 132, 184, 895, 383, 592, 175, 810,
711, 802, 86, 43, 192, 598, 515, 822],
[59, 393, 360, 66, 673, 904, 665, 258, 264, 39,
667, 780, 679, 563, 100, 30, 272],
[150, 367, 289, 44, 24, 249, 470, 487, 212, 802,
989, 338, 650, 813, 518, 64, 465],
[523, 744, 969, 535, 138, 123, 784, 424, 16,
638, 518, 692, 26, 253, 134, 334, 279],
[563, 345, 64, 97, 67, 966, 282, 163, 530, 69,
821, 159, 70, 657, 766, 312, 667],
[102, 543, 515, 548, 410, 417, 570, 834, 78,
297, 961, 164, 375, 429, 318, 636, 506],
[358, 824, 326, 229, 271, 557, 286, 19, 74, 375,
713, 292, 984, 730, 734, 281, 275],
[9, 812, 979, 24, 319, 707, 337, 99, 454, 499,
124, 291, 400, 809, 566, 290, 151],
[815, 554, 264, 774, 823, 520, 185, 11, 860,
938, 566, 15, 367, 729, 540, 623, 14],
[13, 808, 108, 848, 278, 568, 551, 248, 3, 814,
211, 204, 808, 452, 564, 477, 744]]),
[697, 690, 45, 97, 974, 564, 828, 482, 459, 457, 247,
709, 849, 755, 636, 252, 174, 596, 437, 854, 58, 428,
992, 822, 272, 465, 279, 667, 506, 275, 151, 14, 744,
477, 564, 452, 808, 204, 211, 814, 3, 248, 551, 568,
278, 848, 108, 808, 13, 815, 9, 358, 102, 563, 523,
150, 59, 917, 853, 229, 103, 283, 312, 878, 182, 418,
18, 296, 541, 463, 226, 390, 399, 86, 57, 352, 505,
880, 822, 468, 7, 969, 862, 507, 515, 30, 64, 334,
312, 636, 281, 290, 623, 540, 729, 367, 15, 566, 938,
860, 11, 185, 520, 823, 774, 264, 554, 812, 824, 543,
345, 744, 367, 393, 788, 197, 54, 886, 947, 932, 870,
982, 37, 485, 327, 970, 614, 352, 485, 832, 443, 243,
116, 5, 796, 580, 513, 598, 100, 518, 134, 766, 318,
734, 566, 809, 400, 291, 124, 499, 454, 99, 337, 707,
319, 24, 979, 326, 515, 64, 969, 289, 360, 132, 664,
972, 322, 1000, 474, 878, 672, 130, 269, 601, 862,
608, 896, 683, 65, 581, 22, 673, 192, 563, 813, 253,
657, 429, 730, 984, 292, 713, 375, 74, 19, 286, 557,
271, 229, 548, 97, 535, 44, 66, 184, 207, 517, 406,
644, 252, 162, 590, 859, 997, 222, 316, 188, 491, 425,
43, 679, 650, 26, 70, 375, 164, 961, 297, 78, 834,
570, 417, 410, 67, 138, 24, 673, 895, 581, 133, 800,
959, 577, 62, 954, 234, 40, 950, 86, 780, 338, 692,
159, 821, 69, 530, 163, 282, 966, 123, 249, 904, 383,
868, 982, 935, 2, 818, 51, 802, 667, 989, 518, 638,
16, 424, 784, 470, 665, 592, 175, 810, 711, 39, 802,
212, 487, 258, 264])
def test_snail_052(self):
self.assertEqual(snail(
[[20, 403, 806, 88, 823], [815, 182, 755, 134, 479],
[267, 452, 774, 27, 393], [680, 645, 139, 170, 600],
[345, 733, 858, 567, 786]]),
[20, 403, 806, 88, 823, 479, 393, 600, 786, 567, 858,
733, 345, 680, 267, 815, 182, 755, 134, 27, 170, 139,
645, 452, 774])
def test_snail_053(self):
self.assertEqual(snail([[196, 838, 193, 215, 121, 793, 196, 949, 361,
294, 910, 341, 538, 137, 777],
[733, 398, 687, 983, 435, 870, 229, 107, 407,
772, 68, 915, 209, 859, 737],
[212, 594, 822, 823, 492, 867, 788, 511, 744,
679, 68, 763, 663, 708, 835],
[207, 592, 305, 579, 378, 864, 922, 874, 424,
364, 237, 930, 250, 343, 516],
[817, 144, 317, 932, 246, 346, 160, 676, 51,
860, 889, 532, 902, 60, 300],
[132, 26, 383, 247, 812, 338, 673, 679, 88, 254,
502, 553, 165, 334, 186],
[59, 683, 976, 614, 311, 493, 17, 433, 171, 254,
478, 430, 6, 238, 216],
[70, 590, 861, 521, 494, 163, 91, 792, 848, 892,
525, 313, 845, 455, 222],
[471, 326, 678, 405, 72, 724, 69, 630, 206, 767,
730, 223, 860, 290, 477],
[848, 786, 184, 788, 614, 38, 213, 908, 258,
752, 927, 756, 780, 835, 260],
[240, 604, 469, 663, 791, 671, 405, 848, 731,
335, 905, 129, 239, 679, 516],
[28, 935, 400, 783, 206, 777, 836, 627, 32, 475,
736, 206, 469, 495, 543],
[271, 429, 63, 55, 402, 237, 622, 711, 443, 603,
307, 107, 892, 627, 360],
[265, 323, 177, 700, 4, 43, 396, 551, 646, 392,
735, 686, 784, 445, 603],
[807, 589, 84, 393, 478, 843, 317, 717, 678,
341, 257, 31, 498, 454, 260]]),
[196, 838, 193, 215, 121, 793, 196, 949, 361, 294, 910,
341, 538, 137, 777, 737, 835, 516, 300, 186, 216, 222,
477, 260, 516, 543, 360, 603, 260, 454, 498, 31, 257,
341, 678, 717, 317, 843, 478, 393, 84, 589, 807, 265,
271, 28, 240, 848, 471, 70, 59, 132, 817, 207, 212,
733, 398, 687, 983, 435, 870, 229, 107, 407, 772, 68,
915, 209, 859, 708, 343, 60, 334, 238, 455, 290, 835,
679, 495, 627, 445, 784, 686, 735, 392, 646, 551, 396,
43, 4, 700, 177, 323, 429, 935, 604, 786, 326, 590,
683, 26, 144, 592, 594, 822, 823, 492, 867, 788, 511,
744, 679, 68, 763, 663, 250, 902, 165, 6, 845, 860,
780, 239, 469, 892, 107, 307, 603, 443, 711, 622, 237,
402, 55, 63, 400, 469, 184, 678, 861, 976, 383, 317,
305, 579, 378, 864, 922, 874, 424, 364, 237, 930, 532,
553, 430, 313, 223, 756, 129, 206, 736, 475, 32, 627,
836, 777, 206, 783, 663, 788, 405, 521, 614, 247, 932,
246, 346, 160, 676, 51, 860, 889, 502, 478, 525, 730,
927, 905, 335, 731, 848, 405, 671, 791, 614, 72, 494,
311, 812, 338, 673, 679, 88, 254, 254, 892, 767, 752,
258, 908, 213, 38, 724, 163, 493, 17, 433, 171, 848,
206, 630, 69, 91, 792])
def test_snail_054(self):
self.assertEqual(snail(
[[680, 28, 574, 89, 186, 359], [110, 422, 21, 950, 715, 79],
[344, 688, 686, 338, 239, 840], [320, 321, 492, 418, 905, 628],
[684, 383, 704, 429, 457, 932],
[977, 861, 351, 408, 652, 42]]),
[680, 28, 574, 89, 186, 359, 79, 840, 628, 932, 42,
652, 408, 351, 861, 977, 684, 320, 344, 110, 422, 21,
950, 715, 239, 905, 457, 429, 704, 383, 321, 688, 686,
338, 418, 492])
def test_snail_055(self):
self.assertEqual(snail([[58, 407, 6, 598, 246, 664, 722, 382, 779, 444,
939, 572, 998, 857, 973, 783, 332, 192],
[664, 600, 824, 153, 433, 187, 978, 637, 740,
427, 135, 816, 393, 522, 351, 940, 896, 65],
[126, 399, 993, 472, 941, 3, 717, 884, 803, 688,
203, 219, 414, 589, 972, 999, 730, 672],
[43, 467, 608, 228, 380, 252, 318, 177, 251,
657, 281, 509, 714, 14, 49, 909, 934, 672],
[718, 635, 676, 235, 349, 435, 914, 136, 476,
562, 653, 497, 338, 58, 63, 716, 187, 48],
[530, 480, 937, 218, 277, 678, 434, 266, 334,
95, 270, 449, 631, 192, 309, 389, 564, 924],
[279, 697, 22, 866, 170, 218, 584, 387, 992,
727, 188, 755, 564, 367, 27, 250, 250, 999],
[561, 200, 392, 765, 31, 517, 431, 463, 28, 376,
49, 428, 336, 994, 311, 814, 27, 288],
[928, 846, 706, 704, 835, 565, 752, 294, 404,
519, 269, 311, 38, 914, 216, 74, 364, 83],
[415, 30, 240, 897, 143, 567, 250, 27, 872, 101,
345, 1000, 12, 47, 485, 188, 675, 861],
[837, 586, 441, 706, 658, 312, 12, 823, 414,
485, 975, 621, 788, 912, 923, 260, 611, 863],
[299, 973, 177, 461, 147, 265, 732, 9, 521, 211,
73, 300, 919, 316, 839, 956, 164, 950],
[289, 604, 206, 623, 94, 84, 544, 200, 955, 230,
186, 194, 852, 47, 586, 687, 559, 809],
[139, 266, 610, 674, 20, 856, 866, 721, 224, 61,
754, 599, 97, 827, 934, 724, 207, 281],
[59, 42, 40, 155, 346, 392, 602, 768, 428, 104,
285, 74, 913, 885, 258, 79, 366, 114],
[205, 16, 543, 155, 384, 415, 64, 375, 841, 387,
922, 909, 489, 846, 666, 378, 933, 908],
[389, 178, 394, 265, 728, 108, 599, 398, 569,
480, 159, 635, 255, 421, 260, 230, 855, 267],
[767, 767, 591, 319, 141, 136, 915, 262, 723,
932, 887, 891, 417, 101, 415, 178, 369, 179]]),
[58, 407, 6, 598, 246, 664, 722, 382, 779, 444, 939,
572, 998, 857, 973, 783, 332, 192, 65, 672, 672, 48,
924, 999, 288, 83, 861, 863, 950, 809, 281, 114, 908,
267, 179, 369, 178, 415, 101, 417, 891, 887, 932, 723,
262, 915, 136, 141, 319, 591, 767, 767, 389, 205, 59,
139, 289, 299, 837, 415, 928, 561, 279, 530, 718, 43,
126, 664, 600, 824, 153, 433, 187, 978, 637, 740, 427,
135, 816, 393, 522, 351, 940, 896, 730, 934, 187, 564,
250, 27, 364, 675, 611, 164, 559, 207, 366, 933, 855,
230, 260, 421, 255, 635, 159, 480, 569, 398, 599, 108,
728, 265, 394, 178, 16, 42, 266, 604, 973, 586, 30,
846, 200, 697, 480, 635, 467, 399, 993, 472, 941, 3,
717, 884, 803, 688, 203, 219, 414, 589, 972, 999, 909,
716, 389, 250, 814, 74, 188, 260, 956, 687, 724, 79,
378, 666, 846, 489, 909, 922, 387, 841, 375, 64, 415,
384, 155, 543, 40, 610, 206, 177, 441, 240, 706, 392,
22, 937, 676, 608, 228, 380, 252, 318, 177, 251, 657,
281, 509, 714, 14, 49, 63, 309, 27, 311, 216, 485,
923, 839, 586, 934, 258, 885, 913, 74, 285, 104, 428,
768, 602, 392, 346, 155, 674, 623, 461, 706, 897, 704,
765, 866, 218, 235, 349, 435, 914, 136, 476, 562, 653,
497, 338, 58, 192, 367, 994, 914, 47, 912, 316, 47,
827, 97, 599, 754, 61, 224, 721, 866, 856, 20, 94,
147, 658, 143, 835, 31, 170, 277, 678, 434, 266, 334,
95, 270, 449, 631, 564, 336, 38, 12, 788, 919, 852,
194, 186, 230, 955, 200, 544, 84, 265, 312, 567, 565,
517, 218, 584, 387, 992, 727, 188, 755, 428, 311,
1000, 621, 300, 73, 211, 521, 9, 732, 12, 250, 752,
431, 463, 28, 376, 49, 269, 345, 975, 485, 414, 823,
27, 294, 404, 519, 101, 872])
def test_snail_056(self):
self.assertEqual(snail([[990, 568, 232, 648, 150, 961, 543, 323, 970,
480, 247, 655, 234, 766],
[445, 283, 695, 616, 307, 693, 516, 267, 772,
614, 375, 354, 874, 139],
[56, 886, 202, 95, 850, 689, 279, 633, 473, 300,
210, 950, 264, 392],
[656, 90, 399, 263, 200, 764, 793, 125, 644,
341, 1, 41, 315, 577],
[703, 868, 597, 797, 445, 159, 899, 961, 421,
400, 865, 37, 475, 501],
[554, 14, 369, 351, 506, 615, 921, 242, 972,
625, 402, 906, 693, 251],
[727, 518, 523, 314, 40, 458, 338, 814, 508,
135, 515, 151, 288, 433],
[456, 696, 183, 605, 963, 882, 243, 721, 924,
276, 244, 341, 592, 746],
[275, 799, 613, 400, 259, 241, 12, 991, 844, 51,
532, 893, 933, 357],
[649, 500, 240, 430, 276, 488, 583, 197, 11,
646, 285, 552, 812, 520],
[654, 829, 189, 560, 146, 26, 397, 206, 605, 64,
132, 791, 264, 469],
[823, 419, 690, 389, 997, 854, 416, 97, 267,
499, 383, 250, 856, 510],
[863, 725, 195, 653, 568, 668, 761, 598, 379,
810, 674, 535, 350, 215],
[880, 492, 584, 822, 260, 81, 726, 737, 166,
379, 347, 66, 990, 381]]),
[990, 568, 232, 648, 150, 961, 543, 323, 970, 480, 247,
655, 234, 766, 139, 392, 577, 501, 251, 433, 746, 357,
520, 469, 510, 215, 381, 990, 66, 347, 379, 166, 737,
726, 81, 260, 822, 584, 492, 880, 863, 823, 654, 649,
275, 456, 727, 554, 703, 656, 56, 445, 283, 695, 616,
307, 693, 516, 267, 772, 614, 375, 354, 874, 264, 315,
475, 693, 288, 592, 933, 812, 264, 856, 350, 535, 674,
810, 379, 598, 761, 668, 568, 653, 195, 725, 419, 829,
500, 799, 696, 518, 14, 868, 90, 886, 202, 95, 850,
689, 279, 633, 473, 300, 210, 950, 41, 37, 906, 151,
341, 893, 552, 791, 250, 383, 499, 267, 97, 416, 854,
997, 389, 690, 189, 240, 613, 183, 523, 369, 597, 399,
263, 200, 764, 793, 125, 644, 341, 1, 865, 402, 515,
244, 532, 285, 132, 64, 605, 206, 397, 26, 146, 560,
430, 400, 605, 314, 351, 797, 445, 159, 899, 961, 421,
400, 625, 135, 276, 51, 646, 11, 197, 583, 488, 276,
259, 963, 40, 506, 615, 921, 242, 972, 508, 924, 844,
991, 12, 241, 882, 458, 338, 814, 721, 243])
def test_snail_057(self):
self.assertEqual(snail([[40, 406, 36, 505, 634, 102, 702, 130],
[441, 809, 470, 914, 796, 852, 306, 978],
[919, 501, 158, 558, 536, 141, 229, 678],
[841, 688, 115, 374, 638, 735, 687, 358],
[432, 204, 983, 343, 5, 717, 999, 912],
[380, 253, 737, 263, 790, 515, 817, 270],
[298, 335, 347, 644, 356, 931, 594, 954],
[977, 832, 618, 875, 547, 995, 47, 183]]),
[40, 406, 36, 505, 634, 102, 702, 130, 978, 678, 358,
912, 270, 954, 183, 47, 995, 547, 875, 618, 832, 977,
298, 380, 432, 841, 919, 441, 809, 470, 914, 796, 852,
306, 229, 687, 999, 817, 594, 931, 356, 644, 347, 335,
253, 204, 688, 501, 158, 558, 536, 141, 735, 717, 515,
790, 263, 737, 983, 115, 374, 638, 5, 343])
def test_snail_058(self):
self.assertEqual(
snail([[935, 756, 641], [827, 444, 751], [166, 61, 775]]),
[935, 756, 641, 751, 775, 61, 166, 827, 444])
def test_snail_059(self):
self.assertEqual(snail([[21, 182], [507, 380]]), [21, 182, 380, 507])
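# A minimal sketch of the clockwise spiral traversal these cases exercise.
# Assumption: this helper is NOT the implementation under test -- snail() is
# provided elsewhere in the suite -- it only illustrates the behaviour the
# fixtures encode (take the top row, rotate the rest counter-clockwise, repeat),
# e.g. for the 2x2 case directly above.
def _snail_sketch(matrix):
    out = []
    while matrix:
        out.extend(matrix.pop(0))  # peel off the current top row
        # rotate the remaining rows counter-clockwise so the next edge becomes the top row
        matrix = [list(row) for row in zip(*matrix)][::-1]
    return out
# _snail_sketch([[21, 182], [507, 380]]) == [21, 182, 380, 507]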
def test_snail_060(self):
self.assertEqual(snail(
[[535, 230, 195, 719, 377], [95, 348, 60, 911, 645],
[654, 459, 570, 244, 205], [728, 622, 509, 484, 25],
[253, 883, 275, 80, 276]]),
[535, 230, 195, 719, 377, 645, 205, 25, 276, 80, 275,
883, 253, 728, 654, 95, 348, 60, 911, 244, 484, 509,
622, 459, 570])
def test_snail_061(self):
self.assertEqual(snail([[785, 961, 393, 614, 388, 455, 610, 908, 516,
364, 872, 655, 842, 764, 246],
[747, 313, 906, 923, 915, 201, 951, 533, 862,
575, 735, 729, 14, 730, 25],
[170, 790, 377, 815, 635, 93, 45, 31, 555, 762,
119, 935, 885, 180, 891],
[806, 414, 178, 167, 636, 597, 562, 768, 302,
74, 481, 549, 962, 118, 40],
[742, 767, 826, 738, 501, 914, 693, 644, 409,
81, 185, 361, 197, 649, 124],
[232, 878, 30, 498, 260, 724, 650, 544, 388,
384, 2, 159, 714, 198, 532],
[375, 157, 579, 641, 683, 263, 576, 500, 177,
402, 659, 489, 438, 839, 314],
[834, 355, 434, 66, 333, 91, 207, 2, 651, 340,
505, 175, 443, 446, 740],
[688, 517, 275, 811, 515, 461, 833, 811, 849,
406, 79, 631, 431, 108, 122],
[252, 712, 973, 15, 536, 36, 55, 901, 503, 701,
520, 690, 918, 759, 217],
[350, 784, 946, 63, 801, 911, 233, 411, 116,
355, 973, 352, 784, 50, 352],
[242, 23, 728, 771, 881, 975, 565, 498, 405,
694, 441, 621, 741, 586, 48],
[551, 928, 529, 150, 280, 388, 741, 717, 699,
211, 677, 512, 900, 416, 689],
[404, 225, 869, 308, 827, 93, 424, 276, 775,
180, 569, 51, 710, 847, 792],
[771, 527, 737, 805, 705, 766, 900, 757, 994,
640, 335, 733, 588, 921, 265]]),
[785, 961, 393, 614, 388, 455, 610, 908, 516, 364, 872,
655, 842, 764, 246, 25, 891, 40, 124, 532, 314, 740,
122, 217, 352, 48, 689, 792, 265, 921, 588, 733, 335,
640, 994, 757, 900, 766, 705, 805, 737, 527, 771, 404,
551, 242, 350, 252, 688, 834, 375, 232, 742, 806, 170,
747, 313, 906, 923, 915, 201, 951, 533, 862, 575, 735,
729, 14, 730, 180, 118, 649, 198, 839, 446, 108, 759,
50, 586, 416, 847, 710, 51, 569, 180, 775, 276, 424,
93, 827, 308, 869, 225, 928, 23, 784, 712, 517, 355,
157, 878, 767, 414, 790, 377, 815, 635, 93, 45, 31,
555, 762, 119, 935, 885, 962, 197, 714, 438, 443, 431,
918, 784, 741, 900, 512, 677, 211, 699, 717, 741, 388,
280, 150, 529, 728, 946, 973, 275, 434, 579, 30, 826,
178, 167, 636, 597, 562, 768, 302, 74, 481, 549, 361,
159, 489, 175, 631, 690, 352, 621, 441, 694, 405, 498,
565, 975, 881, 771, 63, 15, 811, 66, 641, 498, 738,
501, 914, 693, 644, 409, 81, 185, 2, 659, 505, 79,
520, 973, 355, 116, 411, 233, 911, 801, 536, 515, 333,
683, 260, 724, 650, 544, 388, 384, 402, 340, 406, 701,
503, 901, 55, 36, 461, 91, 263, 576, 500, 177, 651,
849, 811, 833, 207, 2])
def test_snail_062(self):
self.assertEqual(snail([[353, 85, 930, 216], [626, 576, 495, 991],
[581, 192, 891, 709], [350, 925, 349, 502]]),
[353, 85, 930, 216, 991, 709, 502, 349, 925, 350, 581,
626, 576, 495, 891, 192])
def test_snail_063(self):
self.assertEqual(snail([[540]]), [540])
def test_snail_064(self):
self.assertEqual(snail(
[[323, 110, 157, 740, 114, 704, 774, 106, 268, 508, 566, 474],
[399, 944, 938, 434, 715, 475, 929, 705, 940, 246, 787, 528],
[807, 311, 393, 557, 372, 756, 260, 12, 811, 4, 368, 282],
[618, 918, 279, 23, 755, 16, 141, 214, 837, 333, 916, 937],
[353, 304, 904, 659, 345, 217, 882, 563, 845, 34, 318, 763],
[420, 645, 620, 910, 271, 243, 705, 909, 841, 907, 954, 745],
[114, 445, 310, 574, 25, 779, 262, 381, 319, 231, 460, 811],
[768, 163, 698, 307, 647, 712, 617, 700, 549, 215, 645, 839],
[779, 475, 357, 508, 819, 672, 250, 228, 602, 747, 734, 598],
[217, 834, 271, 442, 745, 526, 141, 571, 331, 715, 937, 24],
[159, 231, 655, 435, 450, 532, 913, 91, 527, 105, 40, 294],
[654, 903, 196, 676, 451, 502, 602, 539, 429, 795, 646, 117]]),
[323, 110, 157, 740, 114, 704, 774, 106, 268, 508, 566,
474, 528, 282, 937, 763, 745, 811, 839, 598, 24, 294,
117, 646, 795, 429, 539, 602, 502, 451, 676, 196, 903,
654, 159, 217, 779, 768, 114, 420, 353, 618, 807, 399,
944, 938, 434, 715, 475, 929, 705, 940, 246, 787, 368,
916, 318, 954, 460, 645, 734, 937, 40, 105, 527, 91,
913, 532, 450, 435, 655, 231, 834, 475, 163, 445, 645,
304, 918, 311, 393, 557, 372, 756, 260, 12, 811, 4,
333, 34, 907, 231, 215, 747, 715, 331, 571, 141, 526,
745, 442, 271, 357, 698, 310, 620, 904, 279, 23, 755,
16, 141, 214, 837, 845, 841, 319, 549, 602, 228, 250,
672, 819, 508, 307, 574, 910, 659, 345, 217, 882, 563,
909, 381, 700, 617, 712, 647, 25, 271, 243, 705, 262,
779])
def test_snail_065(self):
self.assertEqual(snail([[986, 240, 922, 622, 119, 802, 582, 105, 664,
791, 735, 699, 470, 252, 698, 185, 108, 345,
492, 923],
[240, 476, 677, 30, 653, 350, 500, 837, 871,
723, 277, 232, 913, 969, 363, 209, 806, 50,
395, 85],
[685, 728, 491, 175, 714, 445, 721, 940, 935,
357, 321, 462, 884, 97, 210, 183, 804, 892,
424, 518],
[43, 751, 907, 556, 279, 812, 613, 69, 915, 20,
19, 446, 737, 739, 400, 713, 203, 94, 294,
335],
[48, 183, 597, 479, 293, 803, 657, 501, 358,
165, 14, 999, 153, 35, 638, 561, 25, 565, 891,
543],
[918, 781, 555, 285, 954, 969, 636, 883, 200,
883, 426, 521, 528, 495, 964, 773, 799, 545,
116, 512],
[279, 668, 405, 945, 213, 573, 712, 99, 713,
688, 492, 589, 177, 718, 651, 252, 843, 376,
657, 428],
[332, 282, 54, 321, 724, 679, 50, 698, 727, 252,
661, 306, 790, 269, 958, 673, 742, 806, 310,
568],
[785, 236, 107, 886, 498, 650, 569, 967, 185,
57, 448, 25, 101, 787, 194, 464, 508, 925, 944,
531],
[141, 283, 763, 387, 423, 348, 93, 286, 448, 71,
745, 231, 949, 228, 838, 717, 673, 24, 42,
634],
[861, 730, 300, 615, 603, 945, 225, 319, 418,
919, 514, 27, 884, 628, 229, 87, 193, 140, 692,
508],
[568, 394, 305, 601, 237, 948, 275, 480, 33,
277, 821, 38, 313, 236, 216, 27, 650, 972, 284,
554],
[613, 892, 806, 441, 975, 777, 615, 741, 534,
43, 203, 991, 405, 302, 447, 313, 800, 345, 54,
670],
[689, 699, 296, 498, 793, 199, 282, 489, 224,
839, 870, 409, 686, 935, 196, 2, 755, 257, 246,
712],
[328, 236, 205, 980, 365, 90, 865, 401, 528,
368, 802, 971, 48, 218, 30, 655, 308, 690, 285,
387],
[215, 929, 894, 328, 40, 718, 33, 112, 729, 609,
598, 956, 838, 252, 727, 798, 486, 797, 65,
758],
[162, 746, 960, 376, 695, 473, 664, 960, 948,
375, 354, 980, 614, 540, 300, 538, 822, 816,
117, 371],
[343, 801, 497, 285, 121, 244, 913, 709, 271,
252, 301, 557, 115, 678, 161, 389, 169, 38,
765, 240],
[815, 108, 350, 304, 736, 991, 769, 383, 399,
621, 397, 798, 382, 738, 344, 280, 479, 255,
398, 280],
[411, 702, 791, 603, 849, 743, 594, 468, 396,
752, 297, 515, 426, 426, 806, 385, 878, 815,
840, 50]]),
[986, 240, 922, 622, 119, 802, 582, 105, 664, 791, 735,
699, 470, 252, 698, 185, 108, 345, 492, 923, 85, 518,
335, 543, 512, 428, 568, 531, 634, 508, 554, 670, 712,
387, 758, 371, 240, 280, 50, 840, 815, 878, 385, 806,
426, 426, 515, 297, 752, 396, 468, 594, 743, 849, 603,
791, 702, 411, 815, 343, 162, 215, 328, 689, 613, 568,
861, 141, 785, 332, 279, 918, 48, 43, 685, 240, 476,
677, 30, 653, 350, 500, 837, 871, 723, 277, 232, 913,
969, 363, 209, 806, 50, 395, 424, 294, 891, 116, 657,
310, 944, 42, 692, 284, 54, 246, 285, 65, 117, 765,
398, 255, 479, 280, 344, 738, 382, 798, 397, 621, 399,
383, 769, 991, 736, 304, 350, 108, 801, 746, 929, 236,
699, 892, 394, 730, 283, 236, 282, 668, 781, 183, 751,
728, 491, 175, 714, 445, 721, 940, 935, 357, 321, 462,
884, 97, 210, 183, 804, 892, 94, 565, 545, 376, 806,
925, 24, 140, 972, 345, 257, 690, 797, 816, 38, 169,
389, 161, 678, 115, 557, 301, 252, 271, 709, 913, 244,
121, 285, 497, 960, 894, 205, 296, 806, 305, 300, 763,
107, 54, 405, 555, 597, 907, 556, 279, 812, 613, 69,
915, 20, 19, 446, 737, 739, 400, 713, 203, 25, 799,
843, 742, 508, 673, 193, 650, 800, 755, 308, 486, 822,
538, 300, 540, 614, 980, 354, 375, 948, 960, 664, 473,
695, 376, 328, 980, 498, 441, 601, 615, 387, 886, 321,
945, 285, 479, 293, 803, 657, 501, 358, 165, 14, 999,
153, 35, 638, 561, 773, 252, 673, 464, 717, 87, 27,
313, 2, 655, 798, 727, 252, 838, 956, 598, 609, 729,
112, 33, 718, 40, 365, 793, 975, 237, 603, 423, 498,
724, 213, 954, 969, 636, 883, 200, 883, 426, 521, 528,
495, 964, 651, 958, 194, 838, 229, 216, 447, 196, 30,
218, 48, 971, 802, 368, 528, 401, 865, 90, 199, 777,
948, 945, 348, 650, 679, 573, 712, 99, 713, 688, 492,
589, 177, 718, 269, 787, 228, 628, 236, 302, 935, 686,
409, 870, 839, 224, 489, 282, 615, 275, 225, 93, 569,
50, 698, 727, 252, 661, 306, 790, 101, 949, 884, 313,
405, 991, 203, 43, 534, 741, 480, 319, 286, 967, 185,
57, 448, 25, 231, 27, 38, 821, 277, 33, 418, 448, 71,
745, 514, 919])
def test_snail_066(self):
self.assertEqual(snail([[779, 390, 935, 443, 441, 932, 526, 627, 761,
633, 708, 770, 21, 872],
[754, 424, 961, 78, 264, 512, 496, 963, 781, 96,
127, 102, 443, 432],
[462, 403, 123, 808, 836, 958, 574, 126, 686,
524, 508, 557, 61, 901],
[6, 257, 831, 713, 790, 660, 2, 775, 268, 337,
75, 804, 357, 961],
[604, 802, 2, 87, 101, 475, 192, 722, 345, 173,
926, 171, 170, 293],
[12, 776, 242, 639, 641, 929, 898, 119, 5, 501,
358, 518, 440, 395],
[635, 821, 94, 345, 146, 460, 246, 555, 618,
331, 959, 907, 717, 521],
[669, 178, 275, 457, 549, 963, 216, 69, 228,
722, 444, 914, 58, 643],
[826, 947, 674, 252, 707, 10, 968, 492, 418,
191, 393, 595, 278, 540],
[797, 490, 818, 461, 131, 884, 421, 935, 299,
970, 715, 75, 516, 507],
[546, 784, 474, 248, 573, 366, 638, 696, 927,
892, 508, 311, 606, 632],
[846, 536, 776, 553, 586, 170, 327, 24, 828,
282, 927, 787, 202, 550],
[739, 628, 146, 910, 843, 244, 23, 430, 521,
810, 923, 467, 875, 938],
[222, 359, 852, 608, 514, 865, 674, 391, 344,
161, 69, 418, 188, 375]]),
[779, 390, 935, 443, 441, 932, 526, 627, 761, 633, 708,
770, 21, 872, 432, 901, 961, 293, 395, 521, 643, 540,
507, 632, 550, 938, 375, 188, 418, 69, 161, 344, 391,
674, 865, 514, 608, 852, 359, 222, 739, 846, 546, 797,
826, 669, 635, 12, 604, 6, 462, 754, 424, 961, 78,
264, 512, 496, 963, 781, 96, 127, 102, 443, 61, 357,
170, 440, 717, 58, 278, 516, 606, 202, 875, 467, 923,
810, 521, 430, 23, 244, 843, 910, 146, 628, 536, 784,
490, 947, 178, 821, 776, 802, 257, 403, 123, 808, 836,
958, 574, 126, 686, 524, 508, 557, 804, 171, 518, 907,
914, 595, 75, 311, 787, 927, 282, 828, 24, 327, 170,
586, 553, 776, 474, 818, 674, 275, 94, 242, 2, 831,
713, 790, 660, 2, 775, 268, 337, 75, 926, 358, 959,
444, 393, 715, 508, 892, 927, 696, 638, 366, 573, 248,
461, 252, 457, 345, 639, 87, 101, 475, 192, 722, 345,
173, 501, 331, 722, 191, 970, 299, 935, 421, 884, 131,
707, 549, 146, 641, 929, 898, 119, 5, 618, 228, 418,
492, 968, 10, 963, 460, 246, 555, 69, 216])
def test_snail_067(self):
self.assertEqual(snail(
[[771, 906, 164, 502, 151], [560, 297, 260, 485, 632],
[3, 884, 664, 507, 325], [639, 813, 354, 560, 226],
[274, 555, 978, 288, 756]]),
[771, 906, 164, 502, 151, 632, 325, 226, 756, 288, 978,
555, 274, 639, 3, 560, 297, 260, 485, 507, 560, 354,
813, 884, 664])
def test_snail_068(self):
self.assertEqual(snail(
[[254, 173, 160, 399, 691, 434], [849, 456, 758, 273, 917, 347],
[653, 544, 515, 483, 827, 638], [145, 862, 862, 170, 518, 727],
[702, 527, 461, 204, 727, 749], [478, 342, 652, 960, 6, 699]]),
[254, 173, 160, 399, 691, 434, 347, 638, 727, 749, 699,
6, 960, 652, 342, 478, 702, 145, 653, 849, 456, 758,
273, 917, 827, 518, 727, 204, 461, 527, 862, 544, 515,
483, 170, 862])
def test_snail_069(self):
self.assertEqual(snail(
[[226, 704, 457, 816, 131, 280], [360, 476, 612, 26, 934, 390],
[456, 641, 669, 251, 211, 954], [152, 516, 380, 865, 617, 824],
[887, 422, 509, 185, 322, 688],
[593, 21, 364, 475, 965, 533]]),
[226, 704, 457, 816, 131, 280, 390, 954, 824, 688, 533,
965, 475, 364, 21, 593, 887, 152, 456, 360, 476, 612,
26, 934, 211, 617, 322, 185, 509, 422, 516, 641, 669,
251, 865, 380])
def test_snail_070(self):
self.assertEqual(snail([[721, 438], [320, 489]]), [721, 438, 489, 320])
def test_snail_071(self):
self.assertEqual(snail(
[[27, 894, 555, 256, 430, 208], [554, 476, 381, 291, 303, 274],
[129, 801, 899, 605, 365, 252], [764, 675, 459, 554, 426, 85],
[816, 231, 149, 674, 303, 499],
[305, 116, 340, 865, 168, 954]]),
[27, 894, 555, 256, 430, 208, 274, 252, 85, 499, 954,
168, 865, 340, 116, 305, 816, 764, 129, 554, 476, 381,
291, 303, 365, 426, 303, 674, 149, 231, 675, 801, 899,
605, 554, 459])
def test_snail_072(self):
self.assertEqual(snail([[572, 40, 328, 370, 500, 359, 678, 378, 538,
858, 934, 597, 558, 719, 33, 895, 744, 664,
144, 942],
[866, 855, 310, 833, 63, 797, 898, 803, 651,
882, 732, 735, 675, 3, 262, 223, 173, 342, 85,
611],
[899, 287, 100, 560, 479, 542, 148, 688, 447,
575, 738, 640, 312, 25, 231, 757, 683, 260,
858, 346],
[1000, 625, 581, 457, 792, 537, 711, 735, 189,
665, 68, 774, 132, 208, 510, 10, 797, 727, 525,
799],
[465, 650, 818, 258, 110, 531, 816, 811, 259,
429, 56, 497, 701, 350, 938, 112, 318, 260, 88,
597],
[505, 112, 776, 421, 332, 521, 824, 55, 871,
114, 715, 725, 882, 579, 481, 425, 59, 382,
959, 807],
[258, 37, 320, 581, 567, 950, 77, 948, 540, 28,
560, 911, 307, 508, 163, 679, 687, 37, 246,
838],
[782, 59, 179, 588, 996, 675, 37, 425, 607, 688,
629, 34, 975, 885, 188, 852, 343, 841, 952,
103],
[432, 454, 425, 198, 425, 305, 909, 997, 263,
813, 666, 922, 619, 942, 262, 386, 730, 197,
664, 643],
[536, 668, 164, 476, 477, 667, 875, 990, 655,
985, 824, 684, 263, 111, 82, 828, 657, 131,
819, 210],
[943, 974, 501, 727, 825, 510, 913, 133, 947,
301, 117, 283, 952, 643, 787, 24, 345, 104,
323, 525],
[461, 589, 200, 794, 521, 39, 167, 52, 836, 477,
437, 507, 264, 717, 663, 347, 623, 669, 262,
34],
[370, 556, 920, 122, 82, 952, 628, 124, 245, 87,
213, 238, 792, 388, 47, 531, 918, 634, 368,
312],
[635, 472, 7, 883, 622, 910, 757, 959, 318, 933,
887, 877, 242, 418, 571, 610, 671, 745, 303,
14],
[128, 96, 532, 485, 66, 665, 373, 829, 848, 850,
124, 732, 618, 724, 34, 686, 851, 832, 407,
75],
[38, 836, 222, 635, 388, 936, 793, 187, 803,
227, 561, 481, 635, 9, 437, 922, 86, 272, 439,
452],
[303, 667, 784, 818, 908, 142, 768, 342, 350,
959, 210, 494, 592, 918, 494, 108, 795, 617,
169, 142],
[344, 618, 79, 320, 667, 726, 960, 900, 525,
776, 549, 292, 938, 390, 975, 423, 555, 963,
965, 440],
[220, 497, 705, 449, 161, 225, 73, 164, 796,
438, 978, 623, 304, 917, 584, 118, 700, 222,
476, 825],
[896, 392, 14, 489, 226, 742, 932, 303, 767,
487, 859, 637, 327, 399, 804, 304, 922, 119,
687, 755]]),
[572, 40, 328, 370, 500, 359, 678, 378, 538, 858, 934,
597, 558, 719, 33, 895, 744, 664, 144, 942, 611, 346,
799, 597, 807, 838, 103, 643, 210, 525, 34, 312, 14,
75, 452, 142, 440, 825, 755, 687, 119, 922, 304, 804,
399, 327, 637, 859, 487, 767, 303, 932, 742, 226, 489,
14, 392, 896, 220, 344, 303, 38, 128, 635, 370, 461,
943, 536, 432, 782, 258, 505, 465, 1000, 899, 866,
855, 310, 833, 63, 797, 898, 803, 651, 882, 732, 735,
675, 3, 262, 223, 173, 342, 85, 858, 525, 88, 959,
246, 952, 664, 819, 323, 262, 368, 303, 407, 439, 169,
965, 476, 222, 700, 118, 584, 917, 304, 623, 978, 438,
796, 164, 73, 225, 161, 449, 705, 497, 618, 667, 836,
96, 472, 556, 589, 974, 668, 454, 59, 37, 112, 650,
625, 287, 100, 560, 479, 542, 148, 688, 447, 575, 738,
640, 312, 25, 231, 757, 683, 260, 727, 260, 382, 37,
841, 197, 131, 104, 669, 634, 745, 832, 272, 617, 963,
555, 423, 975, 390, 938, 292, 549, 776, 525, 900, 960,
726, 667, 320, 79, 784, 222, 532, 7, 920, 200, 501,
164, 425, 179, 320, 776, 818, 581, 457, 792, 537, 711,
735, 189, 665, 68, 774, 132, 208, 510, 10, 797, 318,
59, 687, 343, 730, 657, 345, 623, 918, 671, 851, 86,
795, 108, 494, 918, 592, 494, 210, 959, 350, 342, 768,
142, 908, 818, 635, 485, 883, 122, 794, 727, 476, 198,
588, 581, 421, 258, 110, 531, 816, 811, 259, 429, 56,
497, 701, 350, 938, 112, 425, 679, 852, 386, 828, 24,
347, 531, 610, 686, 922, 437, 9, 635, 481, 561, 227,
803, 187, 793, 936, 388, 66, 622, 82, 521, 825, 477,
425, 996, 567, 332, 521, 824, 55, 871, 114, 715, 725,
882, 579, 481, 163, 188, 262, 82, 787, 663, 47, 571,
34, 724, 618, 732, 124, 850, 848, 829, 373, 665, 910,
952, 39, 510, 667, 305, 675, 950, 77, 948, 540, 28,
560, 911, 307, 508, 885, 942, 111, 643, 717, 388, 418,
242, 877, 887, 933, 318, 959, 757, 628, 167, 913, 875,
909, 37, 425, 607, 688, 629, 34, 975, 619, 263, 952,
264, 792, 238, 213, 87, 245, 124, 52, 133, 990, 997,
263, 813, 666, 922, 684, 283, 507, 437, 477, 836, 947,
655, 985, 824, 117, 301])
def test_snail_073(self):
self.assertEqual(snail([[785, 373, 215, 440], [948, 869, 882, 65],
[236, 227, 508, 450], [46, 69, 45, 237]]),
[785, 373, 215, 440, 65, 450, 237, 45, 69, 46, 236,
948, 869, 882, 508, 227])
def test_snail_074(self):
self.assertEqual(snail([[319, 115, 440, 26, 579, 418, 402, 165, 517,
784, 878, 694, 93, 128, 44],
[852, 607, 878, 871, 517, 532, 992, 374, 11, 98,
518, 711, 147, 227, 506],
[201, 469, 258, 872, 604, 990, 830, 450, 143,
19, 552, 694, 210, 758, 103],
[716, 320, 227, 464, 249, 476, 868, 589, 739,
445, 2, 718, 961, 95, 220],
[928, 536, 957, 213, 258, 403, 998, 925, 940,
860, 860, 119, 145, 74, 928],
[516, 421, 697, 192, 26, 251, 294, 643, 476,
959, 442, 826, 31, 582, 629],
[542, 446, 841, 808, 696, 30, 179, 795, 269,
917, 643, 306, 284, 20, 840],
[513, 218, 830, 912, 862, 388, 741, 525, 630,
405, 631, 383, 531, 318, 426],
[434, 565, 697, 621, 308, 675, 252, 683, 842,
26, 133, 402, 692, 674, 531],
[351, 597, 455, 57, 498, 523, 349, 688, 114,
881, 103, 692, 829, 40, 375],
[630, 400, 244, 600, 467, 618, 505, 435, 821,
670, 896, 248, 743, 83, 784],
[349, 703, 796, 713, 477, 203, 15, 468, 921,
837, 517, 134, 641, 899, 504],
[690, 699, 610, 990, 139, 296, 914, 196, 333,
876, 29, 979, 869, 355, 472],
[187, 787, 932, 687, 662, 625, 759, 371, 438,
893, 838, 876, 442, 442, 697],
[454, 871, 70, 541, 598, 597, 402, 472, 327,
160, 913, 735, 518, 770, 635]]),
[319, 115, 440, 26, 579, 418, 402, 165, 517, 784, 878,
694, 93, 128, 44, 506, 103, 220, 928, 629, 840, 426,
531, 375, 784, 504, 472, 697, 635, 770, 518, 735, 913,
160, 327, 472, 402, 597, 598, 541, 70, 871, 454, 187,
690, 349, 630, 351, 434, 513, 542, 516, 928, 716, 201,
852, 607, 878, 871, 517, 532, 992, 374, 11, 98, 518,
711, 147, 227, 758, 95, 74, 582, 20, 318, 674, 40, 83,
899, 355, 442, 442, 876, 838, 893, 438, 371, 759, 625,
662, 687, 932, 787, 699, 703, 400, 597, 565, 218, 446,
421, 536, 320, 469, 258, 872, 604, 990, 830, 450, 143,
19, 552, 694, 210, 961, 145, 31, 284, 531, 692, 829,
743, 641, 869, 979, 29, 876, 333, 196, 914, 296, 139,
990, 610, 796, 244, 455, 697, 830, 841, 697, 957, 227,
464, 249, 476, 868, 589, 739, 445, 2, 718, 119, 826,
306, 383, 402, 692, 248, 134, 517, 837, 921, 468, 15,
203, 477, 713, 600, 57, 621, 912, 808, 192, 213, 258,
403, 998, 925, 940, 860, 860, 442, 643, 631, 133, 103,
896, 670, 821, 435, 505, 618, 467, 498, 308, 862, 696,
26, 251, 294, 643, 476, 959, 917, 405, 26, 881, 114,
688, 349, 523, 675, 388, 30, 179, 795, 269, 630, 842,
683, 252, 741, 525])
def test_snail_075(self):
self.assertEqual(snail([[117, 708, 570, 27, 409, 596, 355, 42, 480],
[874, 320, 499, 489, 767, 179, 912, 813, 855],
[929, 737, 403, 431, 219, 710, 107, 450, 61],
[860, 446, 119, 88, 448, 553, 833, 293, 803],
[868, 141, 930, 398, 882, 135, 585, 348, 890],
[506, 859, 833, 31, 808, 663, 384, 341, 457],
[864, 183, 143, 954, 427, 680, 940, 411, 585],
[995, 374, 784, 568, 200, 777, 468, 69, 902],
[206, 588, 712, 813, 721, 746, 11, 284, 45]]),
[117, 708, 570, 27, 409, 596, 355, 42, 480, 855, 61,
803, 890, 457, 585, 902, 45, 284, 11, 746, 721, 813,
712, 588, 206, 995, 864, 506, 868, 860, 929, 874, 320,
499, 489, 767, 179, 912, 813, 450, 293, 348, 341, 411,
69, 468, 777, 200, 568, 784, 374, 183, 859, 141, 446,
737, 403, 431, 219, 710, 107, 833, 585, 384, 940, 680,
427, 954, 143, 833, 930, 119, 88, 448, 553, 135, 663,
808, 31, 398, 882])
def test_snail_076(self):
self.assertEqual(snail([[385, 928, 460, 539, 984, 516, 609, 769, 825,
857, 819, 422, 989, 319, 60, 450, 495, 64,
624],
[604, 59, 272, 470, 997, 980, 563, 632, 353,
366, 750, 740, 395, 978, 995, 848, 72, 820,
410],
[703, 427, 351, 469, 685, 297, 362, 947, 998,
434, 896, 773, 441, 562, 785, 704, 529, 471,
798],
[564, 846, 756, 916, 435, 184, 785, 930, 349,
161, 253, 365, 82, 976, 499, 461, 398, 278,
331],
[529, 354, 643, 338, 772, 629, 726, 296, 672,
282, 268, 741, 330, 272, 217, 188, 754, 875,
58],
[869, 349, 328, 585, 442, 812, 645, 854, 317,
437, 314, 343, 571, 202, 534, 22, 307, 874,
859],
[161, 579, 865, 703, 276, 889, 374, 792, 123,
668, 970, 737, 846, 416, 704, 204, 660, 223,
509],
[243, 645, 359, 427, 636, 193, 663, 857, 712,
510, 367, 862, 352, 715, 811, 986, 292, 391,
475],
[645, 767, 117, 907, 321, 906, 592, 508, 647,
289, 307, 519, 425, 659, 219, 459, 537, 505,
328],
[743, 74, 374, 226, 356, 28, 5, 215, 459, 232,
18, 123, 308, 277, 490, 345, 68, 763, 93],
[21, 927, 770, 760, 75, 751, 387, 686, 366, 108,
327, 196, 603, 676, 337, 59, 799, 41, 699],
[777, 779, 755, 647, 718, 144, 749, 35, 282,
233, 552, 936, 391, 140, 877, 874, 472, 86,
836],
[966, 63, 26, 21, 595, 325, 521, 636, 481, 485,
664, 897, 151, 132, 969, 967, 856, 953, 425],
[352, 849, 157, 520, 272, 9, 934, 441, 261, 380,
868, 260, 375, 547, 699, 924, 794, 617, 222],
[559, 901, 435, 537, 620, 779, 708, 848, 903,
701, 570, 115, 114, 342, 57, 878, 278, 697,
629],
[541, 502, 381, 168, 792, 268, 21, 59, 581, 691,
695, 906, 616, 808, 366, 804, 36, 210, 295],
[662, 142, 571, 207, 905, 913, 414, 341, 956,
602, 115, 78, 688, 67, 148, 92, 930, 68, 258],
[301, 970, 837, 91, 679, 574, 119, 324, 554,
233, 617, 382, 876, 516, 380, 584, 516, 911,
331],
[894, 637, 193, 54, 14, 503, 221, 127, 118, 565,
234, 828, 753, 97, 257, 619, 811, 803, 934]]),
[385, 928, 460, 539, 984, 516, 609, 769, 825, 857, 819,
422, 989, 319, 60, 450, 495, 64, 624, 410, 798, 331,
58, 859, 509, 475, 328, 93, 699, 836, 425, 222, 629,
295, 258, 331, 934, 803, 811, 619, 257, 97, 753, 828,
234, 565, 118, 127, 221, 503, 14, 54, 193, 637, 894,
301, 662, 541, 559, 352, 966, 777, 21, 743, 645, 243,
161, 869, 529, 564, 703, 604, 59, 272, 470, 997, 980,
563, 632, 353, 366, 750, 740, 395, 978, 995, 848, 72,
820, 471, 278, 875, 874, 223, 391, 505, 763, 41, 86,
953, 617, 697, 210, 68, 911, 516, 584, 380, 516, 876,
382, 617, 233, 554, 324, 119, 574, 679, 91, 837, 970,
142, 502, 901, 849, 63, 779, 927, 74, 767, 645, 579,
349, 354, 846, 427, 351, 469, 685, 297, 362, 947, 998,
434, 896, 773, 441, 562, 785, 704, 529, 398, 754, 307,
660, 292, 537, 68, 799, 472, 856, 794, 278, 36, 930,
92, 148, 67, 688, 78, 115, 602, 956, 341, 414, 913,
905, 207, 571, 381, 435, 157, 26, 755, 770, 374, 117,
359, 865, 328, 643, 756, 916, 435, 184, 785, 930, 349,
161, 253, 365, 82, 976, 499, 461, 188, 22, 204, 986,
459, 345, 59, 874, 967, 924, 878, 804, 366, 808, 616,
906, 695, 691, 581, 59, 21, 268, 792, 168, 537, 520,
21, 647, 760, 226, 907, 427, 703, 585, 338, 772, 629,
726, 296, 672, 282, 268, 741, 330, 272, 217, 534, 704,
811, 219, 490, 337, 877, 969, 699, 57, 342, 114, 115,
570, 701, 903, 848, 708, 779, 620, 272, 595, 718, 75,
356, 321, 636, 276, 442, 812, 645, 854, 317, 437, 314,
343, 571, 202, 416, 715, 659, 277, 676, 140, 132, 547,
375, 260, 868, 380, 261, 441, 934, 9, 325, 144, 751,
28, 906, 193, 889, 374, 792, 123, 668, 970, 737, 846,
352, 425, 308, 603, 391, 151, 897, 664, 485, 481, 636,
521, 749, 387, 5, 592, 663, 857, 712, 510, 367, 862,
519, 123, 196, 936, 552, 233, 282, 35, 686, 215, 508,
647, 289, 307, 18, 327, 108, 366, 459, 232])
def test_snail_077(self):
self.assertEqual(snail([[666, 962, 235, 436, 68, 11, 222, 412, 346, 108,
83, 505, 615, 899, 111, 149, 740, 452, 988,
476],
[546, 18, 303, 148, 420, 385, 556, 547, 944,
980, 346, 821, 402, 114, 287, 328, 884, 420,
476, 327],
[586, 711, 282, 581, 620, 649, 276, 979, 359,
916, 897, 797, 676, 359, 510, 229, 621, 782,
559, 406],
[888, 758, 801, 266, 597, 509, 541, 501, 301,
109, 298, 676, 542, 803, 434, 40, 601, 224, 72,
387],
[402, 960, 825, 515, 400, 282, 102, 787, 226,
256, 446, 116, 926, 868, 497, 885, 645, 228,
37, 263],
[589, 332, 700, 507, 657, 509, 28, 46, 60, 615,
43, 439, 545, 382, 249, 1, 511, 411, 369, 336],
[470, 14, 533, 919, 248, 40, 292, 559, 970, 850,
609, 202, 315, 100, 52, 467, 332, 666, 620,
145],
[117, 906, 282, 526, 168, 206, 689, 213, 207,
78, 270, 186, 877, 744, 191, 86, 56, 626, 47,
777],
[491, 902, 689, 519, 278, 647, 890, 903, 351,
125, 873, 92, 510, 765, 213, 298, 972, 42, 667,
61],
[689, 759, 825, 676, 249, 697, 684, 112, 347,
73, 863, 91, 150, 311, 140, 814, 984, 838, 458,
505],
[176, 115, 727, 603, 981, 695, 255, 165, 433,
82, 576, 392, 401, 736, 469, 685, 684, 473,
599, 275],
[400, 527, 489, 949, 267, 523, 711, 642, 204,
140, 298, 162, 730, 26, 745, 748, 641, 378,
187, 208],
[424, 742, 633, 608, 645, 642, 876, 276, 408,
985, 695, 3, 772, 967, 436, 422, 333, 626, 980,
279],
[363, 401, 873, 167, 355, 259, 678, 424, 558,
957, 171, 284, 664, 517, 855, 849, 112, 470,
331, 112],
[353, 257, 463, 706, 552, 957, 255, 596, 453,
950, 352, 914, 493, 798, 735, 633, 747, 552,
368, 547],
[524, 688, 975, 145, 704, 232, 190, 483, 617,
262, 882, 782, 5, 345, 285, 483, 325, 321, 866,
806],
[99, 972, 262, 332, 81, 103, 425, 156, 240, 599,
508, 755, 783, 585, 354, 515, 694, 638, 22,
815],
[789, 616, 172, 544, 827, 862, 286, 844, 376,
844, 508, 320, 675, 197, 350, 545, 505, 78,
155, 606],
[203, 167, 992, 723, 682, 83, 534, 315, 376, 89,
267, 107, 346, 924, 306, 752, 627, 496, 994,
613],
[581, 737, 393, 879, 406, 15, 265, 238, 125,
683, 505, 835, 174, 509, 284, 12, 364, 345,
395, 1]]),
[666, 962, 235, 436, 68, 11, 222, 412, 346, 108, 83,
505, 615, 899, 111, 149, 740, 452, 988, 476, 327, 406,
387, 263, 336, 145, 777, 61, 505, 275, 208, 279, 112,
547, 806, 815, 606, 613, 1, 395, 345, 364, 12, 284,
509, 174, 835, 505, 683, 125, 238, 265, 15, 406, 879,
393, 737, 581, 203, 789, 99, 524, 353, 363, 424, 400,
176, 689, 491, 117, 470, 589, 402, 888, 586, 546, 18,
303, 148, 420, 385, 556, 547, 944, 980, 346, 821, 402,
114, 287, 328, 884, 420, 476, 559, 72, 37, 369, 620,
47, 667, 458, 599, 187, 980, 331, 368, 866, 22, 155,
994, 496, 627, 752, 306, 924, 346, 107, 267, 89, 376,
315, 534, 83, 682, 723, 992, 167, 616, 972, 688, 257,
401, 742, 527, 115, 759, 902, 906, 14, 332, 960, 758,
711, 282, 581, 620, 649, 276, 979, 359, 916, 897, 797,
676, 359, 510, 229, 621, 782, 224, 228, 411, 666, 626,
42, 838, 473, 378, 626, 470, 552, 321, 638, 78, 505,
545, 350, 197, 675, 320, 508, 844, 376, 844, 286, 862,
827, 544, 172, 262, 975, 463, 873, 633, 489, 727, 825,
689, 282, 533, 700, 825, 801, 266, 597, 509, 541, 501,
301, 109, 298, 676, 542, 803, 434, 40, 601, 645, 511,
332, 56, 972, 984, 684, 641, 333, 112, 747, 325, 694,
515, 354, 585, 783, 755, 508, 599, 240, 156, 425, 103,
81, 332, 145, 706, 167, 608, 949, 603, 676, 519, 526,
919, 507, 515, 400, 282, 102, 787, 226, 256, 446, 116,
926, 868, 497, 885, 1, 467, 86, 298, 814, 685, 748,
422, 849, 633, 483, 285, 345, 5, 782, 882, 262, 617,
483, 190, 232, 704, 552, 355, 645, 267, 981, 249, 278,
168, 248, 657, 509, 28, 46, 60, 615, 43, 439, 545,
382, 249, 52, 191, 213, 140, 469, 745, 436, 855, 735,
798, 493, 914, 352, 950, 453, 596, 255, 957, 259, 642,
523, 695, 697, 647, 206, 40, 292, 559, 970, 850, 609,
202, 315, 100, 744, 765, 311, 736, 26, 967, 517, 664,
284, 171, 957, 558, 424, 678, 876, 711, 255, 684, 890,
689, 213, 207, 78, 270, 186, 877, 510, 150, 401, 730,
772, 3, 695, 985, 408, 276, 642, 165, 112, 903, 351,
125, 873, 92, 91, 392, 162, 298, 140, 204, 433, 347,
73, 863, 576, 82])
def test_snail_078(self):
self.assertEqual(snail([[46, 105, 755, 137, 836, 162, 149, 478, 258],
[379, 307, 501, 642, 573, 610, 945, 506, 956],
[896, 309, 293, 526, 429, 298, 636, 989, 80],
[880, 153, 23, 95, 765, 124, 818, 836, 15],
[242, 510, 792, 823, 494, 479, 737, 231, 317],
[982, 293, 984, 13, 276, 39, 167, 146, 236],
[431, 546, 246, 860, 207, 380, 306, 577, 405],
[905, 276, 247, 949, 479, 6, 61, 479, 257],
[106, 744, 940, 112, 474, 457, 968, 106, 834]]),
[46, 105, 755, 137, 836, 162, 149, 478, 258, 956, 80,
15, 317, 236, 405, 257, 834, 106, 968, 457, 474, 112,
940, 744, 106, 905, 431, 982, 242, 880, 896, 379, 307,
501, 642, 573, 610, 945, 506, 989, 836, 231, 146, 577,
479, 61, 6, 479, 949, 247, 276, 546, 293, 510, 153,
309, 293, 526, 429, 298, 636, 818, 737, 167, 306, 380,
207, 860, 246, 984, 792, 23, 95, 765, 124, 479, 39,
276, 13, 823, 494])
def test_snail_079(self):
self.assertEqual(snail([[601]]), [601])
def test_snail_080(self):
self.assertEqual(snail([[536, 275, 747, 8, 428, 685, 425, 412, 645, 533,
654, 886, 275, 373, 341, 70, 650],
[756, 413, 436, 934, 70, 645, 837, 399, 729,
158, 115, 212, 529, 627, 892, 58, 619],
[518, 780, 787, 240, 167, 877, 45, 186, 204, 22,
90, 292, 440, 612, 569, 934, 587],
[409, 521, 219, 974, 972, 466, 222, 367, 656,
763, 432, 42, 652, 251, 806, 486, 266],
[144, 220, 975, 993, 678, 28, 287, 838, 236,
632, 211, 508, 380, 748, 908, 232, 311],
[59, 581, 843, 66, 293, 134, 177, 377, 903, 109,
289, 797, 915, 171, 878, 695, 826],
[5, 889, 628, 878, 997, 251, 772, 414, 963, 863,
628, 454, 971, 275, 246, 243, 471],
[589, 24, 469, 290, 96, 747, 174, 213, 135, 156,
578, 50, 459, 831, 340, 571, 230],
[943, 385, 307, 46, 800, 569, 449, 634, 899,
355, 786, 960, 487, 267, 905, 890, 626],
[382, 736, 909, 496, 936, 828, 684, 105, 44,
589, 477, 225, 434, 649, 419, 273, 447],
[629, 814, 94, 606, 640, 272, 12, 589, 421, 327,
552, 249, 232, 32, 713, 179, 812],
[432, 535, 53, 10, 658, 475, 431, 61, 830, 716,
173, 797, 766, 76, 537, 4, 156],
[360, 671, 868, 928, 838, 705, 359, 128, 397,
793, 696, 194, 904, 174, 818, 833, 48],
[789, 821, 540, 10, 356, 267, 847, 783, 914,
391, 93, 977, 426, 780, 755, 35, 914],
[862, 389, 478, 904, 272, 512, 567, 469, 802,
492, 300, 137, 908, 585, 72, 928, 133],
[29, 33, 792, 193, 910, 404, 972, 524, 301, 32,
385, 813, 353, 322, 112, 606, 138],
[930, 878, 888, 703, 28, 422, 72, 939, 971, 551,
49, 363, 45, 723, 659, 553, 133]]),
[536, 275, 747, 8, 428, 685, 425, 412, 645, 533, 654,
886, 275, 373, 341, 70, 650, 619, 587, 266, 311, 826,
471, 230, 626, 447, 812, 156, 48, 914, 133, 138, 133,
553, 659, 723, 45, 363, 49, 551, 971, 939, 72, 422,
28, 703, 888, 878, 930, 29, 862, 789, 360, 432, 629,
382, 943, 589, 5, 59, 144, 409, 518, 756, 413, 436,
934, 70, 645, 837, 399, 729, 158, 115, 212, 529, 627,
892, 58, 934, 486, 232, 695, 243, 571, 890, 273, 179,
4, 833, 35, 928, 606, 112, 322, 353, 813, 385, 32,
301, 524, 972, 404, 910, 193, 792, 33, 389, 821, 671,
535, 814, 736, 385, 24, 889, 581, 220, 521, 780, 787,
240, 167, 877, 45, 186, 204, 22, 90, 292, 440, 612,
569, 806, 908, 878, 246, 340, 905, 419, 713, 537, 818,
755, 72, 585, 908, 137, 300, 492, 802, 469, 567, 512,
272, 904, 478, 540, 868, 53, 94, 909, 307, 469, 628,
843, 975, 219, 974, 972, 466, 222, 367, 656, 763, 432,
42, 652, 251, 748, 171, 275, 831, 267, 649, 32, 76,
174, 780, 426, 977, 93, 391, 914, 783, 847, 267, 356,
10, 928, 10, 606, 496, 46, 290, 878, 66, 993, 678, 28,
287, 838, 236, 632, 211, 508, 380, 915, 971, 459, 487,
434, 232, 766, 904, 194, 696, 793, 397, 128, 359, 705,
838, 658, 640, 936, 800, 96, 997, 293, 134, 177, 377,
903, 109, 289, 797, 454, 50, 960, 225, 249, 797, 173,
716, 830, 61, 431, 475, 272, 828, 569, 747, 251, 772,
414, 963, 863, 628, 578, 786, 477, 552, 327, 421, 589,
12, 684, 449, 174, 213, 135, 156, 355, 589, 44, 105,
634, 899])
def test_snail_081(self):
self.assertEqual(snail([[350, 303, 624, 66, 319, 723, 677],
[44, 616, 64, 859, 683, 425, 556],
[551, 592, 382, 678, 823, 63, 881],
[956, 8, 601, 384, 191, 811, 32],
[815, 931, 592, 1, 230, 786, 446],
[597, 948, 908, 590, 858, 850, 974],
[533, 204, 906, 134, 27, 882, 14]]),
[350, 303, 624, 66, 319, 723, 677, 556, 881, 32, 446,
974, 14, 882, 27, 134, 906, 204, 533, 597, 815, 956,
551, 44, 616, 64, 859, 683, 425, 63, 811, 786, 850,
858, 590, 908, 948, 931, 8, 592, 382, 678, 823, 191,
230, 1, 592, 601, 384])
def test_snail_082(self):
self.assertEqual(snail([[232, 617, 922, 488, 792, 289, 488, 451, 844,
714, 179, 844, 377, 576, 242],
[638, 339, 530, 807, 417, 862, 71, 352, 963, 49,
822, 978, 162, 924, 461],
[148, 636, 263, 858, 140, 630, 654, 304, 258,
412, 64, 1, 326, 311, 284],
[573, 758, 197, 133, 766, 783, 550, 324, 290,
293, 337, 479, 415, 587, 133],
[798, 60, 844, 834, 303, 77, 121, 339, 885, 691,
848, 272, 484, 758, 485],
[364, 590, 109, 407, 502, 59, 341, 166, 109,
382, 90, 226, 362, 299, 761],
[308, 771, 541, 867, 867, 886, 671, 302, 894,
517, 713, 570, 682, 603, 209],
[512, 975, 223, 313, 340, 376, 751, 76, 332, 80,
81, 581, 401, 768, 373],
[933, 400, 289, 659, 875, 869, 5, 606, 314, 989,
494, 403, 925, 341, 839],
[470, 504, 721, 26, 765, 821, 985, 750, 905,
938, 975, 950, 288, 196, 603],
[977, 989, 127, 507, 747, 679, 671, 797, 819,
775, 439, 994, 870, 970, 164],
[761, 489, 634, 539, 499, 48, 61, 515, 675, 8,
194, 621, 828, 638, 801],
[366, 575, 629, 798, 838, 201, 769, 989, 507,
142, 3, 561, 225, 282, 604],
[122, 776, 797, 161, 244, 963, 385, 715, 120,
321, 752, 489, 233, 904, 843],
[739, 637, 324, 232, 751, 507, 800, 548, 486,
781, 554, 267, 721, 845, 6]]),
[232, 617, 922, 488, 792, 289, 488, 451, 844, 714, 179,
844, 377, 576, 242, 461, 284, 133, 485, 761, 209, 373,
839, 603, 164, 801, 604, 843, 6, 845, 721, 267, 554,
781, 486, 548, 800, 507, 751, 232, 324, 637, 739, 122,
366, 761, 977, 470, 933, 512, 308, 364, 798, 573, 148,
638, 339, 530, 807, 417, 862, 71, 352, 963, 49, 822,
978, 162, 924, 311, 587, 758, 299, 603, 768, 341, 196,
970, 638, 282, 904, 233, 489, 752, 321, 120, 715, 385,
963, 244, 161, 797, 776, 575, 489, 989, 504, 400, 975,
771, 590, 60, 758, 636, 263, 858, 140, 630, 654, 304,
258, 412, 64, 1, 326, 415, 484, 362, 682, 401, 925,
288, 870, 828, 225, 561, 3, 142, 507, 989, 769, 201,
838, 798, 629, 634, 127, 721, 289, 223, 541, 109, 844,
197, 133, 766, 783, 550, 324, 290, 293, 337, 479, 272,
226, 570, 581, 403, 950, 994, 621, 194, 8, 675, 515,
61, 48, 499, 539, 507, 26, 659, 313, 867, 407, 834,
303, 77, 121, 339, 885, 691, 848, 90, 713, 81, 494,
975, 439, 775, 819, 797, 671, 679, 747, 765, 875, 340,
867, 502, 59, 341, 166, 109, 382, 517, 80, 989, 938,
905, 750, 985, 821, 869, 376, 886, 671, 302, 894, 332,
314, 606, 5, 751, 76])
def test_snail_083(self):
self.assertEqual(snail([[144, 568, 21, 727, 740, 122, 743, 378, 519,
294, 987, 449, 688, 91],
[552, 330, 129, 484, 303, 770, 794, 409, 32,
995, 764, 458, 386, 946],
[232, 385, 662, 477, 897, 597, 969, 609, 361,
529, 422, 18, 645, 653],
[819, 179, 340, 828, 667, 374, 420, 151, 671,
281, 326, 381, 172, 12],
[695, 954, 432, 746, 292, 212, 544, 792, 75,
976, 26, 534, 887, 349],
[313, 163, 954, 749, 295, 980, 883, 133, 74,
156, 703, 232, 232, 743],
[417, 501, 155, 131, 331, 691, 333, 873, 964,
12, 447, 684, 455, 434],
[135, 141, 836, 947, 767, 389, 477, 646, 470,
281, 296, 182, 898, 681],
[373, 491, 318, 425, 872, 981, 276, 414, 883,
170, 585, 494, 993, 789],
[419, 172, 127, 49, 377, 384, 279, 958, 572,
535, 777, 121, 226, 728],
[683, 986, 545, 205, 129, 816, 117, 474, 159,
577, 380, 149, 42, 360],
[317, 525, 283, 558, 762, 813, 230, 435, 944,
500, 260, 211, 728, 666],
[858, 49, 772, 565, 195, 376, 938, 653, 992, 54,
819, 552, 93, 486],
[246, 99, 795, 769, 705, 916, 422, 117, 882, 41,
23, 612, 426, 556]]),
[144, 568, 21, 727, 740, 122, 743, 378, 519, 294, 987,
449, 688, 91, 946, 653, 12, 349, 743, 434, 681, 789,
728, 360, 666, 486, 556, 426, 612, 23, 41, 882, 117,
422, 916, 705, 769, 795, 99, 246, 858, 317, 683, 419,
373, 135, 417, 313, 695, 819, 232, 552, 330, 129, 484,
303, 770, 794, 409, 32, 995, 764, 458, 386, 645, 172,
887, 232, 455, 898, 993, 226, 42, 728, 93, 552, 819,
54, 992, 653, 938, 376, 195, 565, 772, 49, 525, 986,
172, 491, 141, 501, 163, 954, 179, 385, 662, 477, 897,
597, 969, 609, 361, 529, 422, 18, 381, 534, 232, 684,
182, 494, 121, 149, 211, 260, 500, 944, 435, 230, 813,
762, 558, 283, 545, 127, 318, 836, 155, 954, 432, 340,
828, 667, 374, 420, 151, 671, 281, 326, 26, 703, 447,
296, 585, 777, 380, 577, 159, 474, 117, 816, 129, 205,
49, 425, 947, 131, 749, 746, 292, 212, 544, 792, 75,
976, 156, 12, 281, 170, 535, 572, 958, 279, 384, 377,
872, 767, 331, 295, 980, 883, 133, 74, 964, 470, 883,
414, 276, 981, 389, 691, 333, 873, 646, 477])
def test_snail_084(self):
self.assertEqual(snail([[189, 117, 130, 56, 993, 906, 843, 983, 823,
485, 420, 275, 333, 394, 68, 33],
[818, 830, 502, 978, 273, 428, 157, 621, 121,
411, 509, 279, 263, 56, 108, 82],
[632, 484, 962, 408, 95, 161, 463, 823, 500,
110, 616, 113, 355, 800, 916, 304],
[146, 217, 702, 32, 929, 794, 249, 734, 284,
757, 354, 826, 842, 992, 651, 820],
[644, 413, 389, 168, 871, 136, 95, 987, 101,
790, 634, 771, 802, 35, 528, 248],
[636, 81, 890, 390, 966, 16, 584, 150, 112, 563,
432, 522, 231, 817, 111, 490],
[572, 77, 887, 337, 985, 822, 83, 788, 986, 767,
996, 442, 328, 24, 906, 496],
[889, 781, 904, 723, 475, 507, 809, 682, 839,
436, 614, 415, 490, 892, 778, 879],
[423, 699, 788, 677, 630, 121, 568, 397, 366,
495, 850, 43, 181, 296, 671, 181],
[849, 828, 840, 490, 665, 921, 666, 346, 315,
287, 347, 527, 346, 38, 599, 743],
[196, 68, 364, 681, 321, 104, 86, 948, 393, 201,
470, 539, 459, 60, 156, 742],
[820, 525, 485, 892, 653, 694, 287, 887, 729,
75, 466, 354, 568, 850, 732, 654],
[670, 174, 472, 262, 890, 410, 362, 234, 335,
92, 451, 167, 706, 177, 955, 612],
[647, 138, 198, 265, 541, 673, 41, 818, 564,
863, 932, 552, 245, 71, 535, 289],
[626, 514, 854, 694, 783, 469, 674, 473, 537,
157, 546, 891, 615, 399, 547, 699],
[662, 868, 468, 922, 99, 268, 120, 280, 983,
586, 712, 206, 750, 43, 640, 116]]),
[189, 117, 130, 56, 993, 906, 843, 983, 823, 485, 420,
275, 333, 394, 68, 33, 82, 304, 820, 248, 490, 496,
879, 181, 743, 742, 654, 612, 289, 699, 116, 640, 43,
750, 206, 712, 586, 983, 280, 120, 268, 99, 922, 468,
868, 662, 626, 647, 670, 820, 196, 849, 423, 889, 572,
636, 644, 146, 632, 818, 830, 502, 978, 273, 428, 157,
621, 121, 411, 509, 279, 263, 56, 108, 916, 651, 528,
111, 906, 778, 671, 599, 156, 732, 955, 535, 547, 399,
615, 891, 546, 157, 537, 473, 674, 469, 783, 694, 854,
514, 138, 174, 525, 68, 828, 699, 781, 77, 81, 413,
217, 484, 962, 408, 95, 161, 463, 823, 500, 110, 616,
113, 355, 800, 992, 35, 817, 24, 892, 296, 38, 60,
850, 177, 71, 245, 552, 932, 863, 564, 818, 41, 673,
541, 265, 198, 472, 485, 364, 840, 788, 904, 887, 890,
389, 702, 32, 929, 794, 249, 734, 284, 757, 354, 826,
842, 802, 231, 328, 490, 181, 346, 459, 568, 706, 167,
451, 92, 335, 234, 362, 410, 890, 262, 892, 681, 490,
677, 723, 337, 390, 168, 871, 136, 95, 987, 101, 790,
634, 771, 522, 442, 415, 43, 527, 539, 354, 466, 75,
729, 887, 287, 694, 653, 321, 665, 630, 475, 985, 966,
16, 584, 150, 112, 563, 432, 996, 614, 850, 347, 470,
201, 393, 948, 86, 104, 921, 121, 507, 822, 83, 788,
986, 767, 436, 495, 287, 315, 346, 666, 568, 809, 682,
839, 366, 397])
def test_snail_085(self):
self.assertEqual(snail([[830, 253, 625, 973, 491, 433, 340, 950, 941],
[594, 74, 780, 39, 840, 620, 979, 117, 869],
[382, 233, 384, 673, 659, 501, 886, 415, 947],
[608, 73, 246, 530, 429, 506, 573, 552, 505],
[482, 346, 767, 910, 939, 200, 398, 831, 979],
[382, 723, 528, 936, 648, 300, 650, 564, 823],
[68, 395, 220, 265, 125, 912, 658, 384, 764],
[934, 378, 160, 882, 201, 23, 74, 287, 899],
[927, 183, 846, 677, 44, 118, 611, 685, 902]]),
[830, 253, 625, 973, 491, 433, 340, 950, 941, 869, 947,
505, 979, 823, 764, 899, 902, 685, 611, 118, 44, 677,
846, 183, 927, 934, 68, 382, 482, 608, 382, 594, 74,
780, 39, 840, 620, 979, 117, 415, 552, 831, 564, 384,
287, 74, 23, 201, 882, 160, 378, 395, 723, 346, 73,
233, 384, 673, 659, 501, 886, 573, 398, 650, 658, 912,
125, 265, 220, 528, 767, 246, 530, 429, 506, 200, 300,
648, 936, 910, 939])
def test_snail_086(self):
self.assertEqual(snail([[138, 741, 417, 159, 204, 874, 124, 662, 424,
454, 485, 437, 98, 149, 674, 523, 367],
[743, 936, 693, 93, 718, 309, 249, 664, 820,
647, 450, 8, 94, 521, 633, 704, 11],
[111, 422, 331, 297, 369, 809, 16, 991, 98, 871,
429, 887, 906, 706, 16, 576, 635],
[897, 820, 362, 595, 369, 495, 206, 231, 632,
842, 225, 550, 663, 364, 556, 586, 890],
[370, 949, 756, 447, 637, 326, 862, 636, 342,
228, 582, 876, 52, 210, 298, 922, 59],
[842, 58, 731, 756, 306, 879, 101, 280, 670,
612, 636, 200, 613, 915, 21, 756, 625],
[22, 850, 811, 476, 515, 485, 579, 707, 730,
343, 492, 324, 588, 692, 954, 339, 674],
[851, 681, 842, 320, 85, 795, 471, 779, 259,
472, 217, 985, 410, 371, 408, 401, 649],
[581, 45, 563, 964, 455, 888, 78, 345, 479, 891,
302, 874, 477, 740, 634, 339, 13],
[794, 39, 180, 191, 463, 573, 704, 333, 920,
508, 373, 622, 378, 615, 1, 778, 186],
[850, 537, 431, 4, 427, 172, 687, 344, 35, 847,
745, 818, 394, 935, 796, 428, 562],
[487, 80, 446, 506, 159, 277, 773, 958, 222,
805, 906, 369, 807, 59, 834, 866, 923],
[169, 750, 548, 544, 7, 987, 629, 344, 516, 916,
352, 303, 926, 466, 44, 635, 703],
[956, 621, 626, 932, 280, 837, 977, 529, 761,
567, 636, 162, 415, 12, 347, 336, 835],
[544, 924, 11, 155, 311, 602, 943, 30, 742, 627,
21, 905, 443, 295, 369, 462, 617],
[445, 648, 244, 728, 706, 492, 740, 402, 226,
605, 748, 201, 717, 135, 785, 306, 535],
[827, 874, 302, 890, 902, 317, 511, 214, 761,
852, 180, 653, 300, 780, 147, 744, 661]]),
[138, 741, 417, 159, 204, 874, 124, 662, 424, 454, 485,
437, 98, 149, 674, 523, 367, 11, 635, 890, 59, 625,
674, 649, 13, 186, 562, 923, 703, 835, 617, 535, 661,
744, 147, 780, 300, 653, 180, 852, 761, 214, 511, 317,
902, 890, 302, 874, 827, 445, 544, 956, 169, 487, 850,
794, 581, 851, 22, 842, 370, 897, 111, 743, 936, 693,
93, 718, 309, 249, 664, 820, 647, 450, 8, 94, 521,
633, 704, 576, 586, 922, 756, 339, 401, 339, 778, 428,
866, 635, 336, 462, 306, 785, 135, 717, 201, 748, 605,
226, 402, 740, 492, 706, 728, 244, 648, 924, 621, 750,
80, 537, 39, 45, 681, 850, 58, 949, 820, 422, 331,
297, 369, 809, 16, 991, 98, 871, 429, 887, 906, 706,
16, 556, 298, 21, 954, 408, 634, 1, 796, 834, 44, 347,
369, 295, 443, 905, 21, 627, 742, 30, 943, 602, 311,
155, 11, 626, 548, 446, 431, 180, 563, 842, 811, 731,
756, 362, 595, 369, 495, 206, 231, 632, 842, 225, 550,
663, 364, 210, 915, 692, 371, 740, 615, 935, 59, 466,
12, 415, 162, 636, 567, 761, 529, 977, 837, 280, 932,
544, 506, 4, 191, 964, 320, 476, 756, 447, 637, 326,
862, 636, 342, 228, 582, 876, 52, 613, 588, 410, 477,
378, 394, 807, 926, 303, 352, 916, 516, 344, 629, 987,
7, 159, 427, 463, 455, 85, 515, 306, 879, 101, 280,
670, 612, 636, 200, 324, 985, 874, 622, 818, 369, 906,
805, 222, 958, 773, 277, 172, 573, 888, 795, 485, 579,
707, 730, 343, 492, 217, 302, 373, 745, 847, 35, 344,
687, 704, 78, 471, 779, 259, 472, 891, 508, 920, 333,
345, 479])
def test_snail_087(self):
self.assertEqual(snail([[525, 36, 964], [45, 650, 15], [487, 52, 333]]),
[525, 36, 964, 15, 333, 52, 487, 45, 650])
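    # A minimal reference sketch (an assumption, not the implementation under
    # test): the `snail` function these cases exercise is taken to return the
    # matrix elements in clockwise spiral order starting from the top-left
    # corner. The helper name below is hypothetical and exists only to
    # illustrate that traversal; for the 3x3 case in test_snail_087 it yields
    # [525, 36, 964, 15, 333, 52, 487, 45, 650].
    def _reference_snail_sketch(self, matrix):
        result = []
        while matrix:
            # take the top row, then rotate the remainder counter-clockwise
            result.extend(matrix.pop(0))
            matrix = [list(row) for row in zip(*matrix)][::-1]
        return result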
def test_snail_088(self):
self.assertEqual(snail(
[[708, 433, 591, 277, 518, 564, 794, 454, 666, 595, 254, 870],
[886, 628, 399, 222, 594, 65, 44, 567, 666, 356, 421, 594],
[612, 162, 307, 42, 245, 10, 620, 187, 413, 350, 972, 964],
[669, 577, 98, 332, 950, 236, 393, 682, 764, 432, 849, 338],
[495, 741, 186, 613, 557, 277, 861, 390, 228, 247, 535, 87],
[485, 401, 526, 512, 170, 157, 575, 284, 814, 215, 897, 797],
[447, 679, 519, 8, 329, 991, 568, 414, 967, 643, 213, 426],
[139, 739, 730, 456, 961, 236, 501, 409, 293, 812, 141, 773],
[930, 834, 528, 146, 556, 677, 991, 143, 911, 118, 301, 165],
[467, 666, 632, 210, 394, 124, 348, 615, 620, 216, 340, 549],
[116, 624, 238, 45, 716, 894, 380, 921, 708, 998, 910, 844],
[790, 25, 61, 218, 901, 200, 833, 57, 472, 464, 523, 573]]),
[708, 433, 591, 277, 518, 564, 794, 454, 666, 595, 254,
870, 594, 964, 338, 87, 797, 426, 773, 165, 549, 844,
573, 523, 464, 472, 57, 833, 200, 901, 218, 61, 25,
790, 116, 467, 930, 139, 447, 485, 495, 669, 612, 886,
628, 399, 222, 594, 65, 44, 567, 666, 356, 421, 972,
849, 535, 897, 213, 141, 301, 340, 910, 998, 708, 921,
380, 894, 716, 45, 238, 624, 666, 834, 739, 679, 401,
741, 577, 162, 307, 42, 245, 10, 620, 187, 413, 350,
432, 247, 215, 643, 812, 118, 216, 620, 615, 348, 124,
394, 210, 632, 528, 730, 519, 526, 186, 98, 332, 950,
236, 393, 682, 764, 228, 814, 967, 293, 911, 143, 991,
677, 556, 146, 456, 8, 512, 613, 557, 277, 861, 390,
284, 414, 409, 501, 236, 961, 329, 170, 157, 575, 568,
991])
def test_snail_089(self):
self.assertEqual(snail([[293, 385, 292, 757, 361, 655, 659, 966, 615,
684, 335, 393, 474],
[478, 315, 712, 147, 750, 338, 9, 707, 159, 91,
170, 85, 27],
[499, 881, 299, 789, 431, 756, 734, 872, 384,
26, 520, 489, 819],
[801, 283, 442, 398, 640, 355, 827, 403, 368,
238, 481, 404, 108],
[295, 858, 223, 425, 139, 752, 720, 679, 43,
976, 817, 77, 607],
[877, 392, 230, 626, 447, 760, 462, 860, 519,
976, 935, 473, 237],
[341, 760, 55, 653, 403, 84, 673, 64, 997, 241,
957, 851, 858],
[946, 296, 603, 287, 348, 837, 508, 741, 461,
920, 693, 131, 343],
[584, 127, 8, 16, 54, 563, 356, 193, 904, 812,
588, 915, 556],
[318, 934, 918, 261, 821, 590, 962, 870, 590,
99, 658, 259, 484],
[350, 129, 627, 13, 625, 760, 268, 552, 427,
303, 818, 860, 190],
[944, 524, 876, 51, 827, 34, 526, 269, 903, 5,
902, 246, 609],
[51, 555, 781, 892, 378, 981, 385, 744, 956,
684, 390, 384, 689]]),
[293, 385, 292, 757, 361, 655, 659, 966, 615, 684, 335,
393, 474, 27, 819, 108, 607, 237, 858, 343, 556, 484,
190, 609, 689, 384, 390, 684, 956, 744, 385, 981, 378,
892, 781, 555, 51, 944, 350, 318, 584, 946, 341, 877,
295, 801, 499, 478, 315, 712, 147, 750, 338, 9, 707,
159, 91, 170, 85, 489, 404, 77, 473, 851, 131, 915,
259, 860, 246, 902, 5, 903, 269, 526, 34, 827, 51,
876, 524, 129, 934, 127, 296, 760, 392, 858, 283, 881,
299, 789, 431, 756, 734, 872, 384, 26, 520, 481, 817,
935, 957, 693, 588, 658, 818, 303, 427, 552, 268, 760,
625, 13, 627, 918, 8, 603, 55, 230, 223, 442, 398,
640, 355, 827, 403, 368, 238, 976, 976, 241, 920, 812,
99, 590, 870, 962, 590, 821, 261, 16, 287, 653, 626,
425, 139, 752, 720, 679, 43, 519, 997, 461, 904, 193,
356, 563, 54, 348, 403, 447, 760, 462, 860, 64, 741,
508, 837, 84, 673])
def test_snail_090(self):
self.assertEqual(snail(
[[232, 704, 47, 593, 30, 394, 932, 781, 504, 760],
[319, 806, 940, 647, 412, 710, 335, 109, 34, 130],
[447, 743, 128, 497, 547, 155, 153, 676, 930, 401],
[502, 42, 815, 824, 166, 627, 893, 500, 753, 136],
[343, 384, 627, 328, 944, 841, 583, 899, 598, 558],
[260, 468, 889, 544, 526, 498, 749, 87, 741, 862],
[323, 885, 390, 955, 965, 997, 974, 210, 611, 890],
[598, 888, 146, 717, 118, 63, 858, 484, 310, 265],
[825, 98, 520, 519, 70, 602, 698, 662, 73, 902],
[53, 156, 588, 118, 981, 519, 39, 536, 147, 295]]),
[232, 704, 47, 593, 30, 394, 932, 781, 504, 760, 130,
401, 136, 558, 862, 890, 265, 902, 295, 147, 536, 39,
519, 981, 118, 588, 156, 53, 825, 598, 323, 260, 343,
502, 447, 319, 806, 940, 647, 412, 710, 335, 109, 34,
930, 753, 598, 741, 611, 310, 73, 662, 698, 602, 70,
519, 520, 98, 888, 885, 468, 384, 42, 743, 128, 497,
547, 155, 153, 676, 500, 899, 87, 210, 484, 858, 63,
118, 717, 146, 390, 889, 627, 815, 824, 166, 627, 893,
583, 749, 974, 997, 965, 955, 544, 328, 944, 841, 498,
526])
def test_snail_091(self):
self.assertEqual(snail([[143, 117, 15, 361, 949, 412, 30, 789, 293],
[517, 943, 527, 914, 119, 984, 619, 878, 694],
[548, 939, 625, 550, 787, 169, 633, 216, 84],
[649, 772, 533, 591, 101, 87, 115, 248, 263],
[650, 436, 659, 76, 395, 878, 470, 424, 7],
[218, 861, 265, 632, 294, 34, 433, 364, 370],
[164, 390, 869, 489, 226, 371, 295, 776, 503],
[424, 284, 985, 209, 70, 146, 636, 797, 176],
[951, 264, 54, 144, 844, 301, 390, 678, 639]]),
[143, 117, 15, 361, 949, 412, 30, 789, 293, 694, 84,
263, 7, 370, 503, 176, 639, 678, 390, 301, 844, 144,
54, 264, 951, 424, 164, 218, 650, 649, 548, 517, 943,
527, 914, 119, 984, 619, 878, 216, 248, 424, 364, 776,
797, 636, 146, 70, 209, 985, 284, 390, 861, 436, 772,
939, 625, 550, 787, 169, 633, 115, 470, 433, 295, 371,
226, 489, 869, 265, 659, 533, 591, 101, 87, 878, 34,
294, 632, 76, 395])
def test_snail_092(self):
self.assertEqual(snail([[746, 798, 736, 433, 822, 82, 487, 32, 500, 717,
52, 320, 238, 1, 529, 973, 97, 6],
[37, 761, 26, 63, 86, 770, 670, 899, 738, 619,
967, 959, 663, 317, 395, 302, 45, 8],
[316, 296, 58, 653, 99, 271, 767, 122, 344, 644,
604, 617, 566, 441, 699, 287, 775, 950],
[60, 610, 295, 13, 143, 480, 575, 808, 896, 416,
919, 970, 163, 38, 726, 482, 18, 940],
[62, 758, 907, 148, 994, 42, 547, 843, 296, 348,
625, 884, 823, 148, 682, 358, 627, 792],
[259, 128, 820, 772, 767, 852, 3, 331, 540, 196,
560, 312, 291, 376, 162, 666, 420, 426],
[3, 362, 144, 383, 219, 657, 301, 113, 46, 771,
132, 697, 38, 486, 299, 218, 254, 692],
[397, 76, 152, 653, 161, 199, 721, 574, 729,
810, 383, 223, 626, 96, 817, 872, 945, 220],
[961, 967, 364, 568, 958, 566, 517, 693, 841,
509, 751, 713, 888, 764, 200, 16, 501, 524],
[195, 523, 624, 652, 113, 281, 983, 472, 482,
681, 206, 733, 742, 985, 226, 207, 699, 575],
[533, 22, 231, 758, 951, 649, 867, 381, 734,
538, 733, 814, 392, 86, 322, 758, 550, 42],
[775, 777, 347, 884, 298, 126, 826, 595, 151,
560, 119, 375, 416, 94, 245, 659, 238, 229],
[562, 956, 429, 47, 922, 974, 953, 624, 583, 40,
507, 752, 446, 779, 788, 952, 354, 798],
[495, 809, 528, 500, 277, 35, 827, 486, 12, 453,
73, 91, 129, 810, 828, 698, 401, 824],
[549, 11, 22, 952, 943, 177, 372, 535, 785, 315,
879, 864, 606, 945, 720, 960, 997, 482],
[810, 804, 171, 44, 552, 317, 293, 863, 71, 416,
294, 995, 267, 152, 973, 777, 936, 790],
[264, 874, 485, 179, 373, 496, 786, 520, 437,
363, 619, 97, 895, 21, 9, 170, 757, 855],
[699, 372, 806, 630, 759, 884, 175, 149, 192,
684, 457, 237, 46, 938, 845, 207, 265, 114]]),
[746, 798, 736, 433, 822, 82, 487, 32, 500, 717, 52,
320, 238, 1, 529, 973, 97, 6, 8, 950, 940, 792, 426,
692, 220, 524, 575, 42, 229, 798, 824, 482, 790, 855,
114, 265, 207, 845, 938, 46, 237, 457, 684, 192, 149,
175, 884, 759, 630, 806, 372, 699, 264, 810, 549, 495,
562, 775, 533, 195, 961, 397, 3, 259, 62, 60, 316, 37,
761, 26, 63, 86, 770, 670, 899, 738, 619, 967, 959,
663, 317, 395, 302, 45, 775, 18, 627, 420, 254, 945,
501, 699, 550, 238, 354, 401, 997, 936, 757, 170, 9,
21, 895, 97, 619, 363, 437, 520, 786, 496, 373, 179,
485, 874, 804, 11, 809, 956, 777, 22, 523, 967, 76,
362, 128, 758, 610, 296, 58, 653, 99, 271, 767, 122,
344, 644, 604, 617, 566, 441, 699, 287, 482, 358, 666,
218, 872, 16, 207, 758, 659, 952, 698, 960, 777, 973,
152, 267, 995, 294, 416, 71, 863, 293, 317, 552, 44,
171, 22, 528, 429, 347, 231, 624, 364, 152, 144, 820,
907, 295, 13, 143, 480, 575, 808, 896, 416, 919, 970,
163, 38, 726, 682, 162, 299, 817, 200, 226, 322, 245,
788, 828, 720, 945, 606, 864, 879, 315, 785, 535, 372,
177, 943, 952, 500, 47, 884, 758, 652, 568, 653, 383,
772, 148, 994, 42, 547, 843, 296, 348, 625, 884, 823,
148, 376, 486, 96, 764, 985, 86, 94, 779, 810, 129,
91, 73, 453, 12, 486, 827, 35, 277, 922, 298, 951,
113, 958, 161, 219, 767, 852, 3, 331, 540, 196, 560,
312, 291, 38, 626, 888, 742, 392, 416, 446, 752, 507,
40, 583, 624, 953, 974, 126, 649, 281, 566, 199, 657,
301, 113, 46, 771, 132, 697, 223, 713, 733, 814, 375,
119, 560, 151, 595, 826, 867, 983, 517, 721, 574, 729,
810, 383, 751, 206, 733, 538, 734, 381, 472, 693, 841,
509, 681, 482])
def test_snail_093(self):
self.assertEqual(snail(
[[600, 705, 630, 641, 878], [29, 799, 688, 274, 954],
[642, 533, 486, 590, 446], [386, 706, 769, 884, 88],
[40, 949, 713, 263, 542]]),
[600, 705, 630, 641, 878, 954, 446, 88, 542, 263, 713,
949, 40, 386, 642, 29, 799, 688, 274, 590, 884, 769,
706, 533, 486])
def test_snail_094(self):
self.assertEqual(snail([[907, 736, 956, 378, 282, 128, 890, 360, 476,
774, 662, 76, 440, 146, 260, 503, 594, 753,
601, 758],
[154, 508, 696, 345, 591, 993, 883, 517, 744,
441, 519, 59, 241, 932, 612, 853, 681, 580,
189, 616],
[252, 776, 960, 174, 414, 719, 865, 586, 514,
147, 885, 941, 624, 902, 920, 281, 788, 666,
645, 885],
[449, 571, 489, 461, 175, 497, 36, 529, 700,
833, 843, 360, 129, 148, 858, 887, 601, 368,
850, 693],
[490, 627, 711, 829, 183, 271, 1, 656, 384, 296,
344, 478, 251, 806, 930, 50, 586, 526, 851,
77],
[567, 200, 336, 555, 849, 867, 680, 585, 750,
865, 85, 520, 229, 438, 712, 500, 846, 37, 795,
591],
[861, 605, 491, 552, 577, 316, 923, 18, 304,
237, 105, 132, 420, 458, 716, 500, 745, 488,
419, 900],
[212, 651, 759, 532, 972, 701, 178, 862, 383,
683, 647, 361, 804, 619, 203, 834, 177, 789,
648, 172],
[824, 811, 117, 519, 107, 371, 638, 841, 931,
397, 381, 19, 625, 501, 644, 332, 421, 616, 9,
451],
[626, 774, 505, 848, 925, 320, 635, 85, 491,
700, 980, 154, 778, 639, 893, 455, 491, 23,
488, 902],
[842, 984, 751, 823, 776, 261, 411, 239, 490,
828, 123, 248, 555, 308, 340, 997, 464, 901,
931, 278],
[826, 354, 702, 996, 583, 195, 939, 381, 649,
830, 991, 167, 337, 328, 237, 120, 678, 64,
943, 232],
[393, 920, 761, 889, 953, 112, 493, 891, 517,
937, 212, 549, 37, 800, 902, 200, 45, 958, 400,
609],
[190, 982, 178, 364, 881, 102, 889, 873, 976,
191, 973, 742, 680, 718, 585, 924, 638, 936,
62, 644],
[565, 513, 575, 193, 561, 750, 953, 226, 691,
562, 655, 294, 877, 651, 343, 328, 599, 277,
883, 447],
[224, 782, 630, 902, 677, 276, 35, 489, 941,
122, 950, 593, 808, 738, 901, 228, 621, 730,
567, 484],
[252, 491, 679, 882, 157, 6, 674, 542, 384, 508,
93, 981, 502, 342, 732, 265, 135, 309, 814,
377],
[609, 16, 276, 999, 676, 620, 662, 276, 598, 79,
983, 105, 959, 328, 7, 486, 112, 484, 117,
970],
[592, 391, 807, 39, 654, 757, 676, 569, 589,
920, 935, 443, 821, 220, 406, 551, 649, 605,
753, 277],
[474, 183, 917, 831, 371, 55, 70, 631, 827, 1,
526, 648, 466, 575, 916, 776, 237, 18, 671,
244]]),
[907, 736, 956, 378, 282, 128, 890, 360, 476, 774, 662,
76, 440, 146, 260, 503, 594, 753, 601, 758, 616, 885,
693, 77, 591, 900, 172, 451, 902, 278, 232, 609, 644,
447, 484, 377, 970, 277, 244, 671, 18, 237, 776, 916,
575, 466, 648, 526, 1, 827, 631, 70, 55, 371, 831,
917, 183, 474, 592, 609, 252, 224, 565, 190, 393, 826,
842, 626, 824, 212, 861, 567, 490, 449, 252, 154, 508,
696, 345, 591, 993, 883, 517, 744, 441, 519, 59, 241,
932, 612, 853, 681, 580, 189, 645, 850, 851, 795, 419,
648, 9, 488, 931, 943, 400, 62, 883, 567, 814, 117,
753, 605, 649, 551, 406, 220, 821, 443, 935, 920, 589,
569, 676, 757, 654, 39, 807, 391, 16, 491, 782, 513,
982, 920, 354, 984, 774, 811, 651, 605, 200, 627, 571,
776, 960, 174, 414, 719, 865, 586, 514, 147, 885, 941,
624, 902, 920, 281, 788, 666, 368, 526, 37, 488, 789,
616, 23, 901, 64, 958, 936, 277, 730, 309, 484, 112,
486, 7, 328, 959, 105, 983, 79, 598, 276, 662, 620,
676, 999, 276, 679, 630, 575, 178, 761, 702, 751, 505,
117, 759, 491, 336, 711, 489, 461, 175, 497, 36, 529,
700, 833, 843, 360, 129, 148, 858, 887, 601, 586, 846,
745, 177, 421, 491, 464, 678, 45, 638, 599, 621, 135,
265, 732, 342, 502, 981, 93, 508, 384, 542, 674, 6,
157, 882, 902, 193, 364, 889, 996, 823, 848, 519, 532,
552, 555, 829, 183, 271, 1, 656, 384, 296, 344, 478,
251, 806, 930, 50, 500, 500, 834, 332, 455, 997, 120,
200, 924, 328, 228, 901, 738, 808, 593, 950, 122, 941,
489, 35, 276, 677, 561, 881, 953, 583, 776, 925, 107,
972, 577, 849, 867, 680, 585, 750, 865, 85, 520, 229,
438, 712, 716, 203, 644, 893, 340, 237, 902, 585, 343,
651, 877, 294, 655, 562, 691, 226, 953, 750, 102, 112,
195, 261, 320, 371, 701, 316, 923, 18, 304, 237, 105,
132, 420, 458, 619, 501, 639, 308, 328, 800, 718, 680,
742, 973, 191, 976, 873, 889, 493, 939, 411, 635, 638,
178, 862, 383, 683, 647, 361, 804, 625, 778, 555, 337,
37, 549, 212, 937, 517, 891, 381, 239, 85, 841, 931,
397, 381, 19, 154, 248, 167, 991, 830, 649, 490, 491,
700, 980, 123, 828])
def test_snail_095(self):
self.assertEqual(snail([[247, 36, 147, 670, 85, 302, 290, 318, 625, 571,
925, 293, 329, 386, 513, 32],
[886, 355, 260, 484, 589, 633, 64, 999, 160,
927, 937, 306, 722, 480, 171, 593],
[243, 262, 207, 601, 850, 221, 834, 478, 394, 6,
926, 500, 705, 771, 947, 559],
[894, 64, 204, 221, 196, 17, 465, 978, 251, 395,
208, 623, 457, 274, 198, 982],
[826, 24, 211, 166, 285, 800, 358, 180, 336,
708, 965, 855, 607, 283, 186, 114],
[177, 887, 42, 168, 420, 708, 632, 953, 929,
246, 355, 617, 576, 783, 892, 527],
[393, 714, 22, 905, 724, 749, 226, 128, 689,
924, 203, 353, 502, 583, 363, 249],
[633, 275, 241, 730, 109, 748, 482, 465, 672,
567, 739, 772, 677, 299, 492, 832],
[701, 706, 283, 866, 551, 893, 928, 136, 822,
892, 100, 11, 686, 759, 780, 799],
[818, 515, 137, 699, 122, 187, 587, 708, 819,
842, 689, 234, 229, 763, 484, 512],
[770, 663, 833, 676, 994, 54, 207, 133, 444,
707, 541, 23, 588, 214, 752, 980],
[121, 54, 432, 672, 767, 47, 945, 497, 433, 422,
913, 688, 703, 289, 933, 736],
[80, 683, 447, 359, 245, 935, 348, 196, 118,
637, 938, 270, 532, 97, 647, 329],
[385, 201, 425, 426, 579, 166, 983, 31, 646,
810, 156, 102, 151, 13, 212, 127],
[677, 439, 224, 931, 557, 572, 31, 122, 107,
812, 796, 934, 956, 74, 372, 311],
[807, 154, 33, 598, 333, 42, 7, 937, 312, 911,
186, 918, 962, 554, 746, 436]]),
[247, 36, 147, 670, 85, 302, 290, 318, 625, 571, 925,
293, 329, 386, 513, 32, 593, 559, 982, 114, 527, 249,
832, 799, 512, 980, 736, 329, 127, 311, 436, 746, 554,
962, 918, 186, 911, 312, 937, 7, 42, 333, 598, 33,
154, 807, 677, 385, 80, 121, 770, 818, 701, 633, 393,
177, 826, 894, 243, 886, 355, 260, 484, 589, 633, 64,
999, 160, 927, 937, 306, 722, 480, 171, 947, 198, 186,
892, 363, 492, 780, 484, 752, 933, 647, 212, 372, 74,
956, 934, 796, 812, 107, 122, 31, 572, 557, 931, 224,
439, 201, 683, 54, 663, 515, 706, 275, 714, 887, 24,
64, 262, 207, 601, 850, 221, 834, 478, 394, 6, 926,
500, 705, 771, 274, 283, 783, 583, 299, 759, 763, 214,
289, 97, 13, 151, 102, 156, 810, 646, 31, 983, 166,
579, 426, 425, 447, 432, 833, 137, 283, 241, 22, 42,
211, 204, 221, 196, 17, 465, 978, 251, 395, 208, 623,
457, 607, 576, 502, 677, 686, 229, 588, 703, 532, 270,
938, 637, 118, 196, 348, 935, 245, 359, 672, 676, 699,
866, 730, 905, 168, 166, 285, 800, 358, 180, 336, 708,
965, 855, 617, 353, 772, 11, 234, 23, 688, 913, 422,
433, 497, 945, 47, 767, 994, 122, 551, 109, 724, 420,
708, 632, 953, 929, 246, 355, 203, 739, 100, 689, 541,
707, 444, 133, 207, 54, 187, 893, 748, 749, 226, 128,
689, 924, 567, 892, 842, 819, 708, 587, 928, 482, 465,
672, 822, 136])
def test_snail_096(self):
self.assertEqual(snail([[433, 873, 34, 538, 182, 479, 447, 919, 491,
799, 321, 798, 96, 351, 199, 595, 384],
[688, 520, 440, 10, 768, 283, 286, 980, 786,
632, 724, 772, 776, 791, 526, 902, 143],
[221, 380, 963, 134, 81, 12, 212, 931, 854, 929,
258, 266, 191, 692, 975, 245, 686],
[371, 60, 849, 373, 934, 222, 750, 480, 817,
384, 623, 223, 965, 716, 502, 306, 419],
[137, 668, 412, 520, 759, 695, 35, 791, 512,
272, 880, 453, 79, 2, 813, 383, 715],
[350, 505, 927, 713, 478, 969, 462, 3, 343, 237,
219, 780, 231, 486, 539, 82, 129],
[405, 363, 901, 599, 117, 102, 317, 683, 880,
226, 757, 863, 175, 434, 903, 555, 152],
[918, 331, 443, 864, 933, 126, 463, 526, 570,
243, 866, 184, 895, 478, 413, 143, 900],
[976, 855, 41, 630, 829, 195, 443, 10, 447, 401,
592, 779, 213, 162, 359, 592, 496],
[892, 131, 875, 900, 416, 266, 524, 162, 561,
14, 148, 103, 869, 412, 229, 490, 961],
[589, 282, 373, 491, 878, 25, 541, 207, 642,
380, 971, 581, 721, 500, 135, 98, 425],
[523, 846, 203, 737, 445, 213, 138, 238, 295,
272, 338, 760, 539, 354, 195, 109, 271],
[948, 521, 513, 819, 497, 73, 487, 760, 899,
687, 330, 409, 476, 725, 3, 261, 101],
[690, 406, 882, 6, 341, 931, 135, 659, 746, 960,
709, 42, 621, 741, 6, 444, 496],
[351, 159, 223, 361, 865, 142, 82, 556, 953,
789, 642, 491, 346, 912, 262, 534, 442],
[397, 421, 707, 864, 685, 406, 76, 577, 159,
210, 885, 229, 54, 617, 945, 153, 928],
[778, 175, 280, 641, 290, 911, 692, 538, 48,
480, 772, 400, 119, 691, 539, 728, 27]]),
[433, 873, 34, 538, 182, 479, 447, 919, 491, 799, 321,
798, 96, 351, 199, 595, 384, 143, 686, 419, 715, 129,
152, 900, 496, 961, 425, 271, 101, 496, 442, 928, 27,
728, 539, 691, 119, 400, 772, 480, 48, 538, 692, 911,
290, 641, 280, 175, 778, 397, 351, 690, 948, 523, 589,
892, 976, 918, 405, 350, 137, 371, 221, 688, 520, 440,
10, 768, 283, 286, 980, 786, 632, 724, 772, 776, 791,
526, 902, 245, 306, 383, 82, 555, 143, 592, 490, 98,
109, 261, 444, 534, 153, 945, 617, 54, 229, 885, 210,
159, 577, 76, 406, 685, 864, 707, 421, 159, 406, 521,
846, 282, 131, 855, 331, 363, 505, 668, 60, 380, 963,
134, 81, 12, 212, 931, 854, 929, 258, 266, 191, 692,
975, 502, 813, 539, 903, 413, 359, 229, 135, 195, 3,
6, 262, 912, 346, 491, 642, 789, 953, 556, 82, 142,
865, 361, 223, 882, 513, 203, 373, 875, 41, 443, 901,
927, 412, 849, 373, 934, 222, 750, 480, 817, 384, 623,
223, 965, 716, 2, 486, 434, 478, 162, 412, 500, 354,
725, 741, 621, 42, 709, 960, 746, 659, 135, 931, 341,
6, 819, 737, 491, 900, 630, 864, 599, 713, 520, 759,
695, 35, 791, 512, 272, 880, 453, 79, 231, 175, 895,
213, 869, 721, 539, 476, 409, 330, 687, 899, 760, 487,
73, 497, 445, 878, 416, 829, 933, 117, 478, 969, 462,
3, 343, 237, 219, 780, 863, 184, 779, 103, 581, 760,
338, 272, 295, 238, 138, 213, 25, 266, 195, 126, 102,
317, 683, 880, 226, 757, 866, 592, 148, 971, 380, 642,
207, 541, 524, 443, 463, 526, 570, 243, 401, 14, 561,
162, 10, 447])
def test_snail_097(self):
self.assertEqual(snail(
[[631, 668, 646, 712, 825], [953, 573, 100, 756, 783],
[445, 553, 384, 130, 668], [157, 805, 969, 18, 304],
[551, 676, 558, 200, 793]]),
[631, 668, 646, 712, 825, 783, 668, 304, 793, 200, 558,
676, 551, 157, 445, 953, 573, 100, 756, 130, 18, 969,
805, 553, 384])
def test_snail_098(self):
self.assertEqual(snail(
[[236, 796, 566, 79, 878, 3], [813, 495, 352, 703, 329, 840],
[874, 879, 560, 307, 997, 4], [596, 458, 407, 889, 536, 319],
[334, 151, 460, 511, 411, 855],
[144, 572, 272, 495, 545, 622]]),
[236, 796, 566, 79, 878, 3, 840, 4, 319, 855, 622, 545,
495, 272, 572, 144, 334, 596, 874, 813, 495, 352, 703,
329, 997, 536, 411, 511, 460, 151, 458, 879, 560, 307,
889, 407])
def test_snail_099(self):
self.assertEqual(snail(
[[222, 261, 661, 331, 511, 364], [571, 689, 58, 265, 565, 413],
[68, 5, 853, 541, 890, 410], [571, 71, 770, 563, 603, 521],
[42, 417, 725, 971, 15, 780], [958, 98, 870, 20, 856, 994]]),
[222, 261, 661, 331, 511, 364, 413, 410, 521, 780, 994,
856, 20, 870, 98, 958, 42, 571, 68, 571, 689, 58, 265,
565, 890, 603, 15, 971, 725, 417, 71, 5, 853, 541,
563, 770])
def test_snail_100(self):
self.assertEqual(snail(
[[641, 678, 48, 894, 850], [974, 949, 998, 825, 286],
[979, 768, 792, 384, 688], [430, 622, 694, 337, 275],
[494, 313, 309, 70, 415]]),
[641, 678, 48, 894, 850, 286, 688, 275, 415, 70, 309,
313, 494, 430, 979, 974, 949, 998, 825, 384, 337, 694,
622, 768, 792])
def test_snail_101(self):
self.assertEqual(snail(
[[600, 786, 254, 655, 13], [815, 239, 774, 325, 57],
[499, 747, 251, 914, 861], [605, 594, 499, 646, 290],
[650, 496, 385, 387, 819]]),
[600, 786, 254, 655, 13, 57, 861, 290, 819, 387, 385,
496, 650, 605, 499, 815, 239, 774, 325, 914, 646, 499,
594, 747, 251])
def test_snail_102(self):
self.assertEqual(snail(
[[903, 696, 410, 542, 956, 889], [381, 306, 48, 102, 268, 355],
[341, 279, 537, 59, 605, 18], [909, 865, 234, 770, 206, 806],
[784, 781, 945, 285, 355, 765],
[100, 748, 473, 319, 150, 998]]),
[903, 696, 410, 542, 956, 889, 355, 18, 806, 765, 998,
150, 319, 473, 748, 100, 784, 909, 341, 381, 306, 48,
102, 268, 605, 206, 355, 285, 945, 781, 865, 279, 537,
59, 770, 234])
def test_snail_103(self):
self.assertEqual(snail([[67, 123, 678, 842, 28, 690, 189, 182, 636, 645,
118, 123, 95, 723],
[119, 324, 247, 8, 860, 329, 180, 791, 92, 5,
896, 921, 157, 781],
[756, 950, 738, 573, 101, 446, 468, 594, 316,
962, 708, 168, 889, 8],
[301, 352, 920, 673, 245, 759, 242, 43, 761,
460, 76, 551, 315, 376],
[915, 367, 345, 8, 132, 840, 451, 965, 757, 558,
94, 882, 847, 82],
[949, 651, 239, 941, 544, 782, 220, 763, 384,
515, 840, 377, 809, 300],
[527, 728, 875, 620, 247, 792, 385, 44, 925,
697, 947, 541, 224, 364],
[824, 885, 723, 353, 77, 915, 880, 339, 809,
919, 931, 569, 980, 357],
[89, 100, 893, 516, 786, 59, 365, 967, 122, 103,
55, 42, 683, 101],
[281, 457, 924, 813, 624, 997, 346, 613, 116,
655, 465, 786, 936, 94],
[185, 898, 588, 272, 712, 367, 435, 660, 152,
896, 792, 670, 272, 397],
[891, 754, 108, 844, 44, 648, 684, 571, 201,
745, 440, 88, 511, 214],
[130, 632, 977, 354, 353, 918, 736, 349, 662,
185, 31, 307, 460, 17],
[910, 947, 686, 668, 857, 345, 654, 678, 27, 78,
445, 639, 130, 970]]),
[67, 123, 678, 842, 28, 690, 189, 182, 636, 645, 118,
123, 95, 723, 781, 8, 376, 82, 300, 364, 357, 101, 94,
397, 214, 17, 970, 130, 639, 445, 78, 27, 678, 654,
345, 857, 668, 686, 947, 910, 130, 891, 185, 281, 89,
824, 527, 949, 915, 301, 756, 119, 324, 247, 8, 860,
329, 180, 791, 92, 5, 896, 921, 157, 889, 315, 847,
809, 224, 980, 683, 936, 272, 511, 460, 307, 31, 185,
662, 349, 736, 918, 353, 354, 977, 632, 754, 898, 457,
100, 885, 728, 651, 367, 352, 950, 738, 573, 101, 446,
468, 594, 316, 962, 708, 168, 551, 882, 377, 541, 569,
42, 786, 670, 88, 440, 745, 201, 571, 684, 648, 44,
844, 108, 588, 924, 893, 723, 875, 239, 345, 920, 673,
245, 759, 242, 43, 761, 460, 76, 94, 840, 947, 931,
55, 465, 792, 896, 152, 660, 435, 367, 712, 272, 813,
516, 353, 620, 941, 8, 132, 840, 451, 965, 757, 558,
515, 697, 919, 103, 655, 116, 613, 346, 997, 624, 786,
77, 247, 544, 782, 220, 763, 384, 925, 809, 122, 967,
365, 59, 915, 792, 385, 44, 339, 880])
def test_snail_104(self):
self.assertEqual(snail(
[[676, 9, 91, 957, 699, 130, 983, 164, 75, 728, 784, 125],
[959, 741, 710, 154, 452, 919, 66, 444, 298, 140, 516, 760],
[969, 514, 898, 621, 984, 616, 724, 738, 410, 758, 829, 482],
[82, 491, 417, 173, 563, 494, 155, 763, 850, 915, 625, 396],
[407, 299, 773, 4, 428, 630, 822, 484, 922, 625, 114, 116],
[571, 180, 379, 284, 947, 688, 749, 312, 502, 935, 879, 166],
[641, 719, 73, 623, 940, 590, 81, 644, 997, 378, 847, 501],
[143, 843, 814, 259, 354, 459, 804, 43, 854, 1000, 382, 717],
[602, 64, 272, 149, 285, 862, 958, 662, 556, 426, 259, 197],
[341, 882, 400, 559, 227, 498, 699, 298, 354, 739, 67, 465],
[43, 85, 467, 557, 614, 63, 680, 434, 937, 472, 488, 243],
[978, 607, 559, 134, 531, 907, 4, 813, 259, 203, 384, 836]]),
[676, 9, 91, 957, 699, 130, 983, 164, 75, 728, 784,
125, 760, 482, 396, 116, 166, 501, 717, 197, 465, 243,
836, 384, 203, 259, 813, 4, 907, 531, 134, 559, 607,
978, 43, 341, 602, 143, 641, 571, 407, 82, 969, 959,
741, 710, 154, 452, 919, 66, 444, 298, 140, 516, 829,
625, 114, 879, 847, 382, 259, 67, 488, 472, 937, 434,
680, 63, 614, 557, 467, 85, 882, 64, 843, 719, 180,
299, 491, 514, 898, 621, 984, 616, 724, 738, 410, 758,
915, 625, 935, 378, 1000, 426, 739, 354, 298, 699,
498, 227, 559, 400, 272, 814, 73, 379, 773, 417, 173,
563, 494, 155, 763, 850, 922, 502, 997, 854, 556, 662,
958, 862, 285, 149, 259, 623, 284, 4, 428, 630, 822,
484, 312, 644, 43, 804, 459, 354, 940, 947, 688, 749,
81, 590])
def test_snail_105(self):
self.assertEqual(snail([[221, 977, 163, 642, 495, 250, 823, 751, 152,
681, 814, 539, 941],
[468, 2, 934, 705, 319, 208, 994, 960, 167, 267,
861, 499, 535],
[516, 31, 300, 893, 235, 842, 13, 44, 235, 236,
590, 711, 174],
[485, 83, 36, 462, 822, 458, 963, 829, 626, 699,
602, 248, 286],
[978, 561, 292, 53, 972, 119, 694, 401, 852,
589, 498, 115, 828],
[737, 896, 881, 863, 502, 217, 584, 390, 621,
373, 777, 312, 425],
[453, 827, 774, 783, 322, 350, 606, 786, 709,
627, 579, 314, 700],
[651, 203, 681, 752, 10, 116, 64, 885, 121, 445,
385, 283, 307],
[884, 35, 523, 791, 169, 338, 411, 749, 48, 662,
878, 314, 802],
[137, 755, 589, 409, 870, 857, 687, 37, 818,
206, 952, 505, 337],
[695, 928, 533, 370, 363, 71, 386, 823, 685,
859, 107, 313, 958],
[58, 267, 988, 746, 601, 767, 701, 27, 565, 434,
734, 942, 572],
[405, 442, 424, 298, 14, 428, 699, 906, 900,
928, 97, 783, 273]]),
[221, 977, 163, 642, 495, 250, 823, 751, 152, 681, 814,
539, 941, 535, 174, 286, 828, 425, 700, 307, 802, 337,
958, 572, 273, 783, 97, 928, 900, 906, 699, 428, 14,
298, 424, 442, 405, 58, 695, 137, 884, 651, 453, 737,
978, 485, 516, 468, 2, 934, 705, 319, 208, 994, 960,
167, 267, 861, 499, 711, 248, 115, 312, 314, 283, 314,
505, 313, 942, 734, 434, 565, 27, 701, 767, 601, 746,
988, 267, 928, 755, 35, 203, 827, 896, 561, 83, 31,
300, 893, 235, 842, 13, 44, 235, 236, 590, 602, 498,
777, 579, 385, 878, 952, 107, 859, 685, 823, 386, 71,
363, 370, 533, 589, 523, 681, 774, 881, 292, 36, 462,
822, 458, 963, 829, 626, 699, 589, 373, 627, 445, 662,
206, 818, 37, 687, 857, 870, 409, 791, 752, 783, 863,
53, 972, 119, 694, 401, 852, 621, 709, 121, 48, 749,
411, 338, 169, 10, 322, 502, 217, 584, 390, 786, 885,
64, 116, 350, 606])
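    # For reference only (not part of the original test suite): one way to
    # implement the spiral traversal these tests exercise is to peel off the
    # top row and rotate the remainder counter-clockwise until nothing is left.
    #
    #   def snail(matrix):
    #       result = []
    #       while matrix:
    #           result += list(matrix.pop(0))       # take the top row
    #           matrix = list(zip(*matrix))[::-1]   # rotate the rest CCW
    #       return result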
| 2.765625 | 3 |
python/tink/jwt/_raw_jwt.py | cuonglm/tink | 0 | 3027 | <gh_stars>0
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The raw JSON Web Token (JWT)."""
import copy
import datetime
import json
from typing import cast, Mapping, Set, List, Dict, Optional, Text, Union, Any
from tink import core
from tink.jwt import _jwt_error
from tink.jwt import _jwt_format
_REGISTERED_NAMES = frozenset({'iss', 'sub', 'jti', 'aud', 'exp', 'nbf', 'iat'})
_MAX_TIMESTAMP_VALUE = 253402300799 # 31 Dec 9999, 23:59:59 GMT
Claim = Union[None, bool, int, float, Text, List[Any], Dict[Text, Any]]
def _from_datetime(t: datetime.datetime) -> float:
if not t.tzinfo:
raise _jwt_error.JwtInvalidError('datetime must have tzinfo')
return t.timestamp()
def _to_datetime(timestamp: float) -> datetime.datetime:
return datetime.datetime.fromtimestamp(timestamp, datetime.timezone.utc)
def _validate_custom_claim_name(name: Text) -> None:
if name in _REGISTERED_NAMES:
raise _jwt_error.JwtInvalidError(
'registered name %s cannot be custom claim name' % name)
class RawJwt(object):
"""A raw JSON Web Token (JWT).
  It can be signed to obtain a compact JWT. It is also used as a parsed token
  that has not yet been verified.
"""
def __new__(cls):
raise core.TinkError('RawJwt cannot be instantiated directly.')
def __init__(self, type_header: Optional[Text], payload: Dict[Text,
Any]) -> None:
    # No need to copy payload, because only create and from_json
    # call this method.
if not isinstance(payload, Dict):
raise _jwt_error.JwtInvalidError('payload must be a dict')
self._type_header = type_header
self._payload = payload
self._validate_string_claim('iss')
self._validate_string_claim('sub')
self._validate_string_claim('jti')
self._validate_timestamp_claim('exp')
self._validate_timestamp_claim('nbf')
self._validate_timestamp_claim('iat')
self._validate_audience_claim()
def _validate_string_claim(self, name: Text):
if name in self._payload:
if not isinstance(self._payload[name], Text):
raise _jwt_error.JwtInvalidError('claim %s must be a String' % name)
def _validate_timestamp_claim(self, name: Text):
if name in self._payload:
timestamp = self._payload[name]
if not isinstance(timestamp, (int, float)):
raise _jwt_error.JwtInvalidError('claim %s must be a Number' % name)
if timestamp > _MAX_TIMESTAMP_VALUE or timestamp < 0:
raise _jwt_error.JwtInvalidError(
'timestamp of claim %s is out of range' % name)
def _validate_audience_claim(self):
if 'aud' in self._payload:
audiences = self._payload['aud']
if isinstance(audiences, Text):
self._payload['aud'] = [audiences]
return
if not isinstance(audiences, list) or not audiences:
raise _jwt_error.JwtInvalidError('audiences must be a non-empty list')
if not all(isinstance(value, Text) for value in audiences):
raise _jwt_error.JwtInvalidError('audiences must only contain Text')
# TODO(juerg): Consider adding a raw_ prefix to all access methods
def has_type_header(self) -> bool:
return self._type_header is not None
def type_header(self) -> Text:
if not self.has_type_header():
raise KeyError('type header is not set')
return self._type_header
def has_issuer(self) -> bool:
return 'iss' in self._payload
def issuer(self) -> Text:
return cast(Text, self._payload['iss'])
def has_subject(self) -> bool:
return 'sub' in self._payload
def subject(self) -> Text:
return cast(Text, self._payload['sub'])
def has_audiences(self) -> bool:
return 'aud' in self._payload
def audiences(self) -> List[Text]:
return list(self._payload['aud'])
def has_jwt_id(self) -> bool:
return 'jti' in self._payload
def jwt_id(self) -> Text:
return cast(Text, self._payload['jti'])
def has_expiration(self) -> bool:
return 'exp' in self._payload
def expiration(self) -> datetime.datetime:
return _to_datetime(self._payload['exp'])
def has_not_before(self) -> bool:
return 'nbf' in self._payload
def not_before(self) -> datetime.datetime:
return _to_datetime(self._payload['nbf'])
def has_issued_at(self) -> bool:
return 'iat' in self._payload
def issued_at(self) -> datetime.datetime:
return _to_datetime(self._payload['iat'])
def custom_claim_names(self) -> Set[Text]:
return {n for n in self._payload.keys() if n not in _REGISTERED_NAMES}
def custom_claim(self, name: Text) -> Claim:
_validate_custom_claim_name(name)
value = self._payload[name]
if isinstance(value, (list, dict)):
return copy.deepcopy(value)
else:
return value
def json_payload(self) -> Text:
"""Returns the payload encoded as JSON string."""
return _jwt_format.json_dumps(self._payload)
@classmethod
def create(cls,
*,
type_header: Optional[Text] = None,
issuer: Optional[Text] = None,
subject: Optional[Text] = None,
audiences: Optional[List[Text]] = None,
jwt_id: Optional[Text] = None,
expiration: Optional[datetime.datetime] = None,
not_before: Optional[datetime.datetime] = None,
issued_at: Optional[datetime.datetime] = None,
custom_claims: Mapping[Text, Claim] = None) -> 'RawJwt':
"""Create a new RawJwt instance."""
payload = {}
if issuer:
payload['iss'] = issuer
if subject:
payload['sub'] = subject
if jwt_id is not None:
payload['jti'] = jwt_id
if audiences is not None:
payload['aud'] = copy.copy(audiences)
if expiration:
payload['exp'] = _from_datetime(expiration)
if not_before:
payload['nbf'] = _from_datetime(not_before)
if issued_at:
payload['iat'] = _from_datetime(issued_at)
if custom_claims:
for name, value in custom_claims.items():
_validate_custom_claim_name(name)
if not isinstance(name, Text):
raise _jwt_error.JwtInvalidError('claim name must be Text')
if (value is None or isinstance(value, (bool, int, float, Text))):
payload[name] = value
elif isinstance(value, list):
payload[name] = json.loads(json.dumps(value))
elif isinstance(value, dict):
payload[name] = json.loads(json.dumps(value))
else:
raise _jwt_error.JwtInvalidError('claim %s has unknown type' % name)
raw_jwt = object.__new__(cls)
raw_jwt.__init__(type_header, payload)
return raw_jwt
@classmethod
def from_json(cls, type_header: Optional[Text], payload: Text) -> 'RawJwt':
"""Creates a RawJwt from payload encoded as JSON string."""
raw_jwt = object.__new__(cls)
raw_jwt.__init__(type_header, _jwt_format.json_loads(payload))
return raw_jwt
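# Usage sketch (not part of the original module; the claim values below are
# illustrative only):
#
#   raw = RawJwt.create(issuer='issuer', custom_claims={'role': 'admin'})
#   assert raw.has_issuer() and raw.issuer() == 'issuer'
#   assert raw.custom_claim('role') == 'admin'
#   payload_json = raw.json_payload()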
| 2.140625 | 2 |
plenum/test/view_change/test_no_instance_change_before_node_is_ready.py | evernym/indy-plenum | 0 | 3028 | import pytest
from plenum.server.view_change.view_changer import ViewChanger
from stp_core.common.log import getlogger
from plenum.test.pool_transactions.helper import start_not_added_node, add_started_node
logger = getlogger()
@pytest.fixture(scope="module", autouse=True)
def tconf(tconf):
old_vc_timeout = tconf.VIEW_CHANGE_TIMEOUT
tconf.VIEW_CHANGE_TIMEOUT = 10
yield tconf
tconf.VIEW_CHANGE_TIMEOUT = old_vc_timeout
def test_no_instance_change_on_primary_disconnection_for_not_ready_node(
looper, txnPoolNodeSet, tdir, tconf,
allPluginsPath, sdk_pool_handle, sdk_wallet_steward):
"""
Test steps:
    1. create a new node, but don't add it to the pool (i.e. don't send a NODE txn), so that the node is not ready.
2. wait for more than VIEW_CHANGE_TIMEOUT (a timeout for initial check for disconnected primary)
3. make sure no InstanceChange sent by the new node
4. add the node to the pool (send NODE txn) and make sure that the node is ready now.
5. wait for more than VIEW_CHANGE_TIMEOUT (a timeout for initial check for disconnected primary)
6. make sure no InstanceChange sent by the new node
"""
    # 1. create a new node, but don't add it to the pool (i.e. don't send a NODE txn), so that the node is not ready.
sigseed, bls_key, new_node, node_ha, client_ha = \
start_not_added_node(looper,
tdir, tconf, allPluginsPath,
"TestTheta")
# 2. wait for more than VIEW_CHANGE_TIMEOUT (a timeout for initial check for disconnected primary)
looper.runFor(tconf.VIEW_CHANGE_TIMEOUT + 2)
# 3. make sure no InstanceChange sent by the new node
assert 0 == new_node.view_changer.spylog.count(ViewChanger.sendInstanceChange.__name__)
logger.info("Start added node {}".format(new_node))
# 4. add the node to the pool (send NODE txn) and make sure that the node is ready now.
add_started_node(looper,
new_node,
node_ha,
client_ha,
txnPoolNodeSet,
sdk_pool_handle,
sdk_wallet_steward,
bls_key)
# 5. wait for more than VIEW_CHANGE_TIMEOUT (a timeout for initial check for disconnected primary)
looper.runFor(tconf.VIEW_CHANGE_TIMEOUT + 2)
# 6. make sure no InstanceChange sent by the new node
assert 0 == new_node.view_changer.spylog.count(ViewChanger.sendInstanceChange.__name__)
| 2.03125 | 2 |
src/cache/requests_cache_abstract.py | tomaszkingukrol/rest-api-cache-proxy | 0 | 3029 | <reponame>tomaszkingukrol/rest-api-cache-proxy
from abc import ABC, abstractclassmethod
from model.response import ResponseModel
class CacheInterface(ABC):
@abstractclassmethod
async def get(cls, url: str) -> ResponseModel: pass
@abstractclassmethod
async def set(cls, url: str, value: ResponseModel, ttl=0): pass
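# A minimal in-memory implementation sketch (not part of the original package):
# it keeps responses in a module-level dict and ignores the ttl argument,
# which a production Redis-backed cache would honour.
class InMemoryCache(CacheInterface):
    _store: dict = {}

    @classmethod
    async def get(cls, url: str) -> ResponseModel:
        return cls._store.get(url)

    @classmethod
    async def set(cls, url: str, value: ResponseModel, ttl=0):
        cls._store[url] = value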
| 2.59375 | 3 |
dialogue-engine/test/programytest/config/file/test_json.py | cotobadesign/cotoba-agent-oss | 104 | 3030 | """
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import os
from programy.config.file.json_file import JSONConfigurationFile
from programy.clients.events.console.config import ConsoleConfiguration
from programy.utils.substitutions.substitues import Substitutions
from programytest.config.file.base_file_tests import ConfigurationBaseFileTests
class JSONConfigurationFileTests(ConfigurationBaseFileTests):
def test_get_methods(self):
config_data = JSONConfigurationFile()
self.assertIsNotNone(config_data)
configuration = config_data.load_from_text("""
{
"brain": {
"overrides": {
"allow_system_aiml": true,
"allow_learn_aiml": true,
"allow_learnf_aiml": true
}
}
}
""", ConsoleConfiguration(), ".")
self.assertIsNotNone(configuration)
section = config_data.get_section("brainx")
self.assertIsNone(section)
section = config_data.get_section("brain")
self.assertIsNotNone(section)
child_section = config_data.get_section("overrides", section)
self.assertIsNotNone(child_section)
keys = list(config_data.get_child_section_keys("overrides", section))
self.assertIsNotNone(keys)
self.assertEqual(3, len(keys))
self.assertTrue("allow_system_aiml" in keys)
self.assertTrue("allow_learn_aiml" in keys)
self.assertTrue("allow_learnf_aiml" in keys)
self.assertIsNone(config_data.get_child_section_keys("missing", section))
self.assertEqual(True, config_data.get_option(child_section, "allow_system_aiml"))
self.assertEqual(True, config_data.get_option(child_section, "missing", missing_value=True))
self.assertEqual(True, config_data.get_bool_option(child_section, "allow_system_aiml"))
self.assertEqual(False, config_data.get_bool_option(child_section, "other_value"))
self.assertEqual(0, config_data.get_int_option(child_section, "other_value"))
def test_load_from_file(self):
config = JSONConfigurationFile()
self.assertIsNotNone(config)
configuration = config.load_from_file(os.path.dirname(__file__) + os.sep + "test_json.json", ConsoleConfiguration(), ".")
self.assertIsNotNone(configuration)
self.assert_configuration(configuration)
def test_load_from_text_multis_one_value(self):
config = JSONConfigurationFile()
self.assertIsNotNone(config)
configuration = config.load_from_text("""
{
"bot": {
"brain": "bot1"
}
}
""", ConsoleConfiguration(), ".")
self.assertIsNotNone(configuration)
self.assertEqual(1, len(configuration.client_configuration.configurations[0].configurations))
def test_load_from_text_multis_multiple_values(self):
config = JSONConfigurationFile()
self.assertIsNotNone(config)
configuration = config.load_from_text("""
{
"console": {
"bot": "bot"
},
"bot": {
"brain": ["bot1", "bot2"]
}
}
""", ConsoleConfiguration(), ".")
self.assertIsNotNone(configuration)
self.assertEqual(2, len(configuration.client_configuration.configurations[0].configurations))
def test_load_from_text(self):
config = JSONConfigurationFile()
self.assertIsNotNone(config)
configuration = config.load_from_text("""
{
"console": {
"bot": "bot",
"prompt": ">>>",
"scheduler": {
"name": "Scheduler1",
"debug_level": 50,
"add_listeners": false,
"remove_all_jobs": false
},
"storage": {
"entities": {
"users": "sql",
"linked_accounts": "sql",
"links": "sql",
"properties": "file",
"conversations": "file",
"categories": "file",
"maps": "file",
"sets": "file",
"rdf": "file",
"denormal": "file",
"normal": "file",
"gender": "file",
"person": "file",
"person2": "file",
"spelling_corpus": "file",
"license_keys": "file",
"nodes": "file",
"binaries": "file",
"braintree": "file",
"preprocessors": "file",
"postprocessors": "file",
"regex_templates": "file",
"usergroups": "file",
"learnf": "file"
},
"stores": {
"sql": {
"type": "sql",
"config": {
"url": "sqlite:///:memory",
"echo": false,
"encoding": "utf-8",
"create_db": true,
"drop_all_first": true
}
},
"mongo": {
"type": "mongo",
"config": {
"url": "mongodb://localhost:27017/",
"database": "programy",
"drop_all_first": true
}
},
"redis": {
"type": "redis",
"config": {
"host": "localhost",
"port": 6379,
"password": <PASSWORD>,
"db": 0,
"prefix": "programy",
"drop_all_first": true
}
},
"file": {
"type": "file",
"config": {
"category_storage": {
"files": "./storage/categories"
},
"conversations_storage": {
"files": "./storage/conversations"
},
"sets_storage": {
"files": "./storage/sets",
"extension": ".txt",
"directories": false
},
"maps_storage": {
"files": "./storage/maps",
"extension": ".txt",
"directories": false
},
"regex_templates": {
"files": "./storage/regex"
},
"lookups_storage": {
"files": "./storage/lookups",
"extension": ".txt",
"directories": false
},
"properties_storage": {
"file": "./storage/properties.txt"
},
"defaults_storage": {
"file": "./storage/defaults.txt"
},
"rdf_storage": {
"files": "./storage/rdfs",
"extension": ".txt",
"directories": true
},
"spelling_corpus": {
"file": "./storage/spelling/corpus.txt"
},
"license_keys": {
"file": "./storage/license.keys"
},
"nodes": {
"files": "./storage/nodes"
},
"binaries": {
"files": "./storage/binaries"
},
"braintree": {
"file": "./storage/braintree/braintree.xml",
"format": "xml"
},
"preprocessors": {
"file": "./storage/processing/preprocessors.txt"
},
"postprocessors": {
"file": "./storage/processing/postprocessing.txt"
},
"usergroups": {
"files": "./storage/security/usergroups.txt"
},
"learnf": {
"files": "./storage/categories/learnf"
}
}
}
}
},
"logger": {
"type": "logger",
"config": {
"conversation_logger": "conversation"
}
}
},
"voice": {
"license_keys": "$BOT_ROOT/config/license.keys",
"tts": "osx",
"stt": "azhang",
"osx": {
"classname": "talky.clients.voice.tts.osxsay.OSXSayTextToSpeach"
},
"pytts": {
"classname": "talky.clients.voice.tts.pyttssay.PyTTSSayTextToSpeach",
"rate_adjust": 10
},
"azhang": {
"classname": "talky.clients.voice.stt.azhang.AnthonyZhangSpeechToText",
"ambient_adjust": 3,
"service": "ibm"
}
},
"rest": {
"host": "0.0.0.0",
"port": 8989,
"debug": false,
"workers": 4,
"license_keys": "$BOT_ROOT/config/license.keys"
},
"webchat": {
"host": "0.0.0.0",
"port": 8090,
"debug": false,
"license_keys": "$BOT_ROOT/config/license.keys",
"api": "/api/web/v1.0/ask"
},
"twitter": {
"polling": true,
"polling_interval": 49,
"streaming": false,
"use_status": true,
"use_direct_message": true,
"auto_follow": true,
"storage": "file",
"welcome_message": "Thanks for following me, send me a message and I'll try and help",
"license_keys": "file"
},
"xmpp": {
"server": "talk.google.com",
"port": 5222,
"xep_0030": true,
"xep_0004": true,
"xep_0060": true,
"xep_0199": true,
"license_keys": "file"
},
"socket": {
"host": "127.0.0.1",
"port": 9999,
"queue": 5,
"debug": true,
"license_keys": "file"
},
"telegram": {
"unknown_command": "Sorry, that is not a command I have been taught yet!",
"license_keys": "file"
},
"facebook": {
"host": "127.0.0.1",
"port": 5000,
"debug": false,
"license_keys": "file"
},
"twilio": {
"host": "127.0.0.1",
"port": 5000,
"debug": false,
"license_keys": "file"
},
"slack": {
"polling_interval": 1,
"license_keys": "file"
},
"viber": {
"name": "Servusai",
"avatar": "http://viber.com/avatar.jpg",
"license_keys": "file"
},
"line": {
"host": "127.0.0.1",
"port": 8084,
"debug": false,
"license_keys": "file"
},
"kik": {
"bot_name": "servusai",
"webhook": "https://93638f7a.ngrok.io/api/kik/v1.0/ask",
"host": "127.0.0.1",
"port": 8082,
"debug": false,
"license_keys": "file"
},
"bot": {
"brain": "brain",
"initial_question": "Hi, how can I help you today?",
"initial_question_srai": "YINITIALQUESTION",
"default_response": "Sorry, I don't have an answer for that!",
"default_response_srai": "YEMPTY",
"empty_string": "YEMPTY",
"exit_response": "So long, and thanks for the fish!",
"exit_response_srai": "YEXITRESPONSE",
"override_properties": true,
"max_question_recursion": 1000,
"max_question_timeout": 60,
"max_search_depth": 100,
"max_search_timeout": 60,
"spelling": {
"load": true,
"classname": "programy.spelling.norvig.NorvigSpellingChecker",
"check_before": true,
"check_and_retry": true
},
"conversations": {
"max_histories": 100,
"restore_last_topic": false,
"initial_topic": "TOPIC1",
"empty_on_start": false
}
},
"brain": {
"overrides": {
"allow_system_aiml": true,
"allow_learn_aiml": true,
"allow_learnf_aiml": true
},
"defaults": {
"default-get": "unknown",
"default-property": "unknown",
"default-map": "unknown",
"learnf-path": "file"
},
"binaries": {
"save_binary": true,
"load_binary": true,
"load_aiml_on_binary_fail": true
},
"braintree": {
"create": true
},
"services": {
"REST": {
"classname": "programy.services.rest.GenericRESTService",
"method": "GET",
"host": "0.0.0.0",
"port": 8080
},
"Pannous": {
"classname": "programy.services.pannous.PannousService",
"url": "http://weannie.pannous.com/api"
}
},
"security": {
"authentication": {
"classname": "programy.security.authenticate.passthrough.BasicPassThroughAuthenticationService",
"denied_srai": "AUTHENTICATION_FAILED"
},
"authorisation": {
"classname": "programy.security.authorise.usergroupsauthorisor.BasicUserGroupAuthorisationService",
"denied_srai": "AUTHORISATION_FAILED",
"usergroups": {
"storage": "file"
}
}
},
"oob": {
"default": {
"classname": "programy.oob.defaults.default.DefaultOutOfBandProcessor"
},
"alarm": {
"classname": "programy.oob.defaults.alarm.AlarmOutOfBandProcessor"
},
"camera": {
"classname": "programy.oob.defaults.camera.CameraOutOfBandProcessor"
},
"clear": {
"classname": "programy.oob.defaults.clear.ClearOutOfBandProcessor"
},
"dial": {
"classname": "programy.oob.defaults.dial.DialOutOfBandProcessor"
},
"dialog": {
"classname": "programy.oob.defaults.dialog.DialogOutOfBandProcessor"
},
"email": {
"classname": "programy.oob.defaults.email.EmailOutOfBandProcessor"
},
"geomap": {
"classname": "programy.oob.defaults.map.MapOutOfBandProcessor"
},
"schedule": {
"classname": "programy.oob.defaults.schedule.ScheduleOutOfBandProcessor"
},
"search": {
"classname": "programy.oob.defaults.search.SearchOutOfBandProcessor"
},
"sms": {
"classname": "programy.oob.defaults.sms.SMSOutOfBandProcessor"
},
"url": {
"classname": "programy.oob.defaults.url.URLOutOfBandProcessor"
},
"wifi": {
"classname": "programy.oob.defaults.wifi.WifiOutOfBandProcessor"
}
},
"dynamic": {
"variables": {
"gettime": "programy.dynamic.variables.datetime.GetTime"
},
"sets": {
"numeric": "programy.dynamic.sets.numeric.IsNumeric",
"roman": "programy.dynamic.sets.roman.IsRomanNumeral"
},
"maps": {
"romantodec": "programy.dynamic.maps.roman.MapRomanToDecimal",
"dectoroman": "programy.dynamic.maps.roman.MapDecimalToRoman"
}
}
}
}
""", ConsoleConfiguration(), ".")
self.assertIsNotNone(configuration)
self.assert_configuration(configuration)
def test_load_additionals(self):
config = JSONConfigurationFile()
self.assertIsNotNone(config)
configuration = config.load_from_text("""
{
"console": {
"bot": "bot"
},
"bot": {
"brain": "brain"
},
"brain": {
"security": {
"authentication": {
"classname": "programy.security.authenticate.passthrough.PassThroughAuthenticationService",
"denied_srai": "ACCESS_DENIED"
}
}
}
}
""", ConsoleConfiguration(), ".")
self.assertIsNotNone(configuration)
auth_service = configuration.client_configuration.configurations[0].configurations[0].security.authentication
self.assertIsNotNone(auth_service)
self.assertEqual("ACCESS_DENIED", auth_service.denied_srai)
def test_load_with_subs(self):
subs = Substitutions()
subs.add_substitute("$ALLOW_SYSTEM", True)
config_data = JSONConfigurationFile()
self.assertIsNotNone(config_data)
configuration = config_data.load_from_text("""
{
"brain": {
"overrides": {
"allow_system_aiml": true,
"allow_learn_aiml": true,
"allow_learnf_aiml": true
}
}
}
""", ConsoleConfiguration(), ".")
self.assertIsNotNone(configuration)
section = config_data.get_section("brainx")
self.assertIsNone(section)
section = config_data.get_section("brain")
self.assertIsNotNone(section)
child_section = config_data.get_section("overrides", section)
self.assertIsNotNone(child_section)
self.assertEqual(True, config_data.get_option(child_section, "allow_system_aiml"))
self.assertEqual(True, config_data.get_bool_option(child_section, "allow_system_aiml"))
self.assertEqual(False, config_data.get_bool_option(child_section, "other_value"))
| 1.796875 | 2 |
searching/jump_search.py | magnusrodseth/data-structures-and-algorithms | 0 | 3031 | import math
from typing import List
def jump_search(array: List[int], value: int) -> int:
"""
Performs a jump search on a list of integers.
:param array: is the array to search.
:param value: is the value to search.
    :return: the index of the value, or -1 if it doesn't exist.
"""
if len(array) == 0:
return -1
block_size = get_block_size(array)
# Pointers for traversing the array
start_pointer = 0
next_pointer = block_size
while (start_pointer < len(array)) and (array[next_pointer - 1] < value):
start_pointer = next_pointer
next_pointer += block_size
# Prevent next from going out of bounds
if next_pointer > len(array):
next_pointer = len(array)
# Linear search through the relevant block
for i in range(start_pointer, next_pointer):
if array[i] == value:
return i
return -1
def get_block_size(array: List[int]) -> int:
"""
Gets the block size of an array for jump search.
The block size is the square root of the length of the array.
We then calculate the absolute value of this block size, because we're using the value as
index pointer, and negative values do not make sense here.
This value is then floored to act as index pointer in the array.
:param array: is the array to search.
:return: the block size to be used in jump search.
"""
return math.floor(abs(math.sqrt(len(array))))
if __name__ == '__main__':
# Array must be sorted in order for binary search to work
array = [3, 5, 6, 9, 11, 18, 20, 21, 24, 30]
print(array)
index = jump_search(array, 31)
print(index)
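    # Extra illustrative checks (not in the original script): a value that is
    # present should return its index, e.g. 24 sits at index 8 and 3 at index 0.
    print(jump_search(array, 24))  # expected: 8
    print(jump_search(array, 3))   # expected: 0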
| 4.40625 | 4 |
pincer/objects/message/sticker.py | mjneff2/Pincer | 0 | 3032 | <reponame>mjneff2/Pincer
# Copyright Pincer 2021-Present
# Full MIT License can be found in `LICENSE` at the project root.
from __future__ import annotations
from dataclasses import dataclass
from enum import IntEnum
from typing import List, Optional, TYPE_CHECKING
from ...utils.api_object import APIObject
from ...utils.types import MISSING
if TYPE_CHECKING:
from ..user import User
from ...utils import APINullable, Snowflake
class StickerType(IntEnum):
"""
Displays from where the sticker comes from.
:param STANDARD:
Sticker is included in the default Discord sticker pack.
:param GUILD:
Sticker is a custom sticker from a discord server.
"""
STANDARD = 1
GUILD = 2
class StickerFormatType(IntEnum):
"""
The type of the sticker.
:param PNG:
Sticker is of PNG format.
:param APNG:
Sticker is animated with APNG format.
:param LOTTIE:
        Sticker is animated with LOTTIE format (vector based).
"""
PNG = 1
APNG = 2
LOTTIE = 3
@dataclass
class Sticker(APIObject):
"""
Represents a Discord sticker.
:param description:
description of the sticker
:param format_type:
type of sticker format
:param id:
id of the sticker
:param name:
name of the sticker
:param tags:
for guild stickers, the Discord name of a unicode emoji
representing the sticker's expression. For standard stickers,
a comma-separated list of related expressions.
:param type:
type of sticker
:param available:
whether this guild sticker can be used,
may be false due to loss of Server Boosts
:param guild_id:
id of the guild that owns this sticker
:param pack_id:
for standard stickers, id of the pack the sticker is from
:param sort_value:
the standard sticker's sort order within its pack
:param user:
the user that uploaded the guild sticker
"""
description: Optional[str]
format_type: StickerFormatType
id: Snowflake
name: str
tags: str
type: StickerType
available: APINullable[bool] = MISSING
guild_id: APINullable[Snowflake] = MISSING
pack_id: APINullable[Snowflake] = MISSING
sort_value: APINullable[int] = MISSING
user: APINullable[User] = MISSING
@dataclass
class StickerItem(APIObject):
"""
Represents the smallest amount of data required to render a sticker.
A partial sticker object.
:param id:
id of the sticker
:param name:
name of the sticker
:param format_type:
type of sticker format
"""
id: Snowflake
name: str
format_type: StickerFormatType
@dataclass
class StickerPack(APIObject):
"""
Represents a pack of standard stickers.
:param id:
id of the sticker pack
:param stickers:
the stickers in the pack
:param name:
name of the sticker pack
:param sku_id:
id of the pack's SKU
:param description:
description of the sticker pack
:param cover_sticker_id:
id of a sticker in the pack which is shown as the pack's icon
:param banner_asset_id:
id of the sticker pack's banner image
"""
id: Snowflake
stickers: List[Sticker]
name: str
sku_id: Snowflake
description: str
cover_sticker_id: APINullable[Snowflake] = MISSING
banner_asset_id: APINullable[Snowflake] = MISSING
| 2.40625 | 2 |
app/core/utils.py | yayunl/llfselfhelp | 0 | 3033 | from django.views.generic import \
UpdateView as BaseUpdateView
class UpdateView(BaseUpdateView):
template_name_suffix = '_form_update'
| 1.320313 | 1 |
demo/test_bug_3d.py | zhanwj/multi-task-pytorch | 2 | 3034 | import torch
import lib.modeling.resnet as resnet
import lib.modeling.semseg_heads as snet
import torch.nn as nn
import torch.optim as optim
import utils.resnet_weights_helper as resnet_utils
from torch.autograd import Variable
from roi_data.loader import RoiDataLoader, MinibatchSampler, collate_minibatch, collate_minibatch_semseg
from datasets.roidb import combined_roidb_for_training, combined_roidb_for_training_semseg
import os
import numpy as np
import nn as mynn
import cv2
from modeling.model_builder_3DSD import Generalized_3DSD
from modeling.model_builder_PSP3D import DispSeg
from core.config import cfg, cfg_from_file, cfg_from_list, assert_and_infer_cfg
#load net
class load_net(nn.Module):
def __init__(self):
super(load_net, self).__init__()
build=snet.ModelBuilder()
fc_dim = 2048
self.encoder = build.build_encoder(
arch= 'resnet50_dilated8',
fc_dim=fc_dim)
self.decoder = build.build_decoder(
arch = 'ppm_bilinear',
num_class=19,
fc_dim=fc_dim,
use_softmax=False)
def _init_modules(self):
resnet_utils.load_pretrained_imagenet_weights(self)
def forward(self, data):
pred=self.decoder(self.encoder(data, return_feature_maps=True))
pred = nn.functional.interpolate(
pred, size=[128,128],
mode='bilinear', align_corners=False)
pred = nn.functional.log_softmax(pred, dim=1)
return pred
def dataloader(bs, gpus):
inputs = {}
inputs['data'] = Variable(torch.randn(2*bs, 3, 128, 128)).to('cuda')
inputs['semseg_label_0'] = Variable(torch.LongTensor(
np.random.randint(0, 19, (bs, 128//8, 128//8), dtype=np.long))).to('cuda')
inputs['disp_label_0'] = Variable(torch.rand(bs, 128//8, 128//8)).to('cuda')
inputs['disp_scans'] = Variable(torch.arange(0,
cfg.DISP.MAX_DISPLACEMENT).float().view(1,cfg.DISP.MAX_DISPLACEMENT,1,1).repeat(bs,1,1,1)).to('cuda')
inputs['semseg_scans'] = Variable(torch.arange(0,
cfg.MODEL.NUM_CLASSES).float().view(1, cfg.MODEL.NUM_CLASSES, 1, 1).repeat(bs,1,1,1)).to('cuda')
return inputs
cfg_file = 'e2e_segdisp-R-50_3Dpool_1x.yaml'
cfg_from_file(cfg_file)
print (cfg.SEM)
print (cfg.DISP)
#cfg_from_list(cfg_file)
#assert_and_infer_cfg()
devices_ids=[5]
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join([str(ids) for ids in devices_ids])
torch.backends.cudnn.benchmark=True
#torch.cuda.set_device(3)
len_gpus = len(devices_ids)
batch_size = 2 * len_gpus
#net = mynn.DataParallel(load_net().to('cuda'), minibatch=True)
net = mynn.DataParallel(DispSeg().to('cuda'), minibatch=True)
optimizer = optim.SGD(net.parameters(), lr=0.000875, momentum=0.9)
criterion = nn.NLLLoss(ignore_index=255)
#dataloader= dataloader(batch_size, len_gpus)
for i in range(10):
#for i, inputs in zip(range(1000), dataloader):
inputs = dataloader(batch_size, len_gpus)
for key in inputs:
inputs[key] = torch.chunk(inputs[key], chunks=len_gpus, dim=0)
optimizer.zero_grad()
loss=net(**inputs)
optimizer.step()
for k in loss['losses'].keys():
print (loss['losses'][k].item())
| 1.914063 | 2 |
regenesis/modelgen.py | crijke/regenesis | 16 | 3035 | <reponame>crijke/regenesis<filename>regenesis/modelgen.py
import json
from regenesis.queries import get_cubes, get_all_dimensions, get_dimensions
from pprint import pprint
def generate_dimensions():
dimensions = []
for dimension in get_all_dimensions():
pprint (dimension)
if dimension.get('measure_type').startswith('W-'):
continue
attrs = ['name', 'label']
if 'ZI' in dimension.get('measure_type'):
attrs = ['text', 'from', 'until']
dim = {
'name': dimension.get('name'),
'label': dimension.get('title_de'),
'description': dimension.get('definition_de'),
'attributes': attrs
}
dimensions.append(dim)
return dimensions
def generate_cubes():
cubes = []
for cube in get_cubes():
dimensions = []
measures = []
joins = []
mappings = {}
cube_name = cube.get('cube_name')
for dim in get_dimensions(cube_name):
dn = dim.get('dim_name')
if dim.get('dim_measure_type').startswith('W-'):
measures.append(dn)
continue
dimensions.append(dn)
if dim.get('dim_measure_type').startswith('ZI-'):
mappings[dn + '.text'] = 'fact_%s.%s' % (cube_name, dn)
mappings[dn + '.from'] = 'fact_%s.%s_from' % (cube_name, dn)
mappings[dn + '.until'] = 'fact_%s.%s_until' % (cube_name, dn)
else:
tn = 'tbl_' + dn
joins.append({
'master': dn,
'detail': 'value.value_id',
'alias': tn
})
mappings[dn + '.name'] = tn + '.name'
mappings[dn + '.label'] = tn + '.title_de'
cubes.append({
'dimensions': dimensions,
'measures': measures,
'mappings': mappings,
'joins': joins,
'fact': 'fact_%s' % cube_name,
'name': cube.get('cube_name'),
'label': cube.get('statistic_title_de'),
'description': cube.get('statistic_description_de'),
})
return cubes
def generate_model():
model = {
'dimensions': generate_dimensions(),
'cubes': generate_cubes(),
'locale': 'de'
}
pprint(model)
return model
if __name__ == '__main__':
with open('model.json', 'wb') as fh:
model = generate_model()
json.dump(model, fh, indent=2)
| 2.453125 | 2 |
tests/components/evil_genius_labs/test_light.py | liangleslie/core | 30,023 | 3036 | <gh_stars>1000+
"""Test Evil Genius Labs light."""
from unittest.mock import patch
import pytest
from homeassistant.components.light import (
ATTR_COLOR_MODE,
ATTR_SUPPORTED_COLOR_MODES,
ColorMode,
LightEntityFeature,
)
from homeassistant.const import ATTR_SUPPORTED_FEATURES
@pytest.mark.parametrize("platforms", [("light",)])
async def test_works(hass, setup_evil_genius_labs):
"""Test it works."""
state = hass.states.get("light.fibonacci256_23d4")
assert state is not None
assert state.state == "on"
assert state.attributes["brightness"] == 128
assert state.attributes[ATTR_COLOR_MODE] == ColorMode.RGB
assert state.attributes[ATTR_SUPPORTED_COLOR_MODES] == [ColorMode.RGB]
assert state.attributes[ATTR_SUPPORTED_FEATURES] == LightEntityFeature.EFFECT
@pytest.mark.parametrize("platforms", [("light",)])
async def test_turn_on_color(hass, setup_evil_genius_labs):
"""Test turning on with a color."""
with patch(
"pyevilgenius.EvilGeniusDevice.set_path_value"
) as mock_set_path_value, patch(
"pyevilgenius.EvilGeniusDevice.set_rgb_color"
) as mock_set_rgb_color:
await hass.services.async_call(
"light",
"turn_on",
{
"entity_id": "light.fibonacci256_23d4",
"brightness": 100,
"rgb_color": (10, 20, 30),
},
blocking=True,
)
assert len(mock_set_path_value.mock_calls) == 2
    assert mock_set_path_value.mock_calls[0][1] == ("brightness", 100)
    assert mock_set_path_value.mock_calls[1][1] == ("power", 1)
    assert len(mock_set_rgb_color.mock_calls) == 1
    assert mock_set_rgb_color.mock_calls[0][1] == (10, 20, 30)
@pytest.mark.parametrize("platforms", [("light",)])
async def test_turn_on_effect(hass, setup_evil_genius_labs):
"""Test turning on with an effect."""
with patch("pyevilgenius.EvilGeniusDevice.set_path_value") as mock_set_path_value:
await hass.services.async_call(
"light",
"turn_on",
{
"entity_id": "light.fibonacci256_23d4",
"effect": "Pride Playground",
},
blocking=True,
)
assert len(mock_set_path_value.mock_calls) == 2
    assert mock_set_path_value.mock_calls[0][1] == ("pattern", 4)
    assert mock_set_path_value.mock_calls[1][1] == ("power", 1)
@pytest.mark.parametrize("platforms", [("light",)])
async def test_turn_off(hass, setup_evil_genius_labs):
"""Test turning off."""
with patch("pyevilgenius.EvilGeniusDevice.set_path_value") as mock_set_path_value:
await hass.services.async_call(
"light",
"turn_off",
{
"entity_id": "light.fibonacci256_23d4",
},
blocking=True,
)
assert len(mock_set_path_value.mock_calls) == 1
    assert mock_set_path_value.mock_calls[0][1] == ("power", 0)
| 2.1875 | 2 |
python_on_whales/download_binaries.py | joshbode/python-on-whales | 0 | 3037 | import platform
import shutil
import tempfile
import warnings
from pathlib import Path
import requests
from tqdm import tqdm
DOCKER_VERSION = "20.10.5"
BUILDX_VERSION = "0.5.1"
CACHE_DIR = Path.home() / ".cache" / "python-on-whales"
TEMPLATE_CLI = (
"https://download.docker.com/{os}/static/stable/{arch}/docker-{version}.tgz"
)
WINDOWS_CLI_URL = "https://github.com/StefanScherer/docker-cli-builder/releases/download/{version}/docker.exe"
def get_docker_binary_path_in_cache():
return CACHE_DIR / "docker-cli" / DOCKER_VERSION / "docker"
def get_docker_cli_url():
user_os = get_user_os()
if user_os == "windows":
return WINDOWS_CLI_URL.format(version=DOCKER_VERSION)
arch = get_arch_for_docker_cli_url()
return TEMPLATE_CLI.format(os=user_os, arch=arch, version=DOCKER_VERSION)
def download_docker_cli():
file_to_download = get_docker_cli_url()
extension = file_to_download.split(".")[-1]
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_dir = Path(tmp_dir)
downloaded_file_path = tmp_dir / f"docker.{extension}"
download_from_url(file_to_download, downloaded_file_path)
docker_binary_path = get_docker_binary_path_in_cache()
docker_binary_path.parent.mkdir(exist_ok=True, parents=True)
if extension == "tgz":
extract_dir = tmp_dir / "extracted"
shutil.unpack_archive(str(downloaded_file_path), str(extract_dir))
shutil.move(extract_dir / "docker" / "docker", docker_binary_path)
elif extension == "exe":
shutil.move(downloaded_file_path, docker_binary_path)
warnings.warn(
f"The docker client binary file {DOCKER_VERSION} was downloaded and put "
f"in `{docker_binary_path.absolute()}`. \n"
f"You can feel free to remove it if you wish, Python on whales will download "
f"it again if needed."
)
def download_from_url(url, dst):
try:
_download_from_url(url, dst)
except Exception as e:
raise ConnectionError(f"Error while downloading {url}") from e
def _download_from_url(url, dst):
# Streaming, so we can iterate over the response.
response = requests.get(url, stream=True)
total_size_in_bytes = int(response.headers.get("content-length", 0))
block_size = 1024
progress_bar = tqdm(total=total_size_in_bytes, unit="iB", unit_scale=True)
with open(dst, "wb") as file:
for data in response.iter_content(block_size):
progress_bar.update(len(data))
file.write(data)
progress_bar.close()
if total_size_in_bytes != 0 and progress_bar.n != total_size_in_bytes:
raise ConnectionError(
f"Total size should be {total_size_in_bytes}, downloaded {progress_bar.n}"
)
def get_user_os():
user_os = platform.system()
if user_os == "Linux":
return "linux"
elif user_os == "Darwin":
return "mac"
elif user_os == "Windows":
return "windows"
else:
raise NotImplementedError(
f"Unknown OS: {user_os}, cannot determine which Docker CLI binary file to "
f"download. \n"
f"Please open an issue at \n"
f"https://github.com/gabrieldemarmiesse/python-on-whales/issues \n"
f"and in the meantime, install Docker manually to make python-on-whales "
f"work."
)
def get_arch_for_docker_cli_url():
arch = platform.architecture()[0]
# I don't know the exact list of possible architectures,
# so if a user reports a NotImplementedError, we can easily add
# his/her platform here.
arch_mapping = {
"NotImplementedError": "aarch64",
"NotImplementedError2": "armel",
"NotImplementedError3": "armhf",
"NotImplementedError4": "ppc64le",
"NotImplementedError5": "s390x",
"64bit": "x86_64",
}
try:
return arch_mapping[arch]
except KeyError:
raise NotImplementedError(
f"The architecture detected on your system is `{arch}`, the list of "
f"available architectures is {list(arch_mapping.values())}. \n"
f"Please open an issue at \n"
f"https://github.com/gabrieldemarmiesse/python-on-whales/issues "
f"and make sure to copy past this error message. \n"
f"In the meantime, install Docker manually on your system."
)
| 2.296875 | 2 |
reinvent-2019/connected-photo-booth/lambda_code/Cerebro_GetQRCode.py | chriscoombs/aws-builders-fair-projects | 0 | 3038 | import boto3
import json
import os
import logging
from contextlib import closing
from boto3.dynamodb.conditions import Key, Attr
from botocore.exceptions import ClientError
from random import shuffle
import time
import pyqrcode
import png
__BUCKET_NAME__ = "project-cerebro"
dynamo = boto3.client('dynamodb')
logger = None
print("In initialize fn ...")
logger = logging.getLogger()
if int(os.environ['DEBUG_MODE']):
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
logger.info("Initialize: Just a test")
logger.debug("Initialize: debug a test")
def create_presigned_url(bucket_name, object_name, expiration=3600):
"""Generate a presigned URL to share an S3 object
:param bucket_name: string
:param object_name: string
:param expiration: Time in seconds for the presigned URL to remain valid
:return: Presigned URL as string. If error, returns None.
"""
# Generate a presigned URL for the S3 object
s3_client = boto3.client('s3')
try:
response = s3_client.generate_presigned_url('get_object',
Params={'Bucket': bucket_name,
'Key': object_name},
ExpiresIn=expiration)
except ClientError as e:
logging.error(e)
return None
# The response contains the presigned URL
return response
def respond(err, res=None):
return {
'statusCode': '400' if err else '200',
'body': err.message if err else json.dumps(res),
'headers': {
'Content-Type': 'application/json',
'Access-Control-Allow-Origin': '*'
},
}
# input parameters are:
# 1. image ID
# output parameters are:
# 1. generated QRCode
# workflow:
# 1. first get the image_id
# 2. confirm this exists in s3
# 3. generate a presigned URL with this s3 path
# 4. create a QR Code image with this url embedded
# 5. return the QR code stored in S3 temp.
def main(event, context):
logger.info("In main ...")
start_time = int(round(time.time() * 1000))
body_params = json.loads(event["body"])
logger.debug("Body params:")
logger.debug(body_params)
response_data = {}
# 1. get the image_id
if "image_id" in body_params:
image_id = body_params["image_id"]
# prefix and check for existence
s3_prefix = "production/%s" % image_id
# 2. check for the object in s3
s3 = boto3.resource('s3')
s3_object = s3.Object(__BUCKET_NAME__, s3_prefix)
        s3_object.load()  # HEAD request; raises ClientError if the object does not exist
        obj_metadata = {"content_length": s3_object.content_length}
logger.info("metadata found:")
logger.info(obj_metadata)
if obj_metadata:
response_data["s3_image"] = s3_prefix
# 3. generate the presigned url
presigned_url = create_presigned_url(bucket_name = __BUCKET_NAME__, object_name=s3_prefix, expiration=5*60)
logger.info("generated the presigned URL:")
logger.info(presigned_url)
if presigned_url:
response_data["presigned_url"] = presigned_url
logger.info("assigned presigned url")
# 4. generate the qrcode, convert to png
url = pyqrcode.create(presigned_url)
url.png('/tmp/code.png', scale=5)
logger.info("Created a png file by now!")
# 5. save to s3
target_file='/tmp/code.png'
qrcode_key = "qrcodes/current_qrcode.png"
logger.info("Now trying to put s3 object ...")
# Create an S3 client
s3 = boto3.client('s3')
response = s3.put_object(
Body=open(target_file, 'rb'),
Bucket=__BUCKET_NAME__,
Key=qrcode_key)
logger.info("Now trying to put s3 object - completed!")
response_data["qrcode_key"] = qrcode_key
else:
response_data["result"] = "Failure"
return respond(None, response_data)
end_time = int(round(time.time() * 1000))
logger.info("Time Taken: %f" % (end_time - start_time))
logger.info("Done with main!")
response_data["result"] = "Success"
response_data["time_taken"] = str(end_time - start_time)
return respond(None, response_data)
def lambda_handler(event, context):
return main(event, context)
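# Example invocation payload (shape inferred from main() above; the image id
# value is illustrative only):
#
#   event = {"body": json.dumps({"image_id": "example-image.jpg"})}
#   lambda_handler(event, None)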
| 2.15625 | 2 |
dependencies/svgwrite/tests/test_drawing.py | charlesmchen/typefacet | 21 | 3039 | <filename>dependencies/svgwrite/tests/test_drawing.py
#!/usr/bin/env python
#coding:utf-8
# Author: mozman --<<EMAIL>>
# Purpose: test drawing module
# Created: 11.09.2010
# Copyright (C) 2010, <NAME>
# License: GPLv3
from __future__ import unicode_literals
import os
import unittest
from io import StringIO
from svgwrite.drawing import Drawing
from svgwrite.container import Group
class TestDrawingFullProfile(unittest.TestCase):
def test_empty_drawing(self):
dwg = Drawing()
result = dwg.tostring()
self.assertEqual(result, '<svg baseProfile="full" height="100%" version="1.1" '\
'width="100%" xmlns="http://www.w3.org/2000/svg" '\
'xmlns:ev="http://www.w3.org/2001/xml-events" '\
'xmlns:xlink="http://www.w3.org/1999/xlink"><defs /></svg>')
def test_stylesheet(self):
dwg = Drawing()
dwg.add_stylesheet('test.css', 'Test')
f = StringIO()
dwg.write(f)
result = f.getvalue()
f.close()
self.assertEqual(result, '<?xml version="1.0" encoding="utf-8" ?>\n' \
'<?xml-stylesheet href="test.css" type="text/css" title="Test" alternate="no" media="screen"?>\n'
'<svg baseProfile="full" height="100%" version="1.1" width="100%" '\
'xmlns="http://www.w3.org/2000/svg" xmlns:ev="http://www.w3.org/2001/xml-events" '\
'xmlns:xlink="http://www.w3.org/1999/xlink"><defs /></svg>')
def test_save(self):
fn = 'test_drawing.svg'
if os.path.exists(fn):
os.remove(fn)
dwg = Drawing(fn)
dwg.save()
self.assertTrue(os.path.exists(fn))
os.remove(fn)
def test_save_as(self):
fn = 'test_drawing.svg'
if os.path.exists(fn):
os.remove(fn)
dwg = Drawing()
dwg.saveas(fn)
self.assertTrue(os.path.exists(fn))
os.remove(fn)
def test_non_us_ascii_chars(self):
dwg = Drawing()
dwg.set_desc('öäü')
f = StringIO()
dwg.write(f)
result = f.getvalue()
f.close()
self.assertEqual(result,
'<?xml version="1.0" encoding="utf-8" ?>\n' \
'<svg baseProfile="full" height="100%" version="1.1" width="100%" '\
'xmlns="http://www.w3.org/2000/svg" xmlns:ev="http://www.w3.org/2001/xml-events" '\
'xmlns:xlink="http://www.w3.org/1999/xlink">'
'<title>öäü</title><defs /></svg>')
class TestDrawingTinyProfile(unittest.TestCase):
def test_empty_drawing(self):
dwg = Drawing(profile="tiny")
result = dwg.tostring()
self.assertEqual(result, '<svg baseProfile="tiny" height="100%" version="1.2" '\
'width="100%" xmlns="http://www.w3.org/2000/svg" '\
'xmlns:ev="http://www.w3.org/2001/xml-events" '\
'xmlns:xlink="http://www.w3.org/1999/xlink"><defs /></svg>')
def test_stylesheet(self):
dwg = Drawing(profile="tiny")
dwg.add_stylesheet('test.css', 'Test')
f = StringIO()
dwg.write(f)
result = f.getvalue()
f.close()
self.assertEqual(result, '<?xml version="1.0" encoding="utf-8" ?>\n' \
'<?xml-stylesheet href="test.css" type="text/css" title="Test" alternate="no" media="screen"?>\n'
'<svg baseProfile="tiny" height="100%" version="1.2" width="100%" '\
'xmlns="http://www.w3.org/2000/svg" '\
'xmlns:ev="http://www.w3.org/2001/xml-events" '\
'xmlns:xlink="http://www.w3.org/1999/xlink"><defs /></svg>')
class TestDefs(unittest.TestCase):
def test_simple_defs(self):
dwg = Drawing()
g = dwg.defs.add(Group(id='test'))
inner_g = g.add(Group(id='innerTest'))
result = dwg.tostring()
self.assertEqual(result, '<svg baseProfile="full" height="100%" version="1.1" '\
'width="100%" xmlns="http://www.w3.org/2000/svg" '\
'xmlns:ev="http://www.w3.org/2001/xml-events" '\
'xmlns:xlink="http://www.w3.org/1999/xlink">' \
'<defs><g id="test"><g id="innerTest" /></g></defs></svg>')
if __name__=='__main__':
unittest.main()
| 2.3125 | 2 |
src/cms/views/error_handler/error_handler.py | digitalfabrik/coldaid-backend | 4 | 3040 | <reponame>digitalfabrik/coldaid-backend<gh_stars>1-10
from django.shortcuts import render
from django.utils.translation import ugettext as _
# pylint: disable=unused-argument
def handler400(request, exception):
ctx = {'code': 400, 'title': _('Bad request'),
'message': _('There was an error in your request.')}
response = render(request, 'error_handler/http_error.html', ctx)
response.status_code = 400
return response
# pylint: disable=unused-argument
def handler403(request, exception):
ctx = {'code': 403, 'title': _('Forbidden'),
'message': _("You don't have the permission to access this page.")}
response = render(request, 'error_handler/http_error.html', ctx)
response.status_code = 403
return response
# pylint: disable=unused-argument
def handler404(request, exception):
ctx = {'code': 404, 'title': _('Page not found'),
'message': _('The page you requested could not be found.')}
response = render(request, 'error_handler/http_error.html', ctx)
response.status_code = 404
return response
# pylint: disable=unused-argument
def handler500(request):
ctx = {'code': 500, 'title': _('Internal Server Error'),
'message': _('An unexpected error has occurred.')}
response = render(request, 'error_handler/http_error.html', ctx)
response.status_code = 500
return response
# pylint: disable=unused-argument
def csrf_failure(request, reason):
return render(request, 'error_handler/csrf_failure.html')
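# These handlers take effect once referenced from the project configuration;
# the dotted paths below assume this module's location and are illustrative:
#
#   # urls.py (ROOT_URLCONF)
#   handler400 = 'cms.views.error_handler.error_handler.handler400'
#   handler403 = 'cms.views.error_handler.error_handler.handler403'
#   handler404 = 'cms.views.error_handler.error_handler.handler404'
#   handler500 = 'cms.views.error_handler.error_handler.handler500'
#
#   # settings.py
#   CSRF_FAILURE_VIEW = 'cms.views.error_handler.error_handler.csrf_failure'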
| 2.15625 | 2 |
examples/ex3/app/models.py | trym-inc/django-msg | 7 | 3041 | from typing import NamedTuple
from django.contrib.auth.models import AbstractUser
from django.db import models
from msg.models import Msg
class User(AbstractUser):
phone_number: 'str' = models.CharField(max_length=255,
null=True, blank=True)
class HelloSMSMessage(NamedTuple):
phone_number: 'str'
username: 'str'
def send_hello_sms(self):
if not self.phone_number:
raise ValueError('User has to have a phone number'
'to send a sms message.')
hello = self.HelloSMSMessage(
username=self.username,
phone_number=self.phone_number,
)
Msg.new(hello, dispatch_now=True)
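# Usage sketch (illustrative values, not part of the original example app):
#
#   user = User.objects.create(username='alice', phone_number='+15555550123')
#   user.send_hello_sms()  # builds a HelloSMSMessage and dispatches it via Msg.new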
| 2.78125 | 3 |
Data_Structures/2d_array_ds.py | csixteen/HackerRank | 4 | 3042 | <gh_stars>1-10
matrix = [list(map(int, input().split())) for _ in range(6)]
max_sum = None
for i in range(4):
for j in range(4):
s = sum(matrix[i][j:j+3]) + matrix[i+1][j+1] + sum(matrix[i+2][j:j+3])
if max_sum is None or s > max_sum:
max_sum = s
print(max_sum)
| 2.875 | 3 |
sklearn/utils/_bunch.py | jlopezNEU/scikit-learn | 3 | 3043 | <filename>sklearn/utils/_bunch.py
class Bunch(dict):
"""Container object exposing keys as attributes.
Bunch objects are sometimes used as an output for functions and methods.
They extend dictionaries by enabling values to be accessed by key,
`bunch["value_key"]`, or by an attribute, `bunch.value_key`.
Examples
--------
>>> from sklearn.utils import Bunch
>>> b = Bunch(a=1, b=2)
>>> b['b']
2
>>> b.b
2
>>> b.a = 3
>>> b['a']
3
>>> b.c = 6
>>> b['c']
6
"""
def __init__(self, **kwargs):
super().__init__(kwargs)
def __setattr__(self, key, value):
self[key] = value
def __dir__(self):
return self.keys()
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
def __setstate__(self, state):
# Bunch pickles generated with scikit-learn 0.16.* have an non
# empty __dict__. This causes a surprising behaviour when
# loading these pickles scikit-learn 0.17: reading bunch.key
# uses __dict__ but assigning to bunch.key use __setattr__ and
# only changes bunch['key']. More details can be found at:
# https://github.com/scikit-learn/scikit-learn/issues/6196.
# Overriding __setstate__ to be a noop has the effect of
# ignoring the pickled __dict__
pass
| 3.65625 | 4 |
tfx/examples/chicago_taxi_pipeline/serving/chicago_taxi_client.py | pingsutw/tfx | 1 | 3044 | # Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A client for the chicago_taxi demo."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import base64
import json
import os
import subprocess
import tempfile
import requests
from tensorflow_transform import coders as tft_coders
from tensorflow_transform.tf_metadata import dataset_schema
from tensorflow_transform.tf_metadata import schema_utils
from google.protobuf import text_format
from tensorflow.python.lib.io import file_io # pylint: disable=g-direct-tensorflow-import
from tensorflow.python.platform import app # pylint: disable=g-direct-tensorflow-import
from tensorflow_metadata.proto.v0 import schema_pb2
from tfx.utils import io_utils
_LOCAL_INFERENCE_TIMEOUT_SECONDS = 5.0
_LABEL_KEY = 'tips'
# Tf.Transform considers these features as "raw"
def _get_raw_feature_spec(schema):
return schema_utils.schema_as_feature_spec(schema).feature_spec
def _make_proto_coder(schema):
raw_feature_spec = _get_raw_feature_spec(schema)
raw_schema = dataset_schema.from_feature_spec(raw_feature_spec)
return tft_coders.ExampleProtoCoder(raw_schema)
def _make_csv_coder(schema, column_names):
"""Return a coder for tf.transform to read csv files."""
raw_feature_spec = _get_raw_feature_spec(schema)
parsing_schema = dataset_schema.from_feature_spec(raw_feature_spec)
return tft_coders.CsvCoder(column_names, parsing_schema)
def _read_schema(path):
"""Reads a schema from the provided location.
Args:
path: The location of the file holding a serialized Schema proto.
Returns:
An instance of Schema or None if the input argument is None
"""
result = schema_pb2.Schema()
contents = file_io.read_file_to_string(path)
text_format.Parse(contents, result)
return result
def _do_local_inference(host, port, serialized_examples):
"""Performs inference on a model hosted by the host:port server."""
json_examples = []
for serialized_example in serialized_examples:
# The encoding follows the guidelines in:
# https://www.tensorflow.org/tfx/serving/api_rest
example_bytes = base64.b64encode(serialized_example).decode('utf-8')
predict_request = '{ "b64": "%s" }' % example_bytes
json_examples.append(predict_request)
json_request = '{ "instances": [' + ','.join(map(str, json_examples)) + ']}'
server_url = 'http://' + host + ':' + port + '/v1/models/chicago_taxi:predict'
response = requests.post(
server_url, data=json_request, timeout=_LOCAL_INFERENCE_TIMEOUT_SECONDS)
response.raise_for_status()
prediction = response.json()
print(json.dumps(prediction, indent=4))
def _do_aiplatform_inference(model, version, serialized_examples):
"""Performs inference on the model:version in AI Platform."""
working_dir = tempfile.mkdtemp()
instances_file = os.path.join(working_dir, 'test.json')
json_examples = []
for serialized_example in serialized_examples:
# The encoding follows the example in:
# https://github.com/GoogleCloudPlatform/training-data-analyst/blob/master/quests/tpu/invoke_model.py
json_examples.append('{ "inputs": { "b64": "%s" } }' %
base64.b64encode(serialized_example).decode('utf-8'))
file_io.write_string_to_file(instances_file, '\n'.join(json_examples))
gcloud_command = [
'gcloud', 'ai-platform', 'predict', '--model', model, '--version',
version, '--json-instances', instances_file
]
print(subprocess.check_output(gcloud_command))
def _do_inference(model_handle, examples_file, num_examples, schema):
"""Sends requests to the model and prints the results.
Args:
model_handle: handle to the model. This can be either
"aiplatform:model:version" or "host:port"
examples_file: path to csv file containing examples, with the first line
assumed to have the column headers
num_examples: number of requests to send to the server
schema: a Schema describing the input data
Returns:
Response from model server
"""
filtered_features = [
feature for feature in schema.feature if feature.name != _LABEL_KEY
]
del schema.feature[:]
schema.feature.extend(filtered_features)
column_names = io_utils.load_csv_column_names(examples_file)
csv_coder = _make_csv_coder(schema, column_names)
proto_coder = _make_proto_coder(schema)
input_file = open(examples_file, 'r')
input_file.readline() # skip header line
serialized_examples = []
for _ in range(num_examples):
one_line = input_file.readline()
if not one_line:
print('End of example file reached')
break
one_example = csv_coder.decode(one_line)
serialized_example = proto_coder.encode(one_example)
serialized_examples.append(serialized_example)
parsed_model_handle = model_handle.split(':')
if parsed_model_handle[0] == 'aiplatform':
_do_aiplatform_inference(
model=parsed_model_handle[1],
version=parsed_model_handle[2],
serialized_examples=serialized_examples)
else:
_do_local_inference(
host=parsed_model_handle[0],
port=parsed_model_handle[1],
serialized_examples=serialized_examples)
def main(_):
parser = argparse.ArgumentParser()
parser.add_argument(
'--num_examples',
help=('Number of examples to send to the server.'),
default=1,
type=int)
parser.add_argument(
'--server',
help=('Prediction service host:port or aiplatform:model:version'),
required=True)
parser.add_argument(
'--examples_file',
help=('Path to csv file containing examples.'),
required=True)
parser.add_argument(
'--schema_file', help='File holding the schema for the input data')
known_args, _ = parser.parse_known_args()
_do_inference(known_args.server, known_args.examples_file,
known_args.num_examples, _read_schema(known_args.schema_file))
if __name__ == '__main__':
app.run(main)
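# Example invocation (argument values are illustrative; the flags are the ones
# defined in main() above):
#   python chicago_taxi_client.py --server=localhost:8501 \
#       --examples_file=data/eval.csv --schema_file=schema.pbtxt --num_examples=3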
| 1.6875 | 2 |
PyVideo/main.py | BlackIQ/Cute | 5 | 3045 | <gh_stars>1-10
from PyQt5.QtCore import (pyqtSignal, pyqtSlot, Q_ARG, QAbstractItemModel,
QFileInfo, qFuzzyCompare, QMetaObject, QModelIndex, QObject, Qt,
QThread, QTime, QUrl)
from PyQt5.QtGui import QColor, qGray, QImage, QPainter, QPalette
from PyQt5.QtMultimedia import (QAbstractVideoBuffer, QMediaContent,
QMediaMetaData, QMediaPlayer, QMediaPlaylist, QVideoFrame, QVideoProbe)
from PyQt5.QtMultimediaWidgets import QVideoWidget
from PyQt5.QtWidgets import (QApplication, QComboBox, QDialog, QFileDialog,
QFormLayout, QHBoxLayout, QLabel, QListView, QMessageBox, QPushButton,
QSizePolicy, QSlider, QStyle, QToolButton, QVBoxLayout, QWidget)
class VideoWidget(QVideoWidget):
def __init__(self, parent=None):
super(VideoWidget, self).__init__(parent)
self.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Ignored)
p = self.palette()
p.setColor(QPalette.Window, Qt.black)
self.setPalette(p)
self.setAttribute(Qt.WA_OpaquePaintEvent)
def keyPressEvent(self, event):
if event.key() == Qt.Key_Escape and self.isFullScreen():
self.setFullScreen(False)
event.accept()
elif event.key() == Qt.Key_Enter and event.modifiers() & Qt.Key_Alt:
self.setFullScreen(not self.isFullScreen())
event.accept()
else:
super(VideoWidget, self).keyPressEvent(event)
def mouseDoubleClickEvent(self, event):
self.setFullScreen(not self.isFullScreen())
event.accept()
class PlaylistModel(QAbstractItemModel):
Title, ColumnCount = range(2)
def __init__(self, parent=None):
super(PlaylistModel, self).__init__(parent)
self.m_playlist = None
def rowCount(self, parent=QModelIndex()):
return self.m_playlist.mediaCount() if self.m_playlist is not None and not parent.isValid() else 0
def columnCount(self, parent=QModelIndex()):
return self.ColumnCount if not parent.isValid() else 0
def index(self, row, column, parent=QModelIndex()):
return self.createIndex(row, column) if self.m_playlist is not None and not parent.isValid() and row >= 0 and row < self.m_playlist.mediaCount() and column >= 0 and column < self.ColumnCount else QModelIndex()
def parent(self, child):
return QModelIndex()
def data(self, index, role=Qt.DisplayRole):
if index.isValid() and role == Qt.DisplayRole:
if index.column() == self.Title:
location = self.m_playlist.media(index.row()).canonicalUrl()
return QFileInfo(location.path()).fileName()
return self.m_data[index]
return None
def playlist(self):
return self.m_playlist
def setPlaylist(self, playlist):
if self.m_playlist is not None:
self.m_playlist.mediaAboutToBeInserted.disconnect(
self.beginInsertItems)
self.m_playlist.mediaInserted.disconnect(self.endInsertItems)
self.m_playlist.mediaAboutToBeRemoved.disconnect(
self.beginRemoveItems)
self.m_playlist.mediaRemoved.disconnect(self.endRemoveItems)
self.m_playlist.mediaChanged.disconnect(self.changeItems)
self.beginResetModel()
self.m_playlist = playlist
if self.m_playlist is not None:
self.m_playlist.mediaAboutToBeInserted.connect(
self.beginInsertItems)
self.m_playlist.mediaInserted.connect(self.endInsertItems)
self.m_playlist.mediaAboutToBeRemoved.connect(
self.beginRemoveItems)
self.m_playlist.mediaRemoved.connect(self.endRemoveItems)
self.m_playlist.mediaChanged.connect(self.changeItems)
self.endResetModel()
def beginInsertItems(self, start, end):
self.beginInsertRows(QModelIndex(), start, end)
def endInsertItems(self):
self.endInsertRows()
def beginRemoveItems(self, start, end):
self.beginRemoveRows(QModelIndex(), start, end)
def endRemoveItems(self):
self.endRemoveRows()
def changeItems(self, start, end):
self.dataChanged.emit(self.index(start, 0),
self.index(end, self.ColumnCount))
class PlayerControls(QWidget):
play = pyqtSignal()
pause = pyqtSignal()
stop = pyqtSignal()
next = pyqtSignal()
previous = pyqtSignal()
changeVolume = pyqtSignal(int)
changeMuting = pyqtSignal(bool)
changeRate = pyqtSignal(float)
def __init__(self, parent=None):
super(PlayerControls, self).__init__(parent)
self.playerState = QMediaPlayer.StoppedState
self.playerMuted = False
self.playButton = QToolButton(clicked=self.playClicked)
self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay))
self.stopButton = QToolButton(clicked=self.stop)
self.stopButton.setIcon(self.style().standardIcon(QStyle.SP_MediaStop))
self.stopButton.setEnabled(False)
self.nextButton = QToolButton(clicked=self.next)
self.nextButton.setIcon(
self.style().standardIcon(QStyle.SP_MediaSkipForward))
self.previousButton = QToolButton(clicked=self.previous)
self.previousButton.setIcon(
self.style().standardIcon(QStyle.SP_MediaSkipBackward))
self.muteButton = QToolButton(clicked=self.muteClicked)
self.muteButton.setIcon(
self.style().standardIcon(QStyle.SP_MediaVolume))
self.volumeSlider = QSlider(Qt.Horizontal,
sliderMoved=self.changeVolume)
self.volumeSlider.setRange(0, 100)
self.rateBox = QComboBox(activated=self.updateRate)
self.rateBox.addItem("0.5x", 0.5)
self.rateBox.addItem("1.0x", 1.0)
self.rateBox.addItem("2.0x", 2.0)
self.rateBox.setCurrentIndex(1)
layout = QHBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(self.stopButton)
layout.addWidget(self.previousButton)
layout.addWidget(self.playButton)
layout.addWidget(self.nextButton)
layout.addWidget(self.muteButton)
layout.addWidget(self.volumeSlider)
layout.addWidget(self.rateBox)
self.setLayout(layout)
def state(self):
return self.playerState
def setState(self,state):
if state != self.playerState:
self.playerState = state
if state == QMediaPlayer.StoppedState:
self.stopButton.setEnabled(False)
self.playButton.setIcon(
self.style().standardIcon(QStyle.SP_MediaPlay))
elif state == QMediaPlayer.PlayingState:
self.stopButton.setEnabled(True)
self.playButton.setIcon(
self.style().standardIcon(QStyle.SP_MediaPause))
elif state == QMediaPlayer.PausedState:
self.stopButton.setEnabled(True)
self.playButton.setIcon(
self.style().standardIcon(QStyle.SP_MediaPlay))
def volume(self):
return self.volumeSlider.value()
def setVolume(self, volume):
self.volumeSlider.setValue(volume)
def isMuted(self):
return self.playerMuted
def setMuted(self, muted):
if muted != self.playerMuted:
self.playerMuted = muted
self.muteButton.setIcon(
self.style().standardIcon(
QStyle.SP_MediaVolumeMuted if muted else QStyle.SP_MediaVolume))
def playClicked(self):
if self.playerState in (QMediaPlayer.StoppedState, QMediaPlayer.PausedState):
self.play.emit()
elif self.playerState == QMediaPlayer.PlayingState:
self.pause.emit()
def muteClicked(self):
self.changeMuting.emit(not self.playerMuted)
def playbackRate(self):
return self.rateBox.itemData(self.rateBox.currentIndex())
def setPlaybackRate(self, rate):
for i in range(self.rateBox.count()):
if qFuzzyCompare(rate, self.rateBox.itemData(i)):
self.rateBox.setCurrentIndex(i)
return
self.rateBox.addItem("%dx" % rate, rate)
self.rateBox.setCurrentIndex(self.rateBox.count() - 1)
def updateRate(self):
self.changeRate.emit(self.playbackRate())
class FrameProcessor(QObject):
histogramReady = pyqtSignal(list)
@pyqtSlot(QVideoFrame, int)
def processFrame(self, frame, levels):
histogram = [0.0] * levels
if levels and frame.map(QAbstractVideoBuffer.ReadOnly):
pixelFormat = frame.pixelFormat()
if pixelFormat == QVideoFrame.Format_YUV420P or pixelFormat == QVideoFrame.Format_NV12:
# Process YUV data.
bits = frame.bits()
for idx in range(frame.height() * frame.width()):
histogram[(bits[idx] * levels) >> 8] += 1.0
else:
imageFormat = QVideoFrame.imageFormatFromPixelFormat(pixelFormat)
if imageFormat != QImage.Format_Invalid:
# Process RGB data.
image = QImage(frame.bits(), frame.width(), frame.height(), imageFormat)
for y in range(image.height()):
for x in range(image.width()):
pixel = image.pixel(x, y)
histogram[(qGray(pixel) * levels) >> 8] += 1.0
# Find the maximum value.
maxValue = 0.0
for value in histogram:
if value > maxValue:
maxValue = value
# Normalise the values between 0 and 1.
if maxValue > 0.0:
for i in range(len(histogram)):
histogram[i] /= maxValue
frame.unmap()
self.histogramReady.emit(histogram)
class HistogramWidget(QWidget):
def __init__(self, parent=None):
super(HistogramWidget, self).__init__(parent)
self.m_levels = 128
self.m_isBusy = False
self.m_histogram = []
self.m_processor = FrameProcessor()
self.m_processorThread = QThread()
self.m_processor.moveToThread(self.m_processorThread)
self.m_processor.histogramReady.connect(self.setHistogram)
def __del__(self):
self.m_processorThread.quit()
self.m_processorThread.wait(10000)
def setLevels(self, levels):
self.m_levels = levels
def processFrame(self, frame):
if self.m_isBusy:
return
self.m_isBusy = True
QMetaObject.invokeMethod(self.m_processor, 'processFrame',
Qt.QueuedConnection, Q_ARG(QVideoFrame, frame),
Q_ARG(int, self.m_levels))
@pyqtSlot(list)
def setHistogram(self, histogram):
self.m_isBusy = False
self.m_histogram = list(histogram)
self.update()
def paintEvent(self, event):
painter = QPainter(self)
if len(self.m_histogram) == 0:
painter.fillRect(0, 0, self.width(), self.height(),
QColor.fromRgb(0, 0, 0))
return
barWidth = self.width() / float(len(self.m_histogram))
for i, value in enumerate(self.m_histogram):
h = value * self.height()
# Draw the level.
painter.fillRect(barWidth * i, self.height() - h,
barWidth * (i + 1), self.height(), Qt.red)
# Clear the rest of the control.
painter.fillRect(barWidth * i, 0, barWidth * (i + 1),
self.height() - h, Qt.black)
class Player(QWidget):
fullScreenChanged = pyqtSignal(bool)
def __init__(self, playlist, parent=None):
super(Player, self).__init__(parent)
self.colorDialog = None
self.trackInfo = ""
self.statusInfo = ""
self.duration = 0
self.player = QMediaPlayer()
self.playlist = QMediaPlaylist()
self.player.setPlaylist(self.playlist)
self.player.durationChanged.connect(self.durationChanged)
self.player.positionChanged.connect(self.positionChanged)
self.player.metaDataChanged.connect(self.metaDataChanged)
self.playlist.currentIndexChanged.connect(self.playlistPositionChanged)
self.player.mediaStatusChanged.connect(self.statusChanged)
self.player.bufferStatusChanged.connect(self.bufferingProgress)
self.player.videoAvailableChanged.connect(self.videoAvailableChanged)
self.player.error.connect(self.displayErrorMessage)
self.videoWidget = VideoWidget()
self.player.setVideoOutput(self.videoWidget)
self.playlistModel = PlaylistModel()
self.playlistModel.setPlaylist(self.playlist)
self.playlistView = QListView()
self.playlistView.setModel(self.playlistModel)
self.playlistView.setCurrentIndex(
self.playlistModel.index(self.playlist.currentIndex(), 0))
self.playlistView.activated.connect(self.jump)
self.slider = QSlider(Qt.Horizontal)
self.slider.setRange(0, self.player.duration() / 1000)
self.labelDuration = QLabel()
self.slider.sliderMoved.connect(self.seek)
self.labelHistogram = QLabel()
self.labelHistogram.setText("Histogram:")
self.histogram = HistogramWidget()
histogramLayout = QHBoxLayout()
histogramLayout.addWidget(self.labelHistogram)
histogramLayout.addWidget(self.histogram, 1)
self.probe = QVideoProbe()
self.probe.videoFrameProbed.connect(self.histogram.processFrame)
self.probe.setSource(self.player)
openButton = QPushButton("Open", clicked=self.open)
controls = PlayerControls()
controls.setState(self.player.state())
controls.setVolume(self.player.volume())
controls.setMuted(controls.isMuted())
controls.play.connect(self.player.play)
controls.pause.connect(self.player.pause)
controls.stop.connect(self.player.stop)
controls.next.connect(self.playlist.next)
controls.previous.connect(self.previousClicked)
controls.changeVolume.connect(self.player.setVolume)
controls.changeMuting.connect(self.player.setMuted)
controls.changeRate.connect(self.player.setPlaybackRate)
controls.stop.connect(self.videoWidget.update)
self.player.stateChanged.connect(controls.setState)
self.player.volumeChanged.connect(controls.setVolume)
self.player.mutedChanged.connect(controls.setMuted)
self.fullScreenButton = QPushButton("FullScreen")
self.fullScreenButton.setCheckable(True)
self.colorButton = QPushButton("Color Options...")
self.colorButton.setEnabled(False)
self.colorButton.clicked.connect(self.showColorDialog)
displayLayout = QHBoxLayout()
displayLayout.addWidget(self.videoWidget, 2)
displayLayout.addWidget(self.playlistView)
controlLayout = QHBoxLayout()
controlLayout.setContentsMargins(0, 0, 0, 0)
controlLayout.addWidget(openButton)
controlLayout.addStretch(1)
controlLayout.addWidget(controls)
controlLayout.addStretch(1)
controlLayout.addWidget(self.fullScreenButton)
controlLayout.addWidget(self.colorButton)
layout = QVBoxLayout()
layout.addLayout(displayLayout)
hLayout = QHBoxLayout()
hLayout.addWidget(self.slider)
hLayout.addWidget(self.labelDuration)
layout.addLayout(hLayout)
layout.addLayout(controlLayout)
layout.addLayout(histogramLayout)
self.setLayout(layout)
if not self.player.isAvailable():
QMessageBox.warning(self, "Service not available",
"The QMediaPlayer object does not have a valid service.\n"
"Please check the media service plugins are installed.")
controls.setEnabled(False)
self.playlistView.setEnabled(False)
openButton.setEnabled(False)
self.colorButton.setEnabled(False)
self.fullScreenButton.setEnabled(False)
self.metaDataChanged()
self.addToPlaylist(playlist)
def open(self):
fileNames, _ = QFileDialog.getOpenFileNames(self, "Open Files")
self.addToPlaylist(fileNames)
def addToPlaylist(self, fileNames):
for name in fileNames:
fileInfo = QFileInfo(name)
if fileInfo.exists():
url = QUrl.fromLocalFile(fileInfo.absoluteFilePath())
if fileInfo.suffix().lower() == 'm3u':
self.playlist.load(url)
else:
self.playlist.addMedia(QMediaContent(url))
else:
url = QUrl(name)
if url.isValid():
self.playlist.addMedia(QMediaContent(url))
def durationChanged(self, duration):
duration /= 1000
self.duration = duration
self.slider.setMaximum(duration)
def positionChanged(self, progress):
progress /= 1000
if not self.slider.isSliderDown():
self.slider.setValue(progress)
self.updateDurationInfo(progress)
def metaDataChanged(self):
if self.player.isMetaDataAvailable():
self.setTrackInfo("%s - %s" % (
self.player.metaData(QMediaMetaData.AlbumArtist),
self.player.metaData(QMediaMetaData.Title)))
def previousClicked(self):
# Go to the previous track if we are within the first 5 seconds of
# playback. Otherwise, seek to the beginning.
if self.player.position() <= 5000:
self.playlist.previous()
else:
self.player.setPosition(0)
def jump(self, index):
if index.isValid():
self.playlist.setCurrentIndex(index.row())
self.player.play()
def playlistPositionChanged(self, position):
self.playlistView.setCurrentIndex(
self.playlistModel.index(position, 0))
def seek(self, seconds):
self.player.setPosition(seconds * 1000)
def statusChanged(self, status):
self.handleCursor(status)
if status == QMediaPlayer.LoadingMedia:
self.setStatusInfo("Loading...")
elif status == QMediaPlayer.StalledMedia:
self.setStatusInfo("Media Stalled")
elif status == QMediaPlayer.EndOfMedia:
QApplication.alert(self)
elif status == QMediaPlayer.InvalidMedia:
self.displayErrorMessage()
else:
self.setStatusInfo("")
def handleCursor(self, status):
if status in (QMediaPlayer.LoadingMedia, QMediaPlayer.BufferingMedia, QMediaPlayer.StalledMedia):
self.setCursor(Qt.BusyCursor)
else:
self.unsetCursor()
def bufferingProgress(self, progress):
self.setStatusInfo("Buffering %d%" % progress)
def videoAvailableChanged(self, available):
if available:
self.fullScreenButton.clicked.connect(
self.videoWidget.setFullScreen)
self.videoWidget.fullScreenChanged.connect(
self.fullScreenButton.setChecked)
if self.fullScreenButton.isChecked():
self.videoWidget.setFullScreen(True)
else:
self.fullScreenButton.clicked.disconnect(
self.videoWidget.setFullScreen)
self.videoWidget.fullScreenChanged.disconnect(
self.fullScreenButton.setChecked)
self.videoWidget.setFullScreen(False)
self.colorButton.setEnabled(available)
def setTrackInfo(self, info):
self.trackInfo = info
if self.statusInfo != "":
self.setWindowTitle("%s | %s" % (self.trackInfo, self.statusInfo))
else:
self.setWindowTitle(self.trackInfo)
def setStatusInfo(self, info):
self.statusInfo = info
if self.statusInfo != "":
self.setWindowTitle("%s | %s" % (self.trackInfo, self.statusInfo))
else:
self.setWindowTitle(self.trackInfo)
def displayErrorMessage(self):
self.setStatusInfo(self.player.errorString())
def updateDurationInfo(self, currentInfo):
duration = self.duration
if currentInfo or duration:
            # QTime expects integer arguments; positions are tracked as floats above.
            currentTime = QTime(int(currentInfo / 3600) % 60, int(currentInfo / 60) % 60,
                    int(currentInfo) % 60, int(currentInfo * 1000) % 1000)
            totalTime = QTime(int(duration / 3600) % 60, int(duration / 60) % 60,
                    int(duration) % 60, int(duration * 1000) % 1000)
format = 'hh:mm:ss' if duration > 3600 else 'mm:ss'
tStr = currentTime.toString(format) + " / " + totalTime.toString(format)
else:
tStr = ""
self.labelDuration.setText(tStr)
def showColorDialog(self):
if self.colorDialog is None:
brightnessSlider = QSlider(Qt.Horizontal)
brightnessSlider.setRange(-100, 100)
brightnessSlider.setValue(self.videoWidget.brightness())
brightnessSlider.sliderMoved.connect(
self.videoWidget.setBrightness)
self.videoWidget.brightnessChanged.connect(
brightnessSlider.setValue)
contrastSlider = QSlider(Qt.Horizontal)
contrastSlider.setRange(-100, 100)
contrastSlider.setValue(self.videoWidget.contrast())
contrastSlider.sliderMoved.connect(self.videoWidget.setContrast)
self.videoWidget.contrastChanged.connect(contrastSlider.setValue)
hueSlider = QSlider(Qt.Horizontal)
hueSlider.setRange(-100, 100)
hueSlider.setValue(self.videoWidget.hue())
hueSlider.sliderMoved.connect(self.videoWidget.setHue)
self.videoWidget.hueChanged.connect(hueSlider.setValue)
saturationSlider = QSlider(Qt.Horizontal)
saturationSlider.setRange(-100, 100)
saturationSlider.setValue(self.videoWidget.saturation())
saturationSlider.sliderMoved.connect(
self.videoWidget.setSaturation)
self.videoWidget.saturationChanged.connect(
saturationSlider.setValue)
layout = QFormLayout()
layout.addRow("Brightness", brightnessSlider)
layout.addRow("Contrast", contrastSlider)
layout.addRow("Hue", hueSlider)
layout.addRow("Saturation", saturationSlider)
button = QPushButton("Close")
layout.addRow(button)
self.colorDialog = QDialog(self)
self.colorDialog.setWindowTitle("Color Options")
self.colorDialog.setLayout(layout)
button.clicked.connect(self.colorDialog.close)
self.colorDialog.show()
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
player = Player(sys.argv[1:])
player.show()
sys.exit(app.exec_())
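# Example launch (file names are illustrative): any paths passed on the command
# line are added to the playlist via Player(sys.argv[1:]) above, e.g.
#   python main.py clip1.mp4 clip2.mp4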
| 1.976563 | 2 |
tests/snapshot/periodic.py | Uornca/mirheo | 22 | 3046 | #!/usr/bin/env python
"""Test checkpoint-like periodic snapshots.
We test that the expected number of snapshot folders is created and that the currentStep changes.
"""
import mirheo as mir
u = mir.Mirheo(nranks=(1, 1, 1), domain=(4, 6, 8), debug_level=3,
log_filename='log', no_splash=True,
checkpoint_every=10, checkpoint_mode='Incremental',
checkpoint_folder='periodic_snapshots/snapshot_', checkpoint_mechanism='Snapshot')
pv = mir.ParticleVectors.ParticleVector('pv', mass=1)
ic = mir.InitialConditions.Uniform(number_density=2)
u.registerParticleVector(pv, ic)
dpd = mir.Interactions.Pairwise('dpd', rc=1.0, kind='DPD', a=10.0, gamma=10.0, kBT=1.0, power=0.5)
lj = mir.Interactions.Pairwise('lj', rc=1.0, kind='LJ', epsilon=1.25, sigma=0.75)
u.registerInteraction(dpd)
u.registerInteraction(lj)
u.setInteraction(dpd, pv, pv)
minimize = mir.Integrators.Minimize('minimize', max_displacement=1. / 1024)
u.registerIntegrator(minimize)
u.run(45, dt=0.125)
# TEST: snapshot.periodic
# cd snapshot
# rm -rf periodic_snapshots/
# mir.run --runargs "-n 2" ./periodic.py
# ls periodic_snapshots | cat > snapshot.out.txt
# grep -rH --include=*.json currentStep periodic_snapshots/ | sort >> snapshot.out.txt
| 2.234375 | 2 |
tools/resource_prefetch_predictor/generate_database.py | xzhan96/chromium.src | 1 | 3047 | <filename>tools/resource_prefetch_predictor/generate_database.py
#!/usr/bin/python
#
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Loads a set of web pages several times on a device, and extracts the
predictor database.
"""
import argparse
import logging
import os
import sys
_SRC_PATH = os.path.abspath(os.path.join(
os.path.dirname(__file__), os.pardir, os.pardir))
sys.path.append(os.path.join(_SRC_PATH, 'third_party', 'catapult', 'devil'))
from devil.android import device_utils
sys.path.append(os.path.join(_SRC_PATH, 'build', 'android'))
import devil_chromium
sys.path.append(os.path.join(_SRC_PATH, 'tools', 'android', 'loading'))
import controller
from options import OPTIONS
import page_track
_PAGE_LOAD_TIMEOUT = 20
def _CreateArgumentParser():
"""Creates and returns the argument parser."""
parser = argparse.ArgumentParser(
description=('Loads a set of web pages several times on a device, and '
'extracts the predictor database.'),
parents=[OPTIONS.GetParentParser()])
parser.add_argument('--device', help='Device ID')
parser.add_argument('--urls_filename', help='File containing a list of URLs '
'(one per line). URLs can be repeated.')
parser.add_argument('--output_filename',
help='File to store the database in.')
parser.add_argument('--url_repeat',
help=('Number of times each URL in the input '
'file is loaded.'),
default=3)
return parser
def _FindDevice(device_id):
"""Returns a device matching |device_id| or the first one if None, or None."""
devices = device_utils.DeviceUtils.HealthyDevices()
if device_id is None:
return devices[0]
matching_devices = [d for d in devices if str(d) == device_id]
if not matching_devices:
return None
return matching_devices[0]
def _Setup(device):
"""Sets up a device and returns an instance of RemoteChromeController."""
chrome_controller = controller.RemoteChromeController(device)
device.ForceStop(OPTIONS.ChromePackage().package)
chrome_controller.AddChromeArguments(
['--speculative-resource-prefetching=learning'])
chrome_controller.ResetBrowserState()
return chrome_controller
def _Go(chrome_controller, urls_filename, output_filename, repeats):
urls = []
with open(urls_filename) as f:
urls = [line.strip() for line in f.readlines()]
with chrome_controller.Open() as connection:
for repeat in range(repeats):
logging.info('Repeat #%d', repeat)
for url in urls:
logging.info('\tLoading %s', url)
page_track.PageTrack(connection) # Registers the listeners.
connection.MonitorUrl(url, timeout_seconds=_PAGE_LOAD_TIMEOUT,
stop_delay_multiplier=1.5)
device = chrome_controller.GetDevice()
device.ForceStop(OPTIONS.ChromePackage().package)
database_filename = (
'/data/user/0/%s/app_chrome/Default/Network Action Predictor' %
OPTIONS.ChromePackage().package)
device.PullFile(database_filename, output_filename)
def main():
logging.basicConfig(level=logging.INFO)
parser = _CreateArgumentParser()
args = parser.parse_args()
OPTIONS.SetParsedArgs(args)
devil_chromium.Initialize()
device = _FindDevice(args.device)
if device is None:
logging.error('Could not find device: %s.', args.device)
sys.exit(1)
chrome_controller = _Setup(device)
_Go(chrome_controller, args.urls_filename, args.output_filename,
int(args.url_repeat))
if __name__ == '__main__':
main()
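# Example invocation (values are illustrative; the flags are defined in
# _CreateArgumentParser above):
#   generate_database.py --device=0123456789ABCDEF --urls_filename=urls.txt \
#       --output_filename=predictor.db --url_repeat=3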
| 2.421875 | 2 |
palm_wrapper/job_submission/domain.py | madeline-scyphers/palm | 0 | 3048 | from abc import ABC, abstractmethod
from typing import Optional
from xml import dom
import numpy as np
import pandas as pd
from .utils import get_factors_rev
def calc_plot_size(domain_x, domain_y, plot_goal, house_goal):
f1 = sorted(get_factors_rev(domain_x))
f2 = sorted(get_factors_rev(domain_y))
plot_x, plot_y = None, None
for x in f1:
for y in f2:
if x * y - house_goal >= 0 and plot_goal - x * y >= 0:
if not plot_x and not plot_y:
plot_x, plot_y = x, y
if (plot_goal - x * y) < (plot_goal - plot_x * plot_y):
plot_x, plot_y = x, y
elif ((plot_goal - x * y) == (plot_goal - plot_x * plot_y)) and ((x - y) < (plot_x - plot_y)):
plot_x, plot_y = x, y
return plot_x, plot_y
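# Note (added for clarity): calc_plot_size scans the factor pairs of the two domain
# edge lengths and keeps the plot whose cell count is at least house_goal and at most
# plot_goal, preferring the area closest to plot_goal and, on ties, the pair with the
# smaller x - y difference.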
def calc_plot_sizes(
domain_x, domain_y, plot_footprint, house_footprint, plot_ratio, dx, dy, full_domain, x_spread=None, y_spread=None
):
x_spread = x_spread if x_spread is not None else (-round(domain_x / 15), 0)
y_spread = (
y_spread if y_spread is not None else (-round(domain_y / 20), min(full_domain - domain_y, round(domain_y / 10)))
)
goal = plot_footprint / (dx * dy)
house_goal = house_footprint / (dx * dy)
dom_x = range(domain_x + x_spread[0], domain_x + x_spread[1] + 1)
dom_y = range(domain_y + y_spread[0], domain_y + y_spread[1] + 1)
plots = []
for d_x in dom_x:
for d_y in dom_y:
trimmed_d_y = int(d_y * plot_ratio)
plot_x, plot_y = calc_plot_size(d_x, trimmed_d_y, goal, house_goal)
if plot_x is not None and plot_y is not None:
plots.append((plot_x, plot_y, d_x, d_y, trimmed_d_y))
return plots
def get_best_plot_size(plots, plot_footprint, plot_ratio, dx, dy):
goal = plot_footprint / (dx * dy)
tmp = pd.DataFrame(plots, columns=["px", "py", "domx", "domy", "trimmed_dy"])
tmp["plt_area"] = tmp["px"] * tmp["py"]
tmp["goal_diff"] = goal - tmp.plt_area
tmp["domain_y_diff"] = tmp.domy * plot_ratio - tmp.trimmed_dy
tmp["trimmed_area"] = tmp["domx"] * tmp["trimmed_dy"]
tmp["full_domain"] = tmp["domx"] * tmp["domy"]
tmp["ratio_diff"] = abs((((tmp.trimmed_area + round(tmp.domain_y_diff * tmp.domx))) / tmp.full_domain - plot_ratio))
normalized_ratio_diff = (tmp.ratio_diff + plot_ratio) / plot_ratio
normalized_goal_diff = (tmp.goal_diff + goal) / goal
tmp["weighted_sorter"] = (tmp.px + tmp.py) ** (normalized_ratio_diff * normalized_goal_diff)
# tmp["ratio_diff"] = abs(((tmp.trimmed_area) / tmp.full_domain - plot_ratio))
tmp = tmp.sort_values(
by=["weighted_sorter", "goal_diff", "ratio_diff", "domain_y_diff", "trimmed_area"],
ascending=[True, True, True, True, False],
)
# tmp = tmp.sort_values(by=["goal_diff", "domain_y_diff", "trimmed_area"], ascending=[True, True, False])
tplot_x, tplot_y, tdomain_x, tdomain_y, trimmed_y = tmp[["px", "py", "domx", "domy", "trimmed_dy"]].iloc[0]
return tplot_x, tplot_y, tdomain_x, tdomain_y, trimmed_y
def calc_house_size(plot_x, plot_y, house_footprint, dx, dy):
goal = house_footprint / (dx * dy)
f1 = range(1, plot_x + 1)
f2 = range(1, plot_y + 1)
true_x, true_y = f1[0], f2[0]
for x in f1:
for y in f2:
padded_x, padded_y = x - 0, y - 0
nums = sorted([padded_x, padded_y])
if nums[0] * 2 < nums[1]:
continue
if abs(goal - padded_x * padded_y) < abs(goal - true_x * true_y):
true_x, true_y = padded_x, padded_y
elif (abs(goal - padded_x * padded_y) == abs(goal - true_x * true_y)) and (
abs(padded_x - padded_y) < abs(true_x - true_y)
):
true_x, true_y = padded_x, padded_y
return true_x, true_y
class BaseDomainArea(ABC):
subplot: Optional["BaseDomainArea"]
x: int
y: int
z: Optional[int]
matrix: np.ndarray
def __str__(self) -> str:
string = ""
for row in self.matrix:
string += f'{" ".join(str(int(pixel)) for pixel in row)}\n'
return string
@abstractmethod
def get_matrix(self) -> np.ndarray:
"""Get the numpy matrix representation of the domain area"""
def _validate_matrix_size(self, subplot):
for value in ["x", "y"]:
cell_val = getattr(self, value)
subplot_val = getattr(subplot, value)
if subplot_val and cell_val < subplot_val:
raise ValueError(
f"The {value} ({cell_val}) value of {self.__class__.__name__}"
f" must be larger than the house ({subplot_val}) going on it!"
)
def save_matrix(self, filename: str, matrix_name: str = None) -> None:
matrix = self.matrix if matrix_name is None else getattr(self, matrix_name)
np.savetxt(filename, matrix, delimiter=",")
class House(BaseDomainArea):
def __init__(self, x: int, y: int, z: int) -> None:
self.x = x
self.y = y
self.z = z
self.matrix = self.get_matrix()
def get_matrix(self) -> np.ndarray:
house = np.full((self.x, self.y), self.z)
return house
class Cell(BaseDomainArea):
def __init__(self, subplot: House, x: int, y: int) -> None:
self.subplot = subplot
self.x = x
self.y = y
self._validate_matrix_size(subplot=self.subplot)
self.matrix = self.get_matrix()
def get_matrix(self) -> np.ndarray:
left = (self.x - self.subplot.x) // 2
top = (self.y - self.subplot.y) // 2
plot = np.zeros((self.x, self.y), dtype=int)
plot[left : left + self.subplot.x, top : top + self.subplot.y] = self.subplot.matrix
return plot
class Domain(BaseDomainArea):
def __init__(self, subplot: Cell, tdomain_x, tdomain_y, full_x, full_y, trimmed_y, plot_ratio, stack_height) -> None:
self.subplot = subplot
self.temp_x = tdomain_x
self.temp_y = tdomain_y
self.full_x = full_x
self.full_y = full_y
self.trimmed_y = trimmed_y
self.plot_ratio = plot_ratio
self.stack_height = stack_height
# self._validate_matrix_size(subplot=self.subplot)
self.matrix, self.trees_matrix = self.get_matrix()
def print_tree_matrix(self) -> str:
string = ""
for row in self.trees_matrix:
string += f'{" ".join(str(int(pixel)) for pixel in row)}\n'
return string
def get_matrix(self) -> np.ndarray:
houses_row = np.tile(
self.subplot.matrix,
(
self.temp_x // self.subplot.x,
1,
),
)
number_of_house_rows = self.trimmed_y // self.subplot.y
number_of_full_tree_rows = self.temp_y - self.trimmed_y - 1
mixed_row_ratio = self.temp_y * self.plot_ratio - self.trimmed_y
tree_row = np.full((self.temp_x, 1), -1)
mixed_row = np.array(
[-1 if i <= mixed_row_ratio * self.temp_x else 0 for i in range(1, self.temp_x + 1)]
).reshape(self.temp_x, 1)
rows = [[houses_row.copy()] for _ in range(number_of_house_rows)]
trees = [tree_row.copy() for _ in range(number_of_full_tree_rows)]
trees.insert(number_of_house_rows // 2, mixed_row)
while trees:
for row in rows:
if not trees:
break
row.append(trees.pop())
domain_with_trees = np.concatenate([np.concatenate(row, axis=1) for row in rows], axis=1)
dwtx = domain_with_trees.shape[0]
dwty = domain_with_trees.shape[1]
xs = int(np.floor((self.full_x - dwtx) / 2)), int(np.ceil((self.full_x - dwtx) / 2))
full_domain = np.pad(domain_with_trees, (xs, (self.full_y - dwty, 0)))
mid_x = self.full_x // 2
full_domain[mid_x - 2:mid_x + 2, :1] = self.stack_height # stack for surface scalar to come out of
domain = np.where(full_domain != -1, full_domain, 0)
trees = np.where(full_domain == -1, full_domain, 0)
return domain.T, trees.T
@classmethod
def from_domain_config(cls, house, config):
cell = Cell(house, tree_domain_fraction=config["trees"]["domain_fraction"], **config["plot_size"])
x = config["domain"]["x"]
y = config["domain"]["y"]
return cls(subplot=cell, x=x, y=y)
@classmethod
def from_plot_size(cls, house, config, tplot_x, tplot_y, tdomain_x, tdomain_y, trimmed_y, plot_ratio, stack_height):
cell = Cell(house, x=tplot_x, y=tplot_y)
# x = config["domain"]["x"]
# y = config["domain"]["y"]
return cls(cell, tdomain_x, tdomain_y, config["domain"]["x"], config["domain"]["y"], trimmed_y, plot_ratio, stack_height)
def setup_domain(cfg):
domain_x, domain_y = cfg["domain"]["x"], (round(cfg["domain"]["y"] * cfg["domain"]["urban_ratio"]))
plot_footprint, plot_ratio, dx, dy = (
cfg["plot"]["plot_footprint"],
cfg["plot"]["plot_ratio"],
cfg["domain"]["dx"],
cfg["domain"]["dy"],
)
plots = calc_plot_sizes(
domain_x,
domain_y,
plot_footprint,
cfg["house"]["footprint"],
plot_ratio,
dx,
dy,
cfg["domain"]["y"],
)
tplot_x, tplot_y, tdomain_x, tdomain_y, trimmed_y = get_best_plot_size(plots, plot_footprint, plot_ratio, dx, dy)
house_x, house_y = calc_house_size(tplot_x, tplot_y, cfg["house"]["footprint"], dx, dy)
house = House(house_x, house_y, cfg["house"]["height"])
return Domain.from_plot_size(house, cfg, tplot_x, tplot_y, tdomain_x, tdomain_y, trimmed_y, plot_ratio, cfg["domain"]["stack_height"])
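# Minimal usage sketch (the config keys mirror those read by setup_domain above;
# the numeric values are made up for illustration):
#
#   cfg = {
#       "domain": {"x": 96, "y": 128, "dx": 2.0, "dy": 2.0,
#                  "urban_ratio": 0.8, "stack_height": 10},
#       "plot": {"plot_footprint": 800.0, "plot_ratio": 0.5},
#       "house": {"footprint": 120.0, "height": 8},
#   }
#   dom = setup_domain(cfg)
#   dom.save_matrix("topography.csv")             # building-height grid
#   dom.save_matrix("trees.csv", "trees_matrix")  # tree mask grid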
if __name__ == "__main__":
from .load_wrapper_config import get_wrapper_config
config = get_wrapper_config()
domain = setup_domain(config)
domain
| 2.640625 | 3 |
zad5.py | Alba126/Laba21 | 0 | 3049 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from tkinter import *
from random import random
def on_click():
x = random()
y = random()
bt1.place(relx=x, rely=y)
root = Tk()
root['bg'] = 'white'
root.title('crown')
img = PhotoImage(file='crown.png')
bt1 = Button(image=img, command=on_click)
bt1.place(relx=0.5, rely=0.5, anchor=CENTER)
root.mainloop()
| 3.5 | 4 |
tests/importer/utils/test_utils.py | HumanCellAtlas/ingest-common | 0 | 3050 | <gh_stars>0
from openpyxl import Workbook
def create_test_workbook(*worksheet_titles, include_default_sheet=False):
workbook = Workbook()
for title in worksheet_titles:
workbook.create_sheet(title)
if not include_default_sheet:
default_sheet = workbook['Sheet']
workbook.remove(default_sheet)
return workbook
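# e.g. create_test_workbook('donors', 'projects') returns a workbook whose only
# sheets are 'donors' and 'projects' (the default 'Sheet' is removed unless
# include_default_sheet=True).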
| 2.453125 | 2 |
test/test_import_stats.py | WBobby/pytorch | 24 | 3051 | <gh_stars>10-100
import subprocess
import sys
import unittest
import pathlib
from torch.testing._internal.common_utils import TestCase, run_tests, IS_LINUX, IS_IN_CI
REPO_ROOT = pathlib.Path(__file__).resolve().parent.parent
try:
# Just in case PyTorch was not built in 'develop' mode
sys.path.append(str(REPO_ROOT))
from tools.stats.scribe import rds_write, register_rds_schema
except ImportError:
register_rds_schema = None
rds_write = None
# these tests could eventually be changed to fail if the import/init
# time is greater than a certain threshold, but for now we just use them
# as a way to track the duration of `import torch` in our ossci-metrics
# S3 bucket (see tools/stats/print_test_stats.py)
class TestImportTime(TestCase):
def test_time_import_torch(self):
TestCase.runWithPytorchAPIUsageStderr("import torch")
def test_time_cuda_device_count(self):
TestCase.runWithPytorchAPIUsageStderr(
"import torch; torch.cuda.device_count()",
)
@unittest.skipIf(not IS_LINUX, "Memory test is only implemented for Linux")
@unittest.skipIf(not IS_IN_CI, "Memory test only runs in CI")
def test_peak_memory(self):
def profile(module, name):
command = f"import {module}; import resource; print(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)"
result = subprocess.run(
[sys.executable, "-c", command],
stdout=subprocess.PIPE,
)
max_rss = int(result.stdout.decode().strip())
return {
"test_name": name,
"peak_memory_bytes": max_rss,
}
data = profile("torch", "pytorch")
baseline = profile("sys", "baseline")
rds_write(
"import_stats", [data, baseline]
)
if __name__ == "__main__":
if register_rds_schema and IS_IN_CI:
register_rds_schema(
"import_stats",
{
"test_name": "string",
"peak_memory_bytes": "int",
"time_ms": "int",
},
)
run_tests()
| 1.984375 | 2 |
post_office/validators.py | fasih/django-post_office | 661 | 3052 | from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.template import Template, TemplateSyntaxError, TemplateDoesNotExist
from django.utils.encoding import force_str
def validate_email_with_name(value):
"""
Validate email address.
Both "<NAME> <<EMAIL>>" and "<EMAIL>" are valid.
"""
value = force_str(value)
recipient = value
if '<' in value and '>' in value:
start = value.find('<') + 1
end = value.find('>')
if start < end:
recipient = value[start:end]
validate_email(recipient)
def validate_comma_separated_emails(value):
"""
Validate every email address in a comma separated list of emails.
"""
if not isinstance(value, (tuple, list)):
raise ValidationError('Email list must be a list/tuple.')
for email in value:
try:
validate_email_with_name(email)
except ValidationError:
raise ValidationError('Invalid email: %s' % email, code='invalid')
def validate_template_syntax(source):
"""
    Basic Django Template syntax validation. This allows for more robust
    template authoring.
"""
try:
Template(source)
except (TemplateSyntaxError, TemplateDoesNotExist) as err:
raise ValidationError(str(err))
| 2.765625 | 3 |
paperhub/input.py | GiuseppeBaldini/PaperHub | 0 | 3053 | # Input DOI / URL
import re
import sys
# Pyperclip is not built-in, check and download if needed
try:
import pyperclip
except (ImportError, ModuleNotFoundError):
print('Pyperclip module not found. Please download it.')
sys.exit(0)
# Regex for links
link_regex = re.compile(r'''(
http[s]?://
(?:[a-zA-Z]|
[0-9]|
[$-_@.&+]|
[!*\(\),]|
(?:%[0-9a-fA-F][0-9a-fA-F]))+
)''', re.IGNORECASE | re.VERBOSE)
# Get DOI / URL using different methods
# Method 1: argument
try:
input_link = sys.argv[1]
# Method 2: clipboard
except IndexError:
input_link = pyperclip.paste()
# Method 3: manual input
def regex_check(regex, link):
"""
    Check the link against the regex. If the DOI / URL is not in the right format,
    keep asking for manual input until it is valid, or quit on an empty Enter.
"""
while True:
match = re.match(regex, link)
        if match is None:
link = str(input('''Enter valid DOI / URL or press Enter to quit: > '''))
if link == '':
exit()
else:
continue
else:
return link
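# Illustrative behaviour of the module-level call below: an input such as
# "https://doi.org/10.1000/xyz123" matches link_regex and is returned unchanged,
# while anything that does not look like an http(s) URL triggers the manual prompt.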
url = regex_check(link_regex, input_link) | 3.3125 | 3 |
main.py | chillum1718/EffcientNetV2 | 0 | 3054 | <filename>main.py
import argparse
import csv
import os
import torch
import tqdm
from torch import distributed
from torch.utils import data
from torchvision import datasets
from torchvision import transforms
from nets import nn
from utils import util
data_dir = os.path.join('..', 'Dataset', 'IMAGENET')
def batch(images, target, model, criterion=None):
images = images.cuda()
target = target.cuda()
if criterion:
with torch.cuda.amp.autocast():
loss = criterion(model(images), target)
return loss
else:
return util.accuracy(model(images), target, top_k=(1, 5))
def train(args):
epochs = 350
batch_size = 288
util.set_seeds(args.rank)
model = nn.EfficientNet().cuda()
lr = batch_size * torch.cuda.device_count() * 0.256 / 4096
optimizer = nn.RMSprop(util.add_weight_decay(model), lr, 0.9, 1e-3, momentum=0.9)
ema = nn.EMA(model)
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank])
else:
model = torch.nn.DataParallel(model)
criterion = nn.CrossEntropyLoss().cuda()
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
dataset = datasets.ImageFolder(os.path.join(data_dir, 'train'),
transforms.Compose([util.RandomResize(),
transforms.ColorJitter(0.4, 0.4, 0.4),
transforms.RandomHorizontalFlip(),
util.RandomAugment(),
transforms.ToTensor(), normalize]))
if args.distributed:
sampler = torch.utils.data.distributed.DistributedSampler(dataset)
else:
sampler = None
loader = data.DataLoader(dataset, batch_size, sampler=sampler, num_workers=8, pin_memory=True)
scheduler = nn.StepLR(optimizer)
amp_scale = torch.cuda.amp.GradScaler()
with open(f'weights/{scheduler.__str__()}.csv', 'w') as f:
if args.local_rank == 0:
writer = csv.DictWriter(f, fieldnames=['epoch', 'acc@1', 'acc@5'])
writer.writeheader()
best_acc1 = 0
for epoch in range(0, epochs):
if args.distributed:
sampler.set_epoch(epoch)
if args.local_rank == 0:
print(('\n' + '%10s' * 2) % ('epoch', 'loss'))
bar = tqdm.tqdm(loader, total=len(loader))
else:
bar = loader
model.train()
for images, target in bar:
loss = batch(images, target, model, criterion)
optimizer.zero_grad()
amp_scale.scale(loss).backward()
amp_scale.step(optimizer)
amp_scale.update()
ema.update(model)
torch.cuda.synchronize()
if args.local_rank == 0:
bar.set_description(('%10s' + '%10.4g') % ('%g/%g' % (epoch + 1, epochs), loss))
scheduler.step(epoch + 1)
if args.local_rank == 0:
acc1, acc5 = test(ema.model.eval())
writer.writerow({'acc@1': str(f'{acc1:.3f}'),
'acc@5': str(f'{acc5:.3f}'),
'epoch': str(epoch + 1).zfill(3)})
util.save_checkpoint({'state_dict': ema.model.state_dict()}, acc1 > best_acc1)
best_acc1 = max(acc1, best_acc1)
if args.distributed:
torch.distributed.destroy_process_group()
torch.cuda.empty_cache()
def test(model=None):
if model is None:
model = nn.EfficientNet()
model.load_state_dict(torch.load('weights/best.pt', 'cpu')['state_dict'])
model = model.cuda()
model.eval()
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
dataset = datasets.ImageFolder(os.path.join(data_dir, 'val'),
transforms.Compose([transforms.Resize(416),
transforms.CenterCrop(384),
transforms.ToTensor(), normalize]))
loader = data.DataLoader(dataset, 48, num_workers=os.cpu_count(), pin_memory=True)
top1 = util.AverageMeter()
top5 = util.AverageMeter()
with torch.no_grad():
for images, target in tqdm.tqdm(loader, ('%10s' * 2) % ('acc@1', 'acc@5')):
acc1, acc5 = batch(images, target, model)
torch.cuda.synchronize()
top1.update(acc1.item(), images.size(0))
top5.update(acc5.item(), images.size(0))
acc1, acc5 = top1.avg, top5.avg
print('%10.3g' * 2 % (acc1, acc5))
if model is None:
torch.cuda.empty_cache()
else:
return acc1, acc5
def print_parameters():
model = nn.EfficientNet().eval()
_ = model(torch.zeros(1, 3, 224, 224))
params = sum(p.numel() for p in model.parameters())
print(f'Number of parameters: {int(params)}')
def benchmark():
shape = (1, 3, 384, 384)
util.torch2onnx(nn.EfficientNet().export().eval(), shape)
util.onnx2caffe()
util.print_benchmark(shape)
def main():
# python -m torch.distributed.launch --nproc_per_node=3 main.py --train
parser = argparse.ArgumentParser()
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument('--benchmark', action='store_true')
parser.add_argument('--train', action='store_true')
parser.add_argument('--test', action='store_true')
args = parser.parse_args()
args.distributed = False
args.rank = 0
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
if args.distributed:
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
args.rank = torch.distributed.get_rank()
if args.local_rank == 0:
if not os.path.exists('weights'):
os.makedirs('weights')
if args.local_rank == 0:
print_parameters()
if args.benchmark:
benchmark()
if args.train:
train(args)
if args.test:
test()
if __name__ == '__main__':
main()
| 2.46875 | 2 |
cgbind/esp.py | duartegroup/cgbind | 7 | 3055 | <gh_stars>1-10
import numpy as np
from time import time
from cgbind.atoms import get_atomic_number
from cgbind.log import logger
from cgbind.constants import Constants
from cgbind.exceptions import CgbindCritical
def get_esp_cube_lines(charges, atoms):
"""
From a list of charges and a set of xyzs create the electrostatic potential
map grid-ed uniformly between the most negative x, y, z values -5 Å
and the largest x, y, z +5 Å
:param charges: (list(float))
:param atoms: (list(autode.atoms.Atom))
:return: (list(str)), (min ESP value, max ESP value)
"""
logger.info('Calculating the ESP and generating a .cube file')
start_time = time()
try:
from esp_gen import get_cube_lines
except ModuleNotFoundError:
raise CgbindCritical('esp_gen not available. cgbind must be '
'installed with the --esp_gen flag')
if charges is None:
logger.error('Could not generate an .cube file, charges were None')
return [], (None, None)
coords = np.array([atom.coord for atom in atoms])
charges = np.array(charges)
# Get the max and min points from the coordinates
max_cart_values = np.max(coords, axis=0)
min_cat_values = np.min(coords, axis=0)
# The grid needs to be slightly larger than the smallest/largest Cartesian
# coordinate
# NOTE: All distances from here are in Bohr (a0) i.e. atomic units
min_carts = Constants.ang2a0 * (min_cat_values - 5 * np.ones(3))
max_carts = Constants.ang2a0 * (max_cart_values + 5 * np.ones(3))
coords = np.array([Constants.ang2a0 * np.array(coord) for coord in coords])
# Number of voxels will be nx * ny * nz
nx, ny, nz = 50, 50, 50
vox_size = max_carts - min_carts
rx, ry, rz = vox_size[0] / nx, vox_size[1] / ny, vox_size[2] / nz
# Write the .cube file lines
cube_file_lines = ['Generated by cgbind\n', 'ESP\n']
n_atoms = len(coords)
min_x, min_y, min_z = min_carts
cube_file_lines.append(f'{n_atoms:>5d}{min_x:>12f}{min_y:>12f}{min_z:>12f}\n') # n_atoms origin(x y z)
cube_file_lines.append(f'{nx:>5d}{rx:>12f}{0.0:>12f}{0.0:>12f}\n') # Number of voxels and their size
cube_file_lines.append(f'{ny:>5d}{0.0:>12f}{ry:>12f}{0.0:>12f}\n')
cube_file_lines.append(f'{nz:>5d}{0.0:>12f}{0.0:>12f}{rz:>12f}\n')
for atom in atoms:
x, y, z = atom.coord
cube_file_lines.append(f'{get_atomic_number(atom):>5d}{0.0:>12f}'
f'{Constants.ang2a0*x:>12f}{Constants.ang2a0*y:>12f}{Constants.ang2a0*z:>12f}\n')
# Looping over x, y, z is slow in python so use Cython extension
cube_val_lines, min_val, max_val = get_cube_lines(nx, ny, nz, coords, min_carts, charges, vox_size)
cube_file_lines += cube_val_lines
logger.info(f'ESP generated in {time()-start_time:.3f} s')
return cube_file_lines, (min_val, max_val)
| 2.5 | 2 |
codes/test_specular.py | mcdenoising/AdvMCDenoise | 35 | 3056 | import os
import sys
import logging
import time
import argparse
import numpy as np
from collections import OrderedDict
import scripts.options as option
import utils.util as util
from data.util import bgr2ycbcr
from data import create_dataset, create_dataloader
from models import create_model
# options
parser = argparse.ArgumentParser()
parser.add_argument('-opt', type=str, required=True, help='Path to options JSON file.')
opt = option.parse(parser.parse_args().opt, is_train=False)
util.mkdirs((path for key, path in opt['path'].items() if not key == 'pretrain_model_G'))
opt = option.dict_to_nonedict(opt)
util.setup_logger(None, opt['path']['log'], 'test.log', level=logging.INFO, screen=True)
logger = logging.getLogger('base')
logger.info(option.dict2str(opt))
# Create test dataset and dataloader
test_loaders = []
for phase, dataset_opt in sorted(opt['datasets'].items()):
test_set = create_dataset(dataset_opt)
test_loader = create_dataloader(test_set, dataset_opt)
logger.info('Number of test images in [{:s}]: {:d}'.format(dataset_opt['name'], len(test_set)))
test_loaders.append(test_loader)
# Create model
model = create_model(opt)
for test_loader in test_loaders:
test_set_name = test_loader.dataset.opt['name']
logger.info('\nTesting [{:s}]...'.format(test_set_name))
test_start_time = time.time()
dataset_dir = os.path.join(opt['path']['results_root'], test_set_name)
util.mkdir(dataset_dir)
test_results = OrderedDict()
test_results['psnr'] = []
test_results['ssim'] = []
test_results['psnr_y'] = []
test_results['ssim_y'] = []
for data in test_loader:
need_GT = False if test_loader.dataset.opt['dataroot_GT'] is None else True
# need_GT = True
model.feed_data_specular(data, need_GT=need_GT)
if opt["image_type"] == "exr":
y = data["x_offset"]
x = data["y_offset"]
img_path = data['NOISY_path'][0]
img_name = os.path.splitext(os.path.basename(img_path))[0]
start = time.time()
model.test() # test
end = time.time()
print("Time elapsed... %f "%(end - start))
visuals = model.get_current_visuals(need_GT=need_GT)
denoised_img = util.tensor2img(visuals['DENOISED']) # uint8
noisy_img = util.tensor2img(visuals['NOISY'])
gt_img = util.tensor2img(visuals['GT']) # uint8
# save images
suffix = opt['suffix']
        if suffix is None:
suffix = ""
save_DENOISED_img_path = os.path.join(dataset_dir, img_name + suffix + '_1denoised.png')
save_NOISY_img_path = os.path.join(dataset_dir, img_name + suffix + '_0noisy.png')
save_GT_img_path = os.path.join(dataset_dir, img_name + suffix + '_2gt.png')
# calculate PSNR and SSIM
if need_GT:
# gt_img = util.tensor2img(visuals['GT'])
gt_img = gt_img / 255.
denoised_img = denoised_img / 255.
crop_border = test_loader.dataset.opt['scale']
cropped_denoised_img = denoised_img#[crop_border:-crop_border, crop_border:-crop_border, :]
cropped_gt_img = gt_img#[crop_border:-crop_border, crop_border:-crop_border, :]
psnr = util.calculate_psnr(cropped_denoised_img * 255, cropped_gt_img * 255)
ssim = util.calculate_ssim(cropped_denoised_img * 255, cropped_gt_img * 255)
test_results['psnr'].append(psnr)
test_results['ssim'].append(ssim)
if gt_img.shape[2] == 3: # RGB image
denoised_img_y = bgr2ycbcr(denoised_img, only_y=True)
gt_img_y = bgr2ycbcr(gt_img, only_y=True)
cropped_denoised_img_y = denoised_img_y[crop_border:-crop_border, crop_border:-crop_border]
cropped_gt_img_y = gt_img_y[crop_border:-crop_border, crop_border:-crop_border]
psnr_y = util.calculate_psnr(cropped_denoised_img_y * 255, cropped_gt_img_y * 255)
ssim_y = util.calculate_ssim(cropped_denoised_img_y * 255, cropped_gt_img_y * 255)
test_results['psnr_y'].append(psnr_y)
test_results['ssim_y'].append(ssim_y)
logger.info('{:20s} - PSNR: {:.6f} dB; SSIM: {:.6f}; PSNR_Y: {:.6f} dB; SSIM_Y: {:.6f}.'\
.format(img_name, psnr, ssim, psnr_y, ssim_y))
else:
logger.info('{:20s} - PSNR: {:.6f} dB; SSIM: {:.6f}.'.format(img_name, psnr, ssim))
else:
logger.info(img_name)
if opt["image_type"] == "exr":
denoised_exr = util.tensor2exr(visuals['DENOISED']) # uint8
noisy_exr = util.tensor2exr(visuals['NOISY'])
gt_exr = util.tensor2exr(visuals['GT']) # uint8
save_DENOISED_img_path = os.path.join(dataset_dir, img_name + suffix + '_1denoised.exr')
save_NOISY_img_path = os.path.join(dataset_dir, img_name + suffix + '_0noisy.exr')
save_GT_img_path = os.path.join(dataset_dir, img_name + suffix + '_2gt.exr')
util.saveEXRfromMatrix(save_DENOISED_img_path, denoised_exr, (x, y))
util.saveEXRfromMatrix(save_NOISY_img_path, noisy_exr, (x, y))
util.saveEXRfromMatrix(save_GT_img_path, gt_exr, (x, y))
if need_GT: # metrics
# Average PSNR/SSIM results
ave_psnr = sum(test_results['psnr']) / len(test_results['psnr'])
ave_ssim = sum(test_results['ssim']) / len(test_results['ssim'])
logger.info('----Average PSNR/SSIM results for {}----\n\tPSNR: {:.6f} dB; SSIM: {:.6f}\n'\
.format(test_set_name, ave_psnr, ave_ssim))
# if test_results['psnr_y'] and test_results['ssim_y']:
# ave_psnr_y = sum(test_results['psnr_y']) / len(test_results['psnr_y'])
# ave_ssim_y = sum(test_results['ssim_y']) / len(test_results['ssim_y'])
# logger.info('----Y channel, average PSNR/SSIM----\n\tPSNR_Y: {:.6f} dB; SSIM_Y: {:.6f}\n'\
# .format(ave_psnr_y, ave_ssim_y))
| 2.015625 | 2 |
neuralNetwork/layer3/nerualNet.py | zzw0929/deeplearning | 4 | 3057 | <filename>neuralNetwork/layer3/nerualNet.py
# coding:utf-8
import time
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import sklearn.datasets
import sklearn.linear_model
import matplotlib
matplotlib.rcParams['figure.figsize'] = (10.0, 8.0)
np.random.seed(0)
X, y = sklearn.datasets.make_moons(200, noise=0.20)
plt.scatter(X[:,0], X[:,1], s=40, c=y, cmap=plt.cm.Spectral)
# plt.show()
clf = sklearn.linear_model.LogisticRegressionCV()
clf.fit(X, y)
# Helper function to plot a decision boundary.
# If you don't fully understand this function don't worry, it just generates
# the contour plot below.
def plot_decision_boundary(pred_func):
# Set min and max values and give it some padding
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
h = 0.01
# Generate a grid of points with distance h between them
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max,
h))
# Predict the function value for the whole gid
Z = pred_func(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# Plot the contour and training examples
plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)
plot_decision_boundary(lambda x: clf.predict(x))
plt.title("Logistic Regression")
#plt.show()
num_examples = len(X) # training set size
nn_input_dim = 2 # input layer dimensionality
nn_output_dim = 2 # output layer dimensionality
# Gradient descent parameters (I picked these by hand)
epsilon = 0.01 # learning rate for gradient descent
reg_lambda = 0.01 # regularization strength
# Helper function to evaluate the total loss on the dataset
def calculate_loss(model):
W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2']
# Forward propagation to calculate our predictions
z1 = X.dot(W1) + b1
a1 = np.tanh(z1)
z2 = a1.dot(W2) + b2
exp_scores = np.exp(z2)
probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
# Calculating the loss
#print(11111111)
#print(probs)
#time.sleep(10)
    correct_logprobs = -np.log(probs[range(num_examples), y])
    data_loss = np.sum(correct_logprobs)
# Add regulatization term to loss (optional)
# L2 regulatization
data_loss += reg_lambda/2 * (np.sum(np.square(W1)) + np.sum(np.square(W2)))
return 1./num_examples * data_loss
def predict(model, x):
W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2']
# Forward propagation
z1 = x.dot(W1) + b1
a1 = np.tanh(z1)
z2 = a1.dot(W2) + b2
exp_scores = np.exp(z2)
probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
return np.argmax(probs, axis=1)
# This function learns parameters for the neural network and returns the model.
# - nn_hdim: Number of nodes in the hidden layer
# - num_passes: Number of passes through the training data for gradient descent
# - print_loss: If True, print the loss every 1000 iterations
def build_model(nn_hdim, num_passes=20000, print_loss=False):
# Initialize the parameters to random values. We need to learn these.
np.random.seed(0)
W1 = np.random.randn(nn_input_dim, nn_hdim) / np.sqrt(nn_input_dim)
b1 = np.zeros((1, nn_hdim))
W2 = np.random.randn(nn_hdim, nn_output_dim) / np.sqrt(nn_hdim)
b2 = np.zeros((1, nn_output_dim))
# This is what we return at the end
model = {}
# Gradient descent. For each batch...
for i in range(0, num_passes):
# Forward propagation
z1 = X.dot(W1) + b1
a1 = np.tanh(z1)
z2 = a1.dot(W2) + b2
exp_scores = np.exp(z2)
probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
# Backpropagation
delta3 = probs
delta3[range(num_examples), y] -= 1
dW2 = (a1.T).dot(delta3)
db2 = np.sum(delta3, axis=0, keepdims=True)
delta2 = delta3.dot(W2.T) * (1 - np.power(a1, 2))
dW1 = np.dot(X.T, delta2)
db1 = np.sum(delta2, axis=0)
# Add regularization terms (b1 and b2 don't have regularization terms)
dW2 += reg_lambda * W2
dW1 += reg_lambda * W1
# Gradient descent parameter update
W1 += -epsilon * dW1
b1 += -epsilon * db1
W2 += -epsilon * dW2
b2 += -epsilon * db2
# Assign new parameters to the model
model = { 'W1': W1, 'b1': b1, 'W2': W2, 'b2': b2}
# Optionally print the loss.
# This is expensive because it uses the whole dataset, so we don't want to do it too often.
if print_loss and i % 1000 == 0:
print("Loss after iteration %i: %f" %(i, calculate_loss(model)))
return model
def test_1():
# Build a model with a 3-dimensional hidden layer
model = build_model(3, print_loss=True)
# Plot the decision boundary
plot_decision_boundary(lambda x: predict(model, x))
plt.title("Decision Boundary for hidden layer size 3")
plt.show()
def test_2():
plt.figure(figsize=(16, 32))
hidden_layer_dimensions = [1, 2, 3, 4, 5, 20, 50]
for i, nn_hdim in enumerate(hidden_layer_dimensions):
plt.subplot(5, 2, i+1)
plt.title('Hidden Layer size %d' % nn_hdim)
model = build_model(nn_hdim)
plot_decision_boundary(lambda x: predict(model, x))
plt.show()
if __name__ == '__main__':
#print(y)
#print(12121)
#print(X)
test_1()
| 3.203125 | 3 |
app/domain/create_db.py | Lifeistrange/flaskweb | 0 | 3058 | <reponame>Lifeistrange/flaskweb<filename>app/domain/create_db.py<gh_stars>0
#!/usr/bin/env python
# coding=utf-8
from manage import db
import app.domain.model
db.create_all()
| 1.34375 | 1 |
tljh_repo2docker/tests/utils.py | TimoRoth/tljh-repo2docker | 46 | 3059 | <gh_stars>10-100
import asyncio
import json
from aiodocker import Docker, DockerError
from jupyterhub.tests.utils import api_request
async def add_environment(
app, *, repo, ref="master", name="", memory="", cpu=""
):
"""Use the POST endpoint to add a new environment"""
r = await api_request(
app,
"environments",
method="post",
data=json.dumps(
{"repo": repo, "ref": ref, "name": name, "memory": memory, "cpu": cpu,}
),
)
return r
async def wait_for_image(*, image_name):
"""wait until an image is built"""
count, retries = 0, 60 * 10
image = None
async with Docker() as docker:
while count < retries:
await asyncio.sleep(1)
try:
image = await docker.images.inspect(image_name)
except DockerError:
count += 1
continue
else:
break
return image
async def remove_environment(app, *, image_name):
"""Use the DELETE endpoint to remove an environment"""
r = await api_request(
app, "environments", method="delete", data=json.dumps({"name": image_name,}),
)
return r
| 2.453125 | 2 |
05_ARIADNE_SUBSCRIPTIONS_GRAPHQL/api/resolvers/mutations/__init__.py | CrispenGari/python-flask | 2 | 3060 |
from api import db
from uuid import uuid4
from ariadne import MutationType
from api.models import Post
from api.store import queues
mutation = MutationType()
@mutation.field("createPost")
async def create_post_resolver(obj, info, input):
try:
post = Post(postId=uuid4(), caption=input["caption"])
db.session.add(post)
db.session.commit()
for queue in queues:
queue.put(post)
return{
"error": None,
"post": post
}
except Exception as e:
return{
"error": {"message":str(e), "field": "unknown"},
"post": None
} | 2.328125 | 2 |
async_sched/client/__init__.py | justengel/async_sched | 1 | 3061 | <filename>async_sched/client/__init__.py
from async_sched.client import quit_server as module_quit
from async_sched.client import request_schedules as module_request
from async_sched.client import run_command as module_run
from async_sched.client import schedule_command as module_schedule
from async_sched.client import stop_schedule as module_stop
from async_sched.client import update_server as module_update
from .client import Client, \
quit_server_async, quit_server, update_server_async, update_server, request_schedules_async, \
request_schedules, run_command_async, run_command, schedule_command_async, schedule_command, \
stop_schedule_async, stop_schedule
# The other modules in this package exist for the "-m" python flag
# `python -m async_sched.client.request_schedules --host "172.16.17.32" --port 8000`
__all__ = ['Client',
'quit_server_async', 'quit_server', 'update_server_async', 'update_server', 'request_schedules_async',
'request_schedules', 'run_command_async', 'run_command', 'schedule_command_async', 'schedule_command',
'stop_schedule_async', 'stop_schedule',
'module_quit', 'module_request', 'module_run', 'module_schedule', 'module_stop', 'module_update']
| 2.0625 | 2 |
full-stack-angular-ngrx/backend/src/core/interfaces/crud.py | t4d-classes/angular_02212022 | 0 | 3062 | import abc
from typing import TypeVar, Generic, List, Dict
T = TypeVar('T')
class CRUDInterface(Generic[T], metaclass=abc.ABCMeta):
@abc.abstractmethod
def all(self) -> List[T]:
pass
@abc.abstractmethod
def one_by_id(self, entity_id: int) -> T:
pass
@abc.abstractmethod
def append_one(self, entity: Dict) -> T:
pass
@abc.abstractmethod
def replace_one(self, entity: Dict) -> None:
pass
@abc.abstractmethod
def remove_one(self, entity_id: int) -> None:
pass
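
# Illustrative sketch (not part of the original project): a minimal in-memory
# implementation of the interface above, assuming each entity dict carries an
# integer 'id' key.
class InMemoryCRUD(CRUDInterface[Dict]):

    def __init__(self) -> None:
        self._items: Dict[int, Dict] = {}
        self._next_id = 1

    def all(self) -> List[Dict]:
        return list(self._items.values())

    def one_by_id(self, entity_id: int) -> Dict:
        return self._items[entity_id]

    def append_one(self, entity: Dict) -> Dict:
        entity = {**entity, "id": self._next_id}
        self._items[self._next_id] = entity
        self._next_id += 1
        return entity

    def replace_one(self, entity: Dict) -> None:
        self._items[entity["id"]] = entity

    def remove_one(self, entity_id: int) -> None:
        del self._items[entity_id]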
| 3.015625 | 3 |
package/tests/test_init_command.py | MrKriss/stonemason | 2 | 3063 | <filename>package/tests/test_init_command.py
from pathlib import Path
import pytest
import git
import json
from conftest import TEST_DIR
def test_init_with_project(tmpdir):
output_path = Path(tmpdir.strpath)
# Set arguments
args = f"init -o {output_path} {TEST_DIR}/example_templates/python_project"
from masonry import main
# Run from entry point
main.main(args=args)
# Check files were created
package_name = 'testpackage'
files = [
'.git/',
'.mason',
'MANIFEST.in',
'README',
'requirements.txt',
'setup.py',
'src/testpackage',
'src/testpackage/__init__.py',
'src/testpackage/main.py'
]
for f in files:
p = output_path / package_name / f
assert p.exists()
    # Check requirements were populated
target = "requests\nlogzero\n"
req_file = output_path / package_name / 'requirements.txt'
result = req_file.read_text()
assert result == target
# Check git repo was created and commits made
repo_dir = output_path / package_name
r = git.Repo(repo_dir.as_posix())
log = r.git.log(pretty='oneline').split('\n')
assert len(log) == 1
assert "Add 'package' template layer via stone mason." in log[0]
def test_init_with_project_and_template(tmpdir, no_prompts):
output_path = Path(tmpdir.strpath)
# Set arguments
args = f"init -o {output_path} {TEST_DIR}/example_templates/python_project/pytest"
from masonry import main
# Run from entry point
main.main(args=args)
# Check files were created
package_name = 'testpackage'
files = [
'.git/',
'.mason',
'MANIFEST.in',
'README',
'requirements.txt',
'setup.py',
'src/testpackage',
'src/testpackage/__init__.py',
'src/testpackage/main.py',
'tests/test_foo.py'
]
for f in files:
p = output_path / package_name / f
assert p.exists()
    # Check requirements were populated
target = "requests\nlogzero\npytest\npytest-cov\ncoverage\n"
req_file = output_path / package_name / 'requirements.txt'
result = req_file.read_text()
assert result == target
# Check MANIFEST was prefixed
target = "graft tests\ngraft src\n"
manifest_file = output_path / package_name / 'MANIFEST.in'
result = manifest_file.read_text()
assert result == target
# Check git repo was created and commits made
repo_dir = output_path / package_name
r = git.Repo(repo_dir.as_posix())
log = r.git.log(pretty='oneline').split('\n')
assert len(log) == 2
assert "Add 'pytest' template layer via stone mason." in log[0]
assert "Add 'package' template layer via stone mason." in log[1]
| 2.375 | 2 |
mistral/mistral/api/controllers/v2/service.py | Toure/openstack_mistral_wip | 0 | 3064 | # Copyright 2015 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_log import log as logging
from pecan import rest
import six
import tooz.coordination
import wsmeext.pecan as wsme_pecan
from mistral.api import access_control as acl
from mistral.api.controllers.v2 import resources
# TODO(rakhmerov): invalid dependency, a REST controller must not depend on
# a launch script.
from mistral.cmd import launch
from mistral import context
from mistral import exceptions as exc
from mistral.service import coordination
from mistral.utils import rest_utils
LOG = logging.getLogger(__name__)
class ServicesController(rest.RestController):
@rest_utils.wrap_wsme_controller_exception
@wsme_pecan.wsexpose(resources.Services)
def get_all(self):
"""Return all services."""
acl.enforce('services:list', context.ctx())
LOG.info("Fetch services.")
if not cfg.CONF.coordination.backend_url:
raise exc.CoordinationException("Service API is not supported.")
service_coordinator = coordination.get_service_coordinator()
if not service_coordinator.is_active():
raise exc.CoordinationException(
"Failed to connect to coordination backend."
)
services_list = []
service_group = ['%s_group' % i for i in launch.LAUNCH_OPTIONS]
try:
for group in service_group:
members = service_coordinator.get_members(group)
services_list.extend(
[resources.Service.from_dict(
{'type': group, 'name': member}) for member in members]
)
except tooz.coordination.ToozError as e:
# In the scenario of network interruption or manually shutdown
# connection shutdown, ToozError will be raised.
raise exc.CoordinationException(
"Failed to get service members from coordination backend. %s"
% six.text_type(e)
)
return resources.Services(services=services_list)
| 1.726563 | 2 |
setup.py | jtauber/greek-utils | 13 | 3065 | from setuptools import setup
setup(
name="greek-utils",
version="0.2",
description="various utilities for processing Ancient Greek",
license="MIT",
url="http://github.com/jtauber/greek-utils",
author="<NAME>",
author_email="<EMAIL>",
packages=["greekutils"],
classifiers=[
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Topic :: Text Processing",
"Topic :: Text Processing :: Linguistic",
"Topic :: Utilities",
],
)
| 1.039063 | 1 |
source/tweet.py | jfilter/foia-bot | 0 | 3066 | """
tweet stuff in intervals
"""
import time
import datetime
import twitter
from markov_chains import german_text
from config import config_no, config_yes
MAX_TWEET_LENGTH = 280
greeting = ' Sehr geehrte/r Antragsteller/in.'
ending = ' MfG'
num_tweets = 3
class FoiaBot:
def __init__(self, config):
self.api = twitter.Api(consumer_key=config["consumer_key"],
consumer_secret=config["consumer_secret"],
access_token_key=config["access_token"],
access_token_secret=config["access_token_secret"], sleep_on_rate_limit=True)
self.screen_name = config["screen_name"]
self.model = german_text.setup_model(config["model_path"])
self.hour_to_tweet = config["hour_to_tweet"]
def get_favorites(self):
favorites = self.api.GetFavorites(
screen_name=self.screen_name, count=200)
print(favorites)
fav_set = set([f.id for f in favorites])
return fav_set
def get_status_to_work_on(self):
favorites = self.get_favorites()
status_list = self.api.GetMentions(count=200, trim_user=True,
contributor_details=False, include_entities=False)
for status in status_list:
print(status)
if status.id in favorites:
continue
if status.in_reply_to_status_id is not None:
continue
if not status.text.startswith('@' + self.screen_name):
continue
self.post_replies(status)
def post_replies(self, status):
tweets = self.create_tweets()
print(tweets)
success = True
reply_to_status_id = status.id
for tweet in tweets:
response = self.api.PostUpdate(tweet, in_reply_to_status_id=reply_to_status_id, auto_populate_reply_metadata=True,
exclude_reply_user_ids=False, trim_user=True, verify_status_length=False)
if response is None:
success = False
break
else:
reply_to_status_id = response.id
if success:
self.api.CreateFavorite(status=status)
def generate_sentence(self, tweet_text, chars_left, set_limit=False):
max_length = 150
if set_limit:
max_length = chars_left
new_sent = self.model.make_short_sentence(max_length, tries=100)
if new_sent is not None and len(new_sent) < chars_left:
tweet_text += ' ' + new_sent
return tweet_text
# https://stackoverflow.com/questions/7703865/going-from-twitter-date-to-python-datetime-date
def get_date_from_twitter_string(self, created_at):
x = time.strptime(created_at, '%a %b %d %H:%M:%S +0000 %Y')
return datetime.datetime.fromtimestamp(time.mktime(x))
def tweet_once_a_day(self):
now = datetime.datetime.now()
print(now.hour)
if now.hour == self.hour_to_tweet:
last_status_list = self.api.GetUserTimeline(screen_name=self.screen_name, count=1,
include_rts=False, trim_user=True, exclude_replies=True)
print(last_status_list)
if last_status_list is None:
return
if len(last_status_list) == 0:
self.post_single_tweet()
if len(last_status_list) == 1:
last_status = last_status_list[0]
created_at_date = self.get_date_from_twitter_string(
last_status.created_at)
time_diff = now - created_at_date
print('time_diff', time_diff)
time_diff_hours = time_diff.seconds / 3600 + time_diff.days * 24
print(time_diff_hours)
if time_diff_hours > 20: # something is broken with the date but whatever
self.post_single_tweet()
def post_single_tweet(self):
tweet_text = self.generate_single_tweet_text()
response = self.api.PostUpdate(tweet_text, verify_status_length=False)
def generate_single_tweet_text(self):
tweet_text = ""
while True:
chars_left = MAX_TWEET_LENGTH - len(tweet_text)
chars_left -= 1 # for the space
if chars_left < 20:
break
if chars_left < 70:
tweet_text = self.generate_sentence(
tweet_text, chars_left, True)
else:
tweet_text = self.generate_sentence(
tweet_text, chars_left)
return tweet_text
def create_tweets(self):
tweets = []
for i in range(num_tweets):
tweet_text = f'{i + 1}/{num_tweets}'
if i == 0:
tweet_text += greeting
while True:
chars_left = MAX_TWEET_LENGTH - \
len(tweet_text) - 1 # because of space
# ensure space for the ending
if i + 1 == num_tweets:
chars_left -= len(ending)
if chars_left < 20:
# at ending
if i + 1 == num_tweets:
tweet_text += ending
break
if chars_left < 70:
tweet_text = self.generate_sentence(
tweet_text, chars_left, True)
else:
tweet_text = self.generate_sentence(
tweet_text, chars_left)
tweets.append(tweet_text)
return tweets
def run(self):
self.get_status_to_work_on()
def main():
print('main called')
no_bot = FoiaBot(config_no)
print('after setting up no bot')
yes_bot = FoiaBot(config_yes)
print('after setting up yes bot')
no_bot.run()
print('after running no bot')
yes_bot.run()
print('after running yes bot')
no_bot.tweet_once_a_day()
yes_bot.tweet_once_a_day()
print('after tweet once a day')
def lambda_handler(event, context):
print('handler called')
main()
print('handler about to finish')
# if __name__ == '__main__':
# main()
| 2.9375 | 3 |
account_processing.py | amitjoshi9627/Playong | 4 | 3067 | <reponame>amitjoshi9627/Playong
from selenium.webdriver import Firefox
from selenium.webdriver.firefox.options import Options
import getpass
import time
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from utils import *
def login_user(browser, email='', password=''):
print('Redirecting to login page..')
browser.find_element_by_xpath('//*[@id="login-btn"]').click()
    if email == '':
email, password = take_credentials()
browser.find_element_by_id("login_username").send_keys(email)
browser.find_element_by_id("login_password").send_keys(password)
complete_captcha(browser)
time.sleep(4)
browser.find_element_by_xpath('//*[@id="static-login-btn"]').click()
def logout_user(browser):
print("\nThank you for your using the program! Logging you out from jiosaavn...")
show_notificaton("Thank", "You", 0)
action = ActionChains(browser)
menu = browser.find_element_by_class_name('user-name')
action.move_to_element(menu).perform()
menu.click()
browser.find_element_by_xpath(
'/html/body/div[2]/div/div[2]/div[3]/div[3]/ol/li[4]/a').click()
time.sleep(2)
print('Logout..successful...')
def check_credentials(browser):
print('Checking credentials...Please wait..')
time.sleep(5)
try:
close_promo_ad(browser)
accept_cookies(browser)
success = True
except:
success = False
return success
def wrong_credentials_check(browser, counts=1):
    success = False  # initialise before the loop; it was previously referenced before assignment
    while not success:
print("\nWrong username/password entered.Please try again...\n")
email = input("Enter your email for jiosaavn account: ")
password = <PASSWORD>(f"Enter password for {email}: ")
email_element = browser.find_element_by_id("login_username")
email_element.clear()
email_element.send_keys(email)
pswd_element = browser.find_element_by_id("login_password")
pswd_element.clear()
pswd_element.send_keys(password)
browser.find_element_by_xpath('//*[@id="static-login-btn"]').click()
success = check_credentials(browser)
counts += 1
if counts > 4:
print('Too many unsuccessful attempts done. Exiting...\n')
break
return counts
def go_without_login(browser):
return False
def take_credentials():
email = input("Enter your email for jiosaavn account: ")
password = getpass.getpass(f"Enter password for {email}: ")
return email, password
def prompt(browser):
# response = int(input("Press 1 to Log in with you account else Press 0: "))
# if response:
# login_user(browser)
# return True
# else:
# go_without_login(browser)
print("Due to some issues.. Login Option is not available currently! Sorry for the inconvenience caused.")
go_without_login(browser)
| 2.609375 | 3 |
Mundo 3/teste.py | RafaelSdm/Curso-de-Python | 1 | 3068 | pessoas = {'nomes': "Rafael","sexo":"macho alfa","idade":19}
print(f"o {pessoas['nomes']} que se considera um {pessoas['sexo']} possui {pessoas['idade']}")
print(pessoas.keys())
print(pessoas.values())
print(pessoas.items())
for c in pessoas.keys():
print(c)
for c in pessoas.values():
print(c)
for c, j in pessoas.items():
print(f"o {c} pertence ao {j}")
del pessoas['sexo']
print(pessoas)
pessoas["sexo"] = "macho alfa"
print(pessoas)
print("outro codida daqui pra frente \n\n\n\n\n\n")
estado1 = {'estado': 'minas gerais', 'cidade':'capela nova' }
estado2 = {'estado':'rio de janeiro', 'cidade':"rossinha"}
brasil = []
brasil.append(estado1)
brasil.append(estado2)
print(brasil)
print(f"o brasil possui um estado chamado {brasil[0]['estado']} e a prorpia possui uma cidade chamada {brasil[0]['cidade']}")
print("-"*45)
es = {}
br = []
for c in range(0,3):
es['estado'] = str(input("informe o seu estado:"))
es['cidade'] = str(input("informe a sua cidade:"))
br.append(es.copy())
for c in br:
for i,j in c.items():
print(f"o campo {i} tem valor {j}")
| 4.34375 | 4 |
rsa-cipher/makeRsaKeys.py | mumbo-pro/cyrptography-algorithm | 1 | 3069 | # RSA Key Generator
# http://inventwithpython.com/hacking (BSD Licensed)

import random, sys, os, rabinMiller, cryptomath

# The program imports the rabinMiller and cryptomath modules that we created in
# the last chapter, along with a few others.
# (Chapter 24 - Public Key Cryptography and the RSA Cipher, makeRsaKeys.py)


def main():
    # create a public/private keypair with 1024 bit keys
    print('Making key files...')
    makeKeyFiles('al_sweigart', 1024)
    print('Key files made.')

# makeKeyFiles() and the rest of the original script are not included in this excerpt.
| 2.78125 | 3 |
atlas-outreach-data-tools-framework-1.1/Configurations/PlotConf_TTbarAnalysis.py | Harvard-Neutrino/phys145 | 0 | 3070 | config = {
"Luminosity": 1000,
"InputDirectory": "results",
"Histograms" : {
"WtMass" : {},
"etmiss" : {},
"lep_n" : {},
"lep_pt" : {},
"lep_eta" : {},
"lep_E" : {},
"lep_phi" : {"y_margin" : 0.6},
"lep_charge" : {"y_margin" : 0.6},
"lep_type" : {"y_margin" : 0.5},
"lep_ptconerel30" : {},
"lep_etconerel20" : {},
"lep_d0" : {},
"lep_z0" : {},
"n_jets" : {},
"jet_pt" : {},
"jet_m" : {},
"jet_jvf" : {"y_margin" : 0.4},
"jet_eta" : {},
"jet_MV1" : {"y_margin" : 0.3},
"vxp_z" : {},
"pvxp_n" : {},
},
"Paintables": {
"Stack": {
"Order" : ["Diboson", "DrellYan", "W", "Z", "stop", "ttbar"],
"Processes" : {
"Diboson" : {
"Color" : "#fa7921",
"Contributions" : ["WW", "WZ", "ZZ"]},
"DrellYan": {
"Color" : "#5bc0eb",
"Contributions" : ["DYeeM08to15", "DYeeM15to40", "DYmumuM08to15", "DYmumuM15to40", "DYtautauM08to15", "DYtautauM15to40"]},
"W": {
"Color" : "#e55934",
"Contributions" : ["WenuJetsBVeto", "WenuWithB", "WenuNoJetsBVeto", "WmunuJetsBVeto", "WmunuWithB", "WmunuNoJetsBVeto", "WtaunuJetsBVeto", "WtaunuWithB", "WtaunuNoJetsBVeto"]},
"Z": {
"Color" : "#086788",
"Contributions" : ["Zee", "Zmumu", "Ztautau"]},
"stop": {
"Color" : "#fde74c",
"Contributions" : ["stop_tchan_top", "stop_tchan_antitop", "stop_schan", "stop_wtchan"]},
"ttbar": {
"Color" : "#9bc53d",
"Contributions" : ["ttbar_lep", "ttbar_had"]}
}
},
"data" : {
"Contributions": ["data_Egamma", "data_Muons"]}
},
"Depictions": {
"Order": ["Main", "Data/MC"],
"Definitions" : {
"Data/MC": {
"type" : "Agreement",
"Paintables" : ["data", "Stack"]
},
"Main": {
"type" : "Main",
"Paintables": ["Stack", "data"]
},
}
},
}
| 1.21875 | 1 |
modules/optimizations/dead_codes.py | OMGhozlan/deobshell | 0 | 3071 | <filename>modules/optimizations/dead_codes.py
# coding=utf-8
from ..logger import log_debug
from ..utils import parent_map, replace_node, is_prefixed_var, get_used_vars
def opt_unused_variable(ast):
parents = parent_map(ast)
used_vars = get_used_vars(ast)
for node in ast.iter():
if node.tag in ["AssignmentStatementAst"]:
subnodes = list(node)
if subnodes[0].tag == "VariableExpressionAst":
if subnodes[0].attrib["VariablePath"].lower() not in used_vars:
if not is_prefixed_var(subnodes[0].attrib["VariablePath"]):
log_debug("Remove assignement of unused variable %s" % (subnodes[0].attrib["VariablePath"]))
parents[node].remove(node)
return True
return False
def opt_remove_uninitialised_variable_usage(ast):
assigned = set()
for node in ast.iter():
if node.tag in ["AssignmentStatementAst"]:
subnodes = list(node)
if subnodes[0].tag == "VariableExpressionAst":
assigned.add(subnodes[0].attrib["VariablePath"].lower())
if node.tag in ["BinaryExpressionAst"]:
subnodes = list(node)
if subnodes[0].tag == "VariableExpressionAst":
variable = subnodes[0]
other = subnodes[1]
elif subnodes[1].tag == "VariableExpressionAst":
variable = subnodes[1]
other = subnodes[0]
else:
variable, other = None, None
if variable is not None and other is not None:
if variable.attrib["VariablePath"].lower() not in assigned:
if not is_prefixed_var(variable.attrib["VariablePath"]):
log_debug("Remove unassigned variable use '%s'" % (variable.attrib["VariablePath"]))
replace_node(ast, node, other)
return True
return False
| 2.328125 | 2 |
Convert Integer A to Integer B.py | RijuDasgupta9116/LintCode | 321 | 3072 | <reponame>RijuDasgupta9116/LintCode
"""
Determine the number of bits required to convert integer A to integer B
Example
Given n = 31, m = 14,return 2
(31)10=(11111)2
(14)10=(01110)2
"""
__author__ = 'Danyang'
class Solution:
def bitSwapRequired(self, a, b):
"""
:param a:
:param b:
:return: int
"""
a = self.to_bin(a)
b = self.to_bin(b)
diff = len(a)-len(b)
ret = 0
if diff<0:
a, b = b, a
diff *= -1
b = "0"*diff+b
for i in xrange(len(b)):
if a[i]!=b[i]:
ret += 1
return ret
def to_bin(self, n):
"""
2's complement
32-bit
:param n:
:return:
"""
"""
:param n:
:return:
"""
a = abs(n)
lst = []
while a>0:
lst.append(a%2)
a /= 2
# 2's complement
if n>=0:
lst.extend([0]*(32-len(lst)))
else:
pivot = -1
for i in xrange(len(lst)):
if pivot==-1 and lst[i]==1:
pivot = i
continue
if pivot!=-1:
lst[i] ^= 1
lst.extend([1]*(32-len(lst)))
return "".join(map(str, reversed(lst)))
if __name__=="__main__":
assert Solution().bitSwapRequired(1, -1)==31
assert Solution().bitSwapRequired(31, 14)==2
| 3.578125 | 4 |
examples/basic/findQSpark.py | myriadrf/pyLMS7002M | 46 | 3073 | from pyLMS7002M import *
print("Searching for QSpark...")
try:
QSpark = QSpark()
except:
print("QSpark not found")
exit(1)
print("\QSpark info:")
QSpark.printInfo() # print the QSpark board info
# QSpark.LMS7002_Reset() # reset the LMS7002M
lms7002 = QSpark.getLMS7002() # get the LMS7002M object
ver, rev, mask = lms7002.chipInfo # get the chip info
print("\nLMS7002M info:")
print("VER : "+str(ver))
print("REV : "+str(rev))
print("MASK : "+str(mask))
| 2.8125 | 3 |
macaddress/__init__.py | paradxum/django-macaddress | 42 | 3074 | from django.conf import settings
from netaddr import mac_unix, mac_eui48
import importlib
import warnings
class mac_linux(mac_unix):
"""MAC format with zero-padded all upper-case hex and colon separated"""
word_fmt = '%.2X'
def default_dialect(eui_obj=None):
# Check to see if a default dialect class has been specified in settings,
# using 'module.dialect_cls' string and use importlib and getattr to retrieve dialect class. 'module' is the module and
# 'dialect_cls' is the class name of the custom dialect. The dialect must either be defined or imported by the module's
# __init__.py if the module is a package.
from .fields import MACAddressField # Remove import at v1.4
if hasattr(settings, 'MACADDRESS_DEFAULT_DIALECT') and not MACAddressField.dialect:
module, dialect_cls = settings.MACADDRESS_DEFAULT_DIALECT.split('.')
dialect = getattr(importlib.import_module(module), dialect_cls, mac_linux)
return dialect
else:
if MACAddressField.dialect: # Remove this "if" statement at v1.4
warnings.warn(
"The set_dialect class method on MACAddressField has been deprecated, in favor of the default_dialect "
"utility function and settings.MACADDRESS_DEFAULT_DIALECT. See macaddress.__init__.py source or the "
"project README for more information.",
DeprecationWarning,
)
return MACAddressField.dialect
if eui_obj:
return eui_obj.dialect
else:
return mac_linux
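
# Illustrative usage note (assumption, not taken from this package's docs): the
# default dialect can be chosen globally in Django settings with a
# 'module.dialect_cls' string, e.g.
#
#     MACADDRESS_DEFAULT_DIALECT = 'macaddress.mac_linux'
#
# which default_dialect() then resolves via importlib as shown above.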
def format_mac(eui_obj, dialect):
# Format a EUI instance as a string using the supplied dialect class, allowing custom string classes by
# passing directly or as a string, a la 'module.dialect_cls', where 'module' is the module and 'dialect_cls'
# is the class name of the custom dialect. The dialect must either be defined or imported by the module's __init__.py if
# the module is a package.
if not isinstance(dialect, mac_eui48):
if isinstance(dialect, str):
module, dialect_cls = dialect.split('.')
dialect = getattr(importlib.import_module(module), dialect_cls)
eui_obj.dialect = dialect
return str(eui_obj)
from pkg_resources import get_distribution, DistributionNotFound
import os.path
try:
_dist = get_distribution('django-macaddress')
except DistributionNotFound:
__version__ = 'Please install this project with setup.py'
else:
__version__ = _dist.version
VERSION = __version__ # synonym
| 2.34375 | 2 |
textmagic/test/message_status_tests.py | dfstrauss/textmagic-sms-api-python | 2 | 3075 | import time
from textmagic.test import ONE_TEST_NUMBER
from textmagic.test import THREE_TEST_NUMBERS
from textmagic.test import TextMagicTestsBase
from textmagic.test import LiveUnsafeTests
class MessageStatusTestsBase(TextMagicTestsBase):
def sendAndCheckStatusTo(self, numbers):
message = 'sdfqwersdfgfdg'
response = self.client.send(message, numbers)
ids = response['message_id'].keys()
self.getStatus(ids, message)
return (ids, message)
def getStatus(self, ids, message):
response = self.client.message_status(ids)
self.assertKeysEqualExpectedKeys(response, ids)
statuses = []
for id in ids:
status = response[id]
expected_keys = ['status', 'text', 'reply_number', 'created_time']
if (len(status) == 4):
pass
elif (len(status) == 6):
expected_keys.append('completed_time')
expected_keys.append('credits_cost')
else:
self.fail("Unexpected number of return parameters: %s" % len(status))
self.assertKeysEqualExpectedKeys(status, expected_keys)
self.assertEquals(status['text'], message)
self.assertEquals(status['reply_number'], '447624800500')
self.assertTrue(isinstance(status['created_time'], time.struct_time))
if (len(status) == 6):
self.assertTrue(isinstance(status['completed_time'], time.struct_time))
self.assertTrue(isinstance(status['credits_cost'], float))
statuses.append(status['status'])
return statuses
class MessageStatusTests(MessageStatusTestsBase):
def testMessageStatusWhenSendingOneMessage(self):
self.sendAndCheckStatusTo(ONE_TEST_NUMBER)
def testMessageStatusWhenSendingThreeMessages(self):
self.sendAndCheckStatusTo(THREE_TEST_NUMBERS)
class LiveUnsafeMessageStatusTests(MessageStatusTestsBase, LiveUnsafeTests):
"""
This test is live-unsafe because it is intended to be sent to a real
telephone number. It keeps asking for message status until it receives
a "delivered" response.
"""
def testMessageStatusWhenPhoneIsSwitchedOff(self):
ids, message = self.sendAndCheckStatusTo(['27991114444'])
while True:
s, = self.getStatus(ids, message)
if (s == 'd'):
break
| 2.46875 | 2 |
apps/orders/models.py | LinkanDawang/FreshMallDemo | 0 | 3076 | from django.db import models
from utils.models import BaseModel
from users.models import User, Address
from goods.models import GoodsSKU
# Create your models here.
class OrderInfo(BaseModel):
"""订单信息"""
PAY_METHOD = ['1', '2']
PAY_METHOD_CHOICES = (
(1, "货到付款"),
(2, "支付宝"),
)
ORDER_STATUS_CHOICES = (
(1, "待支付"),
(2, "待发货"),
(3, "待收货"),
(4, "待评价"),
(5, "已完成"),
)
"""---------订单信息------------------------"""
PAY_METHODS = {
1: "货到付款",
2: "支付宝",
}
ORDER_STATUS = {
1: "待支付",
2: "待发货",
3: "待收货",
4: "待评价",
5: "已完成",
}
PAY_METHODS_ENUM = {
"CASH": 1,
"ALIPAY": 2
}
ORDER_STATUS_ENUM = {
"UNPAID": 1,
"UNSEND": 2,
"UNRECEIVED": 3,
"UNCOMMENT": 4,
"FINISHED": 5
}
order_id = models.CharField(max_length=64, primary_key=True, verbose_name="订单号")
user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name="下单用户")
address = models.ForeignKey(Address, on_delete=models.CASCADE, verbose_name="收获地址")
total_count = models.IntegerField(default=1, verbose_name="商品总数")
total_amount = models.DecimalField(max_digits=10, decimal_places=2, verbose_name="商品总金额")
trans_cost = models.DecimalField(max_digits=10, decimal_places=2, verbose_name="运费")
pay_method = models.SmallIntegerField(choices=PAY_METHOD_CHOICES, default=1, verbose_name="支付方式")
status = models.SmallIntegerField(choices=ORDER_STATUS_CHOICES, default=1, verbose_name="订单状态")
trade_id = models.CharField(max_length=100, unique=True, null=True, blank=True, verbose_name="支付编号")
class Meta:
db_table = "df_order_info"
class OrderGoods(BaseModel):
"""订单商品"""
order = models.ForeignKey(OrderInfo, on_delete=models.CASCADE, verbose_name="订单")
sku = models.ForeignKey(GoodsSKU, on_delete=models.CASCADE, verbose_name="订单商品")
count = models.IntegerField(default=1, verbose_name="数量")
price = models.DecimalField(max_digits=10, decimal_places=2, verbose_name="单价")
comment = models.TextField(default="", verbose_name="评价信息")
class Meta:
db_table = "df_order_goods"
| 2.328125 | 2 |
event/arguments/prepare/event_vocab.py | hunterhector/DDSemantics | 0 | 3077 | <gh_stars>0
from collections import defaultdict, Counter
import os
import gzip
import json
import pickle
from json.decoder import JSONDecodeError
import logging
from typing import Dict
import pdb
from event import util
from event.arguments.prepare.slot_processor import get_simple_dep, is_propbank_dep
logger = logging.getLogger(__name__)
class TypedEventVocab:
unk_predicate = "unk_predicate-pred"
unk_arg_word = "unk_argument"
unk_frame = "unk_frame"
unk_fe = "unk_fe"
unk_prep = "unk_preposition"
unk_dep = "unk_dep"
unobserved_fe = "__unobserved_fe__"
unobserved_arg = "__unobserved_arg__"
ghost = "__ghost_component__"
def __init__(self, vocab_dir, event_data=None):
self.lookups: Dict[str, Dict[str, int]] = {}
self.oovs: Dict[str, str] = {}
self.vocab_dir = vocab_dir
if not os.path.exists(os.path.join(vocab_dir, "predicate.vocab")):
if event_data is None:
logging.error(
"Vocabulary file not exist and not data " "provided for counting."
)
logger.info("Counting vocabulary.")
vocab_counters = self.get_vocab_count(event_data)
for vocab_name, counter in vocab_counters.items():
raw_vocab_path = os.path.join(vocab_dir, vocab_name + ".vocab")
with open(raw_vocab_path, "w") as out:
for key, value in counter.most_common():
out.write("{}\t{}\n".format(key, value))
logger.info("Done vocabulary counting.")
# Now filter the vocabulary.
logger.info("Filtering vocabulary.")
filtered_vocab = self.filter_vocab(vocab_counters)
logger.info("Done filtering.")
logger.info("Writing filtered vocab to disk.")
for key, vocab in filtered_vocab.items():
with open(os.path.join(self.vocab_dir, key + ".vocab"), "w") as out:
for token, count in vocab:
out.write("{}\t{}\n".format(token, count))
self.pickle_counts()
logger.info("Done.")
else:
logger.info("Will not overwrite vocabulary, using existing.")
if not self.unpickle_counts():
logger.info("Reading counts from .vocab files.")
f_name: str
for f_name in os.listdir(vocab_dir):
if "_" in f_name and f_name.endswith(".vocab"):
vocab_type = f_name.split("_")[0]
else:
continue
self.lookups[vocab_type] = {}
self.oovs[vocab_type] = "unk_" + vocab_type
with open(os.path.join(vocab_dir, f_name)) as vocab_file:
index = 0
for line in vocab_file:
word, count = line.strip().split("\t")
self.lookups[vocab_type][word] = index
index += 1
logger.info(
"Loaded {} types for {}".format(
len(self.lookups[vocab_type]), vocab_type
)
)
self.pickle_counts()
def pickle_counts(self):
with open(os.path.join(self.vocab_dir, "lookups.pickle"), "wb") as out:
pickle.dump(self.lookups, out)
with open(os.path.join(self.vocab_dir, "oovs.pickle"), "wb") as out:
pickle.dump(self.oovs, out)
def unpickle_counts(self):
lookup_pickle = os.path.join(self.vocab_dir, "lookups.pickle")
oov_pickle = os.path.join(self.vocab_dir, "oovs.pickle")
if os.path.exists(lookup_pickle) and os.path.exists(oov_pickle):
logger.info("Directly loading pickled counts.")
with open(lookup_pickle, "rb") as lp:
self.lookups = pickle.load(lp)
with open(oov_pickle, "rb") as op:
self.oovs = pickle.load(op)
return True
else:
return False
def get_vocab_word(self, word, key):
if not word:
return self.oovs[key]
if word in self.lookups[key]:
return word
else:
return self.oovs[key]
@classmethod
def make_arg(cls, text, role):
if role == "NA":
return text + "-" + cls.unk_dep
else:
return text + "-" + role
@staticmethod
def make_predicate(text):
return text.lower() + "-pred"
@staticmethod
def make_fe(frame, fe):
# Do not use frame,fe format to alleviate sparsity.
return fe
def get_arg_entity_rep(self, arg, entity_text):
# If a specific entity text is provided.
rep = self.oovs["argument"]
if entity_text is not None:
# Use the argument's own text.
rep = self.get_vocab_word(entity_text, "argument")
if rep == self.oovs["argument"]:
# Use the text after hypen.
if "-" in entity_text:
rep = self.get_vocab_word(entity_text.split("-")[-1], "argument")
arg_text = arg["text"].lower()
if rep == self.oovs["argument"]:
# Fall back to use the argument's own text.
rep = self.get_vocab_word(arg_text, "argument")
if rep == self.oovs["argument"]:
if "-" in arg_text:
rep = self.get_vocab_word(arg_text.split("-")[-1], "argument")
if rep == self.oovs["argument"]:
# Fall back to NER tag.
if "ner" in arg:
rep = arg["ner"]
return rep
@classmethod
def get_unk_arg_rep(cls):
# This will create a full unknown argument, try to back off to
# a partial unknown argument if possible.
return cls.make_arg(cls.unk_arg_word, cls.unk_dep)
@classmethod
def get_unk_arg_with_dep(cls, dep):
"""Return a backoff version of the representation by using the
actual dep, but unk_arg
Args:
dep
"""
return cls.make_arg(cls.unk_arg_word, dep)
@classmethod
def get_arg_rep_no_dep(cls, entity_rep):
"""Return the backoff version of the argument representation by using
the unk_dep, but the actual entity.
Args:
entity_rep:
Returns:
"""
return cls.make_arg(entity_rep, cls.unk_dep)
def get_arg_rep(self, dep, entity_rep):
if dep.startswith("prep"):
dep = self.get_vocab_word(dep, "preposition")
arg_rep = self.make_arg(entity_rep, dep)
return arg_rep
def get_pred_rep(self, event):
"""
Take the predicates, and get the vocab index for it. This will first
use the predicate itself, if not found, it will try to use the verb
form.
:param event:
:return:
"""
pred = self.get_vocab_word(event["predicate"], "predicate")
if pred == self.oovs["predicate"]:
# Try to see if the verb form help.
if "verb_form" in event:
pred = self.get_vocab_word(event["verb_form"], "predicate")
return self.make_predicate(pred)
def get_fe_rep(self, frame_name, fe_role):
# return self.make_fe(frame_name, fe_role)
return self.get_vocab_word(self.make_fe(frame_name, fe_role), "fe")
@staticmethod
def filter_by_count(counter, min_count):
return [
(key, count) for key, count in counter.most_common() if count >= min_count
]
def filter_vocab(
self,
vocab_counters,
top_num_prep=150,
min_token_count=500,
min_fe_count=50,
min_frame_count=5,
):
filtered_vocab = {
"predicate_min_%d"
% min_token_count: self.filter_by_count(
vocab_counters["predicate"], min_token_count
),
"argument_min_%d"
% min_token_count: self.filter_by_count(
vocab_counters["argument"], min_token_count
),
"preposition_top_%d"
% top_num_prep: vocab_counters["preposition"].most_common(top_num_prep),
"fe_min_%d"
% min_fe_count: self.filter_by_count(vocab_counters["fe"], min_fe_count),
"frame_min_%d"
% min_frame_count: self.filter_by_count(
vocab_counters["frame"], min_frame_count
),
}
for key, counts in filtered_vocab.items():
# Use the base key name for the vocabulary, not including the
# cutoff, (i.e. predicate_min_50 -> predicate)
name = key.split("_")[0]
# Put oov token as a token int he vocab file.
oov = "unk_" + name
counts.insert(0, (oov, 0))
self.lookups[name] = {}
self.oovs[name] = oov
index = 0
for term, _ in counts:
self.lookups[name][term] = index
index += 1
return filtered_vocab
def get_vocab_count(self, data_path):
vocab_counters = defaultdict(Counter)
doc_count = 0
event_count = 0
with gzip.open(data_path) as data:
for line in data:
doc_info = json.loads(line)
for event in doc_info["events"]:
event_count += 1
predicate = event["predicate"]
vocab_counters["predicate"][predicate] += 1
frame = event["frame"]
if not frame == "NA":
vocab_counters["frame"][frame] += 1
for arg in event["arguments"]:
fe_name = arg["feName"]
syn_role = arg["dep"]
arg_text = arg["text"].lower()
vocab_counters["argument"][arg_text] += 1
if not fe_name == "NA":
vocab_counters["fe"][
self.make_fe(event["frame"], fe_name)
] += 1
if syn_role.startswith("prep"):
vocab_counters["preposition"][syn_role] += 1
doc_count += 1
if doc_count % 1000 == 0:
print(
"\rCounted vocab for {} events in "
"{} docs.".format(event_count, doc_count),
end="",
)
return vocab_counters
class EmbbedingVocab:
def __init__(self, vocab_file, with_padding=False, extras=None):
self.vocab_file = vocab_file
self.vocab = {}
self.tf = []
self.extras = []
self.pad = "__PADDING__"
self.padded = False
if with_padding:
# Paddings should be at 0.
self.padded = True
self.vocab[self.pad] = 0
self.tf.append(0)
if extras:
for name in extras:
self.add_extra(name)
self.__read_vocab()
@staticmethod
def with_extras(vocab_file):
"""
Create a EmbeddingVocab with unknown word slots and padding slot.
Args:
vocab_file:
Returns:
"""
return EmbbedingVocab(
vocab_file,
True,
[
TypedEventVocab.unk_frame,
TypedEventVocab.unk_fe,
TypedEventVocab.get_unk_arg_rep(),
TypedEventVocab.unobserved_arg,
TypedEventVocab.unobserved_fe,
TypedEventVocab.ghost,
],
)
def get_index(self, token, unk):
try:
return self.vocab[token]
except KeyError:
if unk:
return self.vocab[unk]
else:
return -1
def extra_size(self):
return len(self.extras)
def add_extra(self, name):
"""Add extra dimensions into the embedding vocab, used for special
tokens.
Args:
name:
Returns:
"""
if name in self.extras:
logger.info(
f"Extra {name} already exist in vocabulary "
f"at index {self.vocab[name]}"
)
return self.vocab[name]
else:
self.extras.append(name)
extra_index = len(self.vocab)
self.vocab[name] = extra_index
self.tf.append(0)
logger.info(
f"Adding {name} as extra dimension {extra_index} "
f"to {self.vocab_file}"
)
return extra_index
def get_size(self):
return len(self.vocab)
def vocab_items(self):
return self.vocab.items()
def get_term_freq(self, token):
return self.tf[self.get_index(token, None)]
def __read_vocab(self):
with open(self.vocab_file) as din:
index = len(self.vocab)
for line in din:
word, count = line.split()
self.vocab[word] = index
self.tf.append(int(count))
index += 1
def create_sentences(
doc,
event_vocab,
output_path,
include_frame=False,
use_simple_dep=False,
prop_arg_only=False,
):
if include_frame:
print("Adding frames to sentences.")
doc_count = 0
event_count = 0
with gzip.open(doc) as data, gzip.open(output_path, "w") as out:
for line in data:
try:
doc_info = json.loads(line)
except JSONDecodeError:
continue
sentence = []
represent_by_id = {}
for entity in doc_info["entities"]:
eid = entity["entityId"]
represent = entity["representEntityHead"]
represent_by_id[eid] = represent
for event in doc_info["events"]:
event_count += 1
sentence.append(event_vocab.get_pred_rep(event))
if include_frame and not event["frame"] == "NA":
frame = event_vocab.get_vocab_word(event["frame"], "frame")
sentence.append(frame)
for arg in event["arguments"]:
dep = arg["dep"]
if (
arg["argStart"] == event["predicateStart"]
and arg["argEnd"] == event["predicateEnd"]
):
dep = "root"
if use_simple_dep:
dep = get_simple_dep(dep)
if prop_arg_only and not is_propbank_dep(dep):
continue
sentence.append(
event_vocab.get_arg_rep(
dep, event_vocab.get_arg_entity_rep(arg, None)
)
)
if include_frame and not arg["feName"] == "NA":
fe = event_vocab.get_fe_rep(frame, arg["feName"])
if not fe == event_vocab.oovs["fe"]:
sentence.append(fe)
if "NA" in sentence:
pdb.set_trace()
doc_count += 1
out.write(str.encode(" ".join(sentence) + "\n"))
if event_count % 1000 == 0:
print(
"\rCreated sentences for {} documents, "
"{} events.".format(doc_count, event_count),
end="",
)
print(
"\rCreated sentences for {} documents, "
"{} events.\n".format(doc_count, event_count),
end="",
)
def write_sentences(
sent_out, event_data, event_vocab, include_frame, simple_dep, prop_arg
):
if not os.path.exists(sent_out):
os.makedirs(sent_out)
fname = "sent_with_frames.gz" if include_frame else "sent_pred_only.gz"
out = os.path.join(sent_out, fname)
if not os.path.exists(out):
create_sentences(
event_data,
event_vocab,
out,
include_frame=include_frame,
use_simple_dep=simple_dep,
prop_arg_only=prop_arg,
)
else:
logger.info(f"Will not overwrite {out}")
def main(event_data, vocab_dir, sent_out, prop_arg):
if not os.path.exists(vocab_dir):
os.makedirs(vocab_dir)
event_vocab = TypedEventVocab(vocab_dir, event_data=event_data)
logger.info("Done loading vocabulary.")
# The 3 boolean are : include_frame,simple_dep, prop_arg
if prop_arg:
# For propbank style training.
logger.info("Creating event sentences in propbank style")
# Include frame or not version for propbank, but always use simple dep
# and propbank style arguments.
write_sentences(sent_out, event_data, event_vocab, False, True, True)
write_sentences(sent_out, event_data, event_vocab, True, True, True)
else:
# For framenet style training.
logger.info("Creating event sentences in FrameNet style")
# Include frame or not version for framenet, but always use complex dep
# and framenet style arguments.
write_sentences(sent_out, event_data, event_vocab, True, False, False)
write_sentences(sent_out, event_data, event_vocab, False, False, False)
if __name__ == "__main__":
parser = util.OptionPerLineParser(
description="Event Vocabulary.", fromfile_prefix_chars="@"
)
parser.add_argument("--vocab_dir", type=str, help="Vocabulary directory.")
parser.add_argument("--input_data", type=str, help="Input data.")
parser.add_argument("--sent_out", type=str, help="Sentence out dir.")
parser.add_argument(
"--prop_arg", action="store_true", help="Propbank arg only.", default=False
)
util.set_basic_log()
args = parser.parse_args()
main(args.input_data, args.vocab_dir, args.sent_out, args.prop_arg)
| 2.28125 | 2 |
20.py | dexinl/kids_math | 0 | 3078 | #!/usr/bin/python
import random
count = 20
test_set = []
while count:
a = random.randrange(3,20)
b = random.randrange(3,20)
if a > b and a - b > 1:
if (b, a-b) not in test_set:
test_set.append((b, a-b))
count -= 1
elif b > a and b - a > 1:
if (a, b-a) not in test_set:
test_set.append((a, b-a))
count -= 1
else:
continue
for (a,b) in test_set:
print " %2d + %2d = " % (a,b)
| 3.71875 | 4 |
autovirt/equipment/domain/equipment.py | xlam/autovirt | 0 | 3079 | <reponame>xlam/autovirt
from enum import Enum
from functools import reduce
from math import ceil
from typing import Optional, Tuple
from autovirt import utils
from autovirt.exception import AutovirtError
from autovirt.structs import UnitEquipment, RepairOffer
logger = utils.get_logger()
# maximum allowed equipment price
PRICE_MAX = 100000
# value to add and sub from offer quality when filtering
QUALITY_DELTA = 3
class QualityType(Enum):
INSTALLED = "quality"
REQUIRED = "quality_required"
def quantity_to_repair(units: list[UnitEquipment]) -> int:
"""Calculate total quantity of equipment to repair on given units"""
return sum([unit.wear_quantity for unit in units])
def quantity_total(units: list[UnitEquipment]) -> int:
"""Calculate total equipment count on given units"""
return sum([unit.quantity for unit in units])
def filter_offers(
offers: list[RepairOffer], quality: float, quantity: int
) -> list[RepairOffer]:
# select units in range [quality-DELTA ... quality+DELTA] and having enough repair parts
filtered = list(filter(lambda x: x.quality > quality - QUALITY_DELTA, offers))
filtered = list(filter(lambda x: x.quality < quality + QUALITY_DELTA, filtered))
filtered = list(filter(lambda x: x.quantity > quantity, filtered))
filtered = list(filter(lambda x: x.price < PRICE_MAX, filtered))
return filtered
def expected_quality(
qual_rep: float, qual_inst: float, items_total: int, items_wear: int
) -> float:
return (
qual_inst * (items_total - items_wear) + qual_rep * items_wear
) / items_total
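
# Worked example (illustrative, not part of the original module): repairing 20
# worn items out of 100 installed items of quality 30 with repair parts of
# quality 40 gives expected_quality(40, 30, 100, 20) == (30*80 + 40*20) / 100 == 32.0.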
def select_offer(
offers: list[RepairOffer], units: list[UnitEquipment], quality: float = None
) -> RepairOffer:
if not quality:
quality = units[0].quality_required
qnt_rep = quantity_to_repair(units)
qnt_total = quantity_total(units)
qual_min = utils.get_min(units, QualityType.INSTALLED.value)
qual_exp = [
expected_quality(o.quality, qual_min, qnt_total, qnt_rep) for o in offers
]
qual_diff = [abs(qual - quality) for qual in qual_exp]
diff_norm = utils.normalize_array(qual_diff)
price_norm = utils.normalize_array([o.price for o in offers])
qp_dist = [p + q for (p, q) in zip(price_norm, diff_norm)]
summary: list = [
[o, price_norm[i], qual_exp[i], qual_diff[i], diff_norm[i], qp_dist[i]]
for i, o in enumerate(offers)
if qual_exp[i] >= quality
]
logger.info(f"listing filtered offers for quality of {quality}:")
for o in summary:
logger.info(
f"id: {o[0].id}, quality: {o[0].quality}, price: {o[0].price},"
f" quantity: {o[0].quantity}, qual_exp: {o[2]:.2f}, qp: {o[5]:.3f}"
)
minimum_qp_item = reduce(lambda x, y: x if x[5] < y[5] else y, summary)
return minimum_qp_item[0]
def select_offer_to_raise_quality(
unit: UnitEquipment, offers: list[RepairOffer], margin: float = 0
) -> Optional[Tuple[RepairOffer, int]]:
required = unit.quality_required + margin
quality_coeff = unit.quantity * (required - unit.quality)
offers = list(filter(lambda o: o.quality >= required, offers))
if not offers:
return None
offer = offers[0]
count_to_replace = ceil(quality_coeff / (offer.quality - unit.quality))
price = count_to_replace * offer.price
for offer_ in offers[1:]:
count = ceil(quality_coeff / (offer_.quality - unit.quality))
price_ = count * offer_.price
if price_ < price:
offer = offer_
count_to_replace = count
return offer, count_to_replace
def split_by_quality(
units: list[UnitEquipment], quality_type: QualityType = QualityType.REQUIRED
) -> dict[float, list[UnitEquipment]]:
"""Split units by quality (required or installed)"""
res: dict[float, list[UnitEquipment]] = {}
for unit in units:
quality = getattr(unit, quality_type.value)
if quality not in res.keys():
res[quality] = []
res[quality].append(unit)
return res
def split_mismatch_quality_units(
units: list[UnitEquipment],
) -> tuple[list[UnitEquipment], list[UnitEquipment]]:
"""Split units into 'normal' and 'mismatch' groups.
    Mismatched units have installed equipment of lower quality than required.
    We need to treat them in a different manner than normal units while repairing.
"""
normal = []
mismatch = []
for unit in units:
if unit.quality < unit.quality_required:
mismatch.append(unit)
else:
normal.append(unit)
return normal, mismatch
| 2.625 | 3 |
day09/part2.py | mtn/advent16 | 0 | 3080 | <gh_stars>0
#!/usr/bin/env python3
import re
with open("input.txt") as f:
content = f.read().strip()
def ulen(content):
ans = 0
i = 0
while i < len(content):
if content[i] == "(":
end = content[i:].find(")") + i
instr = content[i+1:end]
chars, times = map(int, content[i+1:end].split("x"))
to_copy = content[end+1:end+1+chars]
to_copy_len = ulen(to_copy)
ans += times * to_copy_len
i = end + 1 + chars
else:
ans += 1
i += 1
return ans
print(ulen(content))
| 3.171875 | 3 |
cirq-core/cirq/contrib/quimb/mps_simulator_test.py | Nexuscompute/Cirq | 0 | 3081 | <gh_stars>0
# pylint: disable=wrong-or-nonexistent-copyright-notice
import itertools
import math
import numpy as np
import pytest
import sympy
import cirq
import cirq.contrib.quimb as ccq
import cirq.testing
from cirq import value
def assert_same_output_as_dense(circuit, qubit_order, initial_state=0, grouping=None):
mps_simulator = ccq.mps_simulator.MPSSimulator(grouping=grouping)
ref_simulator = cirq.Simulator()
actual = mps_simulator.simulate(circuit, qubit_order=qubit_order, initial_state=initial_state)
expected = ref_simulator.simulate(circuit, qubit_order=qubit_order, initial_state=initial_state)
np.testing.assert_allclose(
actual.final_state.to_numpy(), expected.final_state_vector, atol=1e-4
)
assert len(actual.measurements) == 0
def test_various_gates_1d():
gate_op_cls = [cirq.I, cirq.H, cirq.X, cirq.Y, cirq.Z, cirq.T]
cross_gate_op_cls = [cirq.CNOT, cirq.SWAP]
q0, q1 = cirq.LineQubit.range(2)
for q0_gate_op in gate_op_cls:
for q1_gate_op in gate_op_cls:
for cross_gate_op in cross_gate_op_cls:
circuit = cirq.Circuit(q0_gate_op(q0), q1_gate_op(q1), cross_gate_op(q0, q1))
for initial_state in range(2 * 2):
assert_same_output_as_dense(
circuit=circuit, qubit_order=[q0, q1], initial_state=initial_state
)
def test_various_gates_1d_flip():
q0, q1 = cirq.LineQubit.range(2)
circuit = cirq.Circuit(cirq.H(q1), cirq.CNOT(q1, q0))
assert_same_output_as_dense(circuit=circuit, qubit_order=[q0, q1])
assert_same_output_as_dense(circuit=circuit, qubit_order=[q1, q0])
def test_various_gates_2d():
gate_op_cls = [cirq.I, cirq.H]
cross_gate_op_cls = [cirq.CNOT, cirq.SWAP]
q0, q1, q2, q3, q4, q5 = cirq.GridQubit.rect(3, 2)
for q0_gate_op in gate_op_cls:
for q1_gate_op in gate_op_cls:
for q2_gate_op in gate_op_cls:
for q3_gate_op in gate_op_cls:
for cross_gate_op1 in cross_gate_op_cls:
for cross_gate_op2 in cross_gate_op_cls:
circuit = cirq.Circuit(
q0_gate_op(q0),
q1_gate_op(q1),
cross_gate_op1(q0, q1),
q2_gate_op(q2),
q3_gate_op(q3),
cross_gate_op2(q3, q1),
)
assert_same_output_as_dense(
circuit=circuit, qubit_order=[q0, q1, q2, q3, q4, q5]
)
def test_grouping():
q0, q1, q2 = cirq.LineQubit.range(3)
circuit = cirq.Circuit(
cirq.X(q0) ** 0.1,
cirq.Y(q1) ** 0.2,
cirq.Z(q2) ** 0.3,
cirq.CNOT(q0, q1),
cirq.Y(q1) ** 0.4,
)
groupings = [
None,
{q0: 0, q1: 1, q2: 2},
{q0: 0, q1: 0, q2: 1},
{q0: 0, q1: 1, q2: 0},
{q0: 1, q1: 0, q2: 0},
{q0: 0, q1: 0, q2: 0},
]
for grouping in groupings:
for initial_state in range(2 * 2 * 2):
assert_same_output_as_dense(
circuit=circuit,
qubit_order=[q0, q1, q2],
initial_state=initial_state,
grouping=grouping,
)
def test_grouping_does_not_overlap():
q0, q1 = cirq.LineQubit.range(2)
mps_simulator = ccq.mps_simulator.MPSSimulator(grouping={q0: 0})
with pytest.raises(ValueError, match="Grouping must cover exactly the qubits"):
mps_simulator.simulate(cirq.Circuit(), qubit_order={q0: 0, q1: 1})
def test_same_partial_trace():
qubit_order = cirq.LineQubit.range(2)
q0, q1 = qubit_order
mps_simulator = ccq.mps_simulator.MPSSimulator()
for _ in range(50):
for initial_state in range(4):
circuit = cirq.testing.random_circuit(qubit_order, 3, 0.9)
expected_density_matrix = cirq.final_density_matrix(
circuit, qubit_order=qubit_order, initial_state=initial_state
)
expected_partial_trace = cirq.partial_trace(
expected_density_matrix.reshape(2, 2, 2, 2), keep_indices=[0]
)
final_state = mps_simulator.simulate(
circuit, qubit_order=qubit_order, initial_state=initial_state
).final_state
actual_density_matrix = final_state.partial_trace([q0, q1])
actual_partial_trace = final_state.partial_trace([q0])
np.testing.assert_allclose(actual_density_matrix, expected_density_matrix, atol=1e-4)
np.testing.assert_allclose(actual_partial_trace, expected_partial_trace, atol=1e-4)
def test_probs_dont_sum_up_to_one():
q0 = cirq.NamedQid('q0', dimension=2)
circuit = cirq.Circuit(cirq.measure(q0))
simulator = ccq.mps_simulator.MPSSimulator(
simulation_options=ccq.mps_simulator.MPSOptions(sum_prob_atol=-0.5)
)
with pytest.raises(ValueError, match="Sum of probabilities exceeds tolerance"):
simulator.run(circuit, repetitions=1)
def test_empty():
q0 = cirq.NamedQid('q0', dimension=2)
q1 = cirq.NamedQid('q1', dimension=3)
q2 = cirq.NamedQid('q2', dimension=5)
circuit = cirq.Circuit()
for initial_state in range(2 * 3 * 5):
assert_same_output_as_dense(
circuit=circuit, qubit_order=[q0, q1, q2], initial_state=initial_state
)
def test_cnot():
q0, q1 = cirq.LineQubit.range(2)
circuit = cirq.Circuit(cirq.CNOT(q0, q1))
for initial_state in range(4):
assert_same_output_as_dense(
circuit=circuit, qubit_order=[q0, q1], initial_state=initial_state
)
def test_cnot_flipped():
q0, q1 = cirq.LineQubit.range(2)
circuit = cirq.Circuit(cirq.CNOT(q1, q0))
for initial_state in range(4):
assert_same_output_as_dense(
circuit=circuit, qubit_order=[q0, q1], initial_state=initial_state
)
def test_simulation_state():
q0, q1 = qubit_order = cirq.LineQubit.range(2)
circuit = cirq.Circuit(cirq.CNOT(q1, q0))
mps_simulator = ccq.mps_simulator.MPSSimulator()
ref_simulator = cirq.Simulator()
for initial_state in range(4):
args = mps_simulator._create_simulation_state(initial_state=initial_state, qubits=(q0, q1))
actual = mps_simulator.simulate(circuit, qubit_order=qubit_order, initial_state=args)
expected = ref_simulator.simulate(
circuit, qubit_order=qubit_order, initial_state=initial_state
)
np.testing.assert_allclose(
actual.final_state.to_numpy(), expected.final_state_vector, atol=1e-4
)
assert len(actual.measurements) == 0
def test_three_qubits():
q0, q1, q2 = cirq.LineQubit.range(3)
circuit = cirq.Circuit(cirq.CCX(q0, q1, q2))
with pytest.raises(ValueError, match="Can only handle 1 and 2 qubit operations"):
assert_same_output_as_dense(circuit=circuit, qubit_order=[q0, q1, q2])
def test_measurement_1qubit():
q0, q1 = cirq.LineQubit.range(2)
circuit = cirq.Circuit(cirq.X(q0), cirq.H(q1), cirq.measure(q1))
simulator = ccq.mps_simulator.MPSSimulator()
result = simulator.run(circuit, repetitions=100)
assert sum(result.measurements['q(1)'])[0] < 80
assert sum(result.measurements['q(1)'])[0] > 20
def test_reset():
q = cirq.LineQubit(0)
simulator = ccq.mps_simulator.MPSSimulator()
c = cirq.Circuit(cirq.X(q), cirq.reset(q), cirq.measure(q))
assert simulator.sample(c)['q(0)'][0] == 0
c = cirq.Circuit(cirq.H(q), cirq.reset(q), cirq.measure(q))
assert simulator.sample(c)['q(0)'][0] == 0
c = cirq.Circuit(cirq.reset(q), cirq.measure(q))
assert simulator.sample(c)['q(0)'][0] == 0
def test_measurement_2qubits():
q0, q1, q2 = cirq.LineQubit.range(3)
circuit = cirq.Circuit(cirq.H(q0), cirq.H(q1), cirq.H(q2), cirq.measure(q0, q2))
simulator = ccq.mps_simulator.MPSSimulator()
repetitions = 1024
measurement = simulator.run(circuit, repetitions=repetitions).measurements['q(0),q(2)']
result_counts = {'00': 0, '01': 0, '10': 0, '11': 0}
for i in range(repetitions):
key = str(measurement[i, 0]) + str(measurement[i, 1])
result_counts[key] += 1
for result_count in result_counts.values():
# Expected value is 1/4:
assert result_count > repetitions * 0.15
assert result_count < repetitions * 0.35
def test_measurement_str():
q0 = cirq.NamedQid('q0', dimension=3)
circuit = cirq.Circuit(cirq.measure(q0))
simulator = ccq.mps_simulator.MPSSimulator()
result = simulator.run(circuit, repetitions=7)
assert str(result) == "q0 (d=3)=0000000"
def test_trial_result_str():
q0 = cirq.LineQubit(0)
final_simulator_state = ccq.mps_simulator.MPSState(
qubits=(q0,),
prng=value.parse_random_state(0),
simulation_options=ccq.mps_simulator.MPSOptions(),
)
result = ccq.mps_simulator.MPSTrialResult(
params=cirq.ParamResolver({}),
measurements={'m': np.array([[1]])},
final_simulator_state=final_simulator_state,
)
assert 'output state: TensorNetwork' in str(result)
def test_trial_result_repr_pretty():
q0 = cirq.LineQubit(0)
final_simulator_state = ccq.mps_simulator.MPSState(
qubits=(q0,),
prng=value.parse_random_state(0),
simulation_options=ccq.mps_simulator.MPSOptions(),
)
result = ccq.mps_simulator.MPSTrialResult(
params=cirq.ParamResolver({}),
measurements={'m': np.array([[1]])},
final_simulator_state=final_simulator_state,
)
cirq.testing.assert_repr_pretty_contains(result, 'output state: TensorNetwork')
cirq.testing.assert_repr_pretty(result, "cirq.MPSTrialResult(...)", cycle=True)
def test_empty_step_result():
q0 = cirq.LineQubit(0)
sim = ccq.mps_simulator.MPSSimulator()
step_result = next(sim.simulate_moment_steps(cirq.Circuit(cirq.measure(q0))))
assert 'TensorNetwork' in str(step_result)
def test_step_result_repr_pretty():
q0 = cirq.LineQubit(0)
sim = ccq.mps_simulator.MPSSimulator()
step_result = next(sim.simulate_moment_steps(cirq.Circuit(cirq.measure(q0))))
cirq.testing.assert_repr_pretty_contains(step_result, 'TensorNetwork')
cirq.testing.assert_repr_pretty(step_result, "cirq.MPSSimulatorStepResult(...)", cycle=True)
def test_state_equal():
q0, q1 = cirq.LineQubit.range(2)
state0 = ccq.mps_simulator.MPSState(
qubits=(q0,),
prng=value.parse_random_state(0),
simulation_options=ccq.mps_simulator.MPSOptions(cutoff=1e-3, sum_prob_atol=1e-3),
)
state1a = ccq.mps_simulator.MPSState(
qubits=(q1,),
prng=value.parse_random_state(0),
simulation_options=ccq.mps_simulator.MPSOptions(cutoff=1e-3, sum_prob_atol=1e-3),
)
state1b = ccq.mps_simulator.MPSState(
qubits=(q1,),
prng=value.parse_random_state(0),
simulation_options=ccq.mps_simulator.MPSOptions(cutoff=1729.0, sum_prob_atol=1e-3),
)
assert state0 == state0
assert state0 != state1a
assert state1a != state1b
def test_random_circuits_equal_more_rows():
circuit = cirq.testing.random_circuit(
qubits=cirq.GridQubit.rect(3, 2), n_moments=6, op_density=1.0
)
qubits = circuit.all_qubits()
assert_same_output_as_dense(circuit, qubits)
def test_supremacy_equal_more_cols():
circuit = cirq.testing.random_circuit(
qubits=cirq.GridQubit.rect(2, 3), n_moments=6, op_density=1.0
)
qubits = circuit.all_qubits()
assert_same_output_as_dense(circuit, qubits)
def test_tensor_index_names():
qubits = cirq.LineQubit.range(12)
qubit_map = {qubit: i for i, qubit in enumerate(qubits)}
state = ccq.mps_simulator.MPSState(qubits=qubit_map, prng=value.parse_random_state(0))
assert state.i_str(0) == "i_00"
assert state.i_str(11) == "i_11"
assert state.mu_str(0, 3) == "mu_0_3"
assert state.mu_str(3, 0) == "mu_0_3"
def test_simulate_moment_steps_sample():
q0, q1 = cirq.LineQubit.range(2)
circuit = cirq.Circuit(cirq.H(q0), cirq.CNOT(q0, q1))
simulator = ccq.mps_simulator.MPSSimulator()
for i, step in enumerate(simulator.simulate_moment_steps(circuit)):
if i == 0:
np.testing.assert_almost_equal(
step._simulator_state().to_numpy(),
np.asarray([1.0 / math.sqrt(2), 0.0, 1.0 / math.sqrt(2), 0.0]),
)
# There are two "Tensor()" copies in the string.
assert len(str(step).split('Tensor(')) == 3
samples = step.sample([q0, q1], repetitions=10)
for sample in samples:
assert np.array_equal(sample, [True, False]) or np.array_equal(
sample, [False, False]
)
np.testing.assert_almost_equal(
step._simulator_state().to_numpy(),
np.asarray([1.0 / math.sqrt(2), 0.0, 1.0 / math.sqrt(2), 0.0]),
)
else:
np.testing.assert_almost_equal(
step._simulator_state().to_numpy(),
np.asarray([1.0 / math.sqrt(2), 0.0, 0.0, 1.0 / math.sqrt(2)]),
)
# There are two "Tensor()" copies in the string.
assert len(str(step).split('Tensor(')) == 3
samples = step.sample([q0, q1], repetitions=10)
for sample in samples:
assert np.array_equal(sample, [True, True]) or np.array_equal(
sample, [False, False]
)
def test_sample_seed():
q = cirq.NamedQubit('q')
circuit = cirq.Circuit(cirq.H(q), cirq.measure(q))
simulator = ccq.mps_simulator.MPSSimulator(seed=1234)
result = simulator.run(circuit, repetitions=20)
measured = result.measurements['q']
result_string = ''.join(map(lambda x: str(int(x[0])), measured))
assert result_string == '01011001110111011011'
def test_run_no_repetitions():
q0 = cirq.LineQubit(0)
simulator = ccq.mps_simulator.MPSSimulator()
circuit = cirq.Circuit(cirq.H(q0), cirq.measure(q0))
result = simulator.run(circuit, repetitions=0)
assert len(result.measurements['q(0)']) == 0
def test_run_parameters_not_resolved():
a = cirq.LineQubit(0)
simulator = ccq.mps_simulator.MPSSimulator()
circuit = cirq.Circuit(cirq.XPowGate(exponent=sympy.Symbol('a'))(a), cirq.measure(a))
with pytest.raises(ValueError, match='symbols were not specified'):
_ = simulator.run_sweep(circuit, cirq.ParamResolver({}))
def test_deterministic_gate_noise():
q = cirq.LineQubit(0)
circuit = cirq.Circuit(cirq.I(q), cirq.measure(q))
simulator1 = ccq.mps_simulator.MPSSimulator(noise=cirq.X)
result1 = simulator1.run(circuit, repetitions=10)
simulator2 = ccq.mps_simulator.MPSSimulator(noise=cirq.X)
result2 = simulator2.run(circuit, repetitions=10)
assert result1 == result2
simulator3 = ccq.mps_simulator.MPSSimulator(noise=cirq.Z)
result3 = simulator3.run(circuit, repetitions=10)
assert result1 != result3
def test_nondeterministic_mixture_noise():
q = cirq.LineQubit(0)
circuit = cirq.Circuit(cirq.I(q), cirq.measure(q))
simulator = ccq.mps_simulator.MPSSimulator(
noise=cirq.ConstantQubitNoiseModel(cirq.depolarize(0.5))
)
result1 = simulator.run(circuit, repetitions=50)
result2 = simulator.run(circuit, repetitions=50)
assert result1 != result2
def test_unsupported_noise_fails():
with pytest.raises(ValueError, match='noise must be unitary or mixture but was'):
ccq.mps_simulator.MPSSimulator(noise=cirq.amplitude_damp(0.5))
def test_state_copy():
sim = ccq.mps_simulator.MPSSimulator()
q = cirq.LineQubit(0)
circuit = cirq.Circuit(cirq.H(q), cirq.H(q))
state_Ms = []
for step in sim.simulate_moment_steps(circuit):
state_Ms.append(step.state.M)
for x, y in itertools.combinations(state_Ms, 2):
assert len(x) == len(y)
for i in range(len(x)):
assert not np.shares_memory(x[i], y[i])
def test_simulation_state_initializer():
s = ccq.mps_simulator.MPSState(
qubits=(cirq.LineQubit(0),),
prng=np.random.RandomState(0),
classical_data=cirq.ClassicalDataDictionaryStore(
_records={cirq.MeasurementKey('test'): [(4,)]}
),
)
assert s.qubits == (cirq.LineQubit(0),)
assert s.log_of_measurement_results == {'test': [4]}
def test_act_on_gate():
args = ccq.mps_simulator.MPSState(qubits=cirq.LineQubit.range(3), prng=np.random.RandomState(0))
cirq.act_on(cirq.X, args, [cirq.LineQubit(1)])
np.testing.assert_allclose(
args.state_vector().reshape((2, 2, 2)),
cirq.one_hot(index=(0, 1, 0), shape=(2, 2, 2), dtype=np.complex64),
)
def test_deprecated():
prng = np.random.RandomState(0)
with cirq.testing.assert_deprecated('log_of_measurement_results', deadline='0.16', count=2):
_ = ccq.mps_simulator.MPSState(
qubits=cirq.LineQubit.range(3), prng=prng, log_of_measurement_results={}
)
with cirq.testing.assert_deprecated('positional', deadline='0.16'):
_ = ccq.mps_simulator.MPSState(cirq.LineQubit.range(3), prng=prng)
| 1.9375 | 2 |
e2e_tests/tests/config.py | winding-lines/determined | 0 | 3082 | import os
from pathlib import Path
from typing import Any, Dict
from determined.common import util
MASTER_SCHEME = "http"
MASTER_IP = "localhost"
MASTER_PORT = "8080"
DET_VERSION = None
DEFAULT_MAX_WAIT_SECS = 1800
MAX_TASK_SCHEDULED_SECS = 30
MAX_TRIAL_BUILD_SECS = 90
DEFAULT_TF1_CPU_IMAGE = "determinedai/environments:py-3.7-pytorch-1.7-tf-1.15-cpu-08f9c9b"
DEFAULT_TF2_CPU_IMAGE = (
"determinedai/environments:py-3.8-pytorch-1.9-lightning-1.3-tf-2.4-cpu-08f9c9b"
)
DEFAULT_TF1_GPU_IMAGE = "determinedai/environments:cuda-10.2-pytorch-1.7-tf-1.15-gpu-08f9c9b"
DEFAULT_TF2_GPU_IMAGE = (
"determinedai/environments:cuda-11.1-pytorch-1.9-lightning-1.3-tf-2.4-gpu-08f9c9b"
)
TF1_CPU_IMAGE = os.environ.get("TF1_CPU_IMAGE") or DEFAULT_TF1_CPU_IMAGE
TF2_CPU_IMAGE = os.environ.get("TF2_CPU_IMAGE") or DEFAULT_TF2_CPU_IMAGE
TF1_GPU_IMAGE = os.environ.get("TF1_GPU_IMAGE") or DEFAULT_TF1_GPU_IMAGE
TF2_GPU_IMAGE = os.environ.get("TF2_GPU_IMAGE") or DEFAULT_TF2_GPU_IMAGE
GPU_ENABLED = os.environ.get("DET_TEST_GPU_ENABLED", "1") not in ("0", "false")
PROJECT_ROOT_PATH = Path(__file__).resolve().parents[2]
def fixtures_path(path: str) -> str:
return os.path.join(os.path.dirname(__file__), "fixtures", path)
def tutorials_path(path: str) -> str:
return os.path.join(os.path.dirname(__file__), "../../examples/tutorials", path)
def cv_examples_path(path: str) -> str:
return os.path.join(os.path.dirname(__file__), "../../examples/computer_vision", path)
def nlp_examples_path(path: str) -> str:
return os.path.join(os.path.dirname(__file__), "../../examples/nlp", path)
def nas_examples_path(path: str) -> str:
return os.path.join(os.path.dirname(__file__), "../../examples/nas", path)
def meta_learning_examples_path(path: str) -> str:
return os.path.join(os.path.dirname(__file__), "../../examples/meta_learning", path)
def gan_examples_path(path: str) -> str:
return os.path.join(os.path.dirname(__file__), "../../examples/gan", path)
def decision_trees_examples_path(path: str) -> str:
return os.path.join(os.path.dirname(__file__), "../../examples/decision_trees", path)
def features_examples_path(path: str) -> str:
return os.path.join(os.path.dirname(__file__), "../../examples/features", path)
def model_hub_examples_path(path: str) -> str:
return os.path.join(os.path.dirname(__file__), "../../model_hub/examples", path)
def graphs_examples_path(path: str) -> str:
return os.path.join(os.path.dirname(__file__), "../../examples/graphs", path)
def load_config(config_path: str) -> Any:
with open(config_path) as f:
config = util.safe_load_yaml_with_exceptions(f)
return config
def make_master_url(suffix: str = "") -> str:
return "{}://{}:{}/{}".format(MASTER_SCHEME, MASTER_IP, MASTER_PORT, suffix)
def set_global_batch_size(config: Dict[Any, Any], batch_size: int) -> Dict[Any, Any]:
config = config.copy()
config["hyperparameters"]["global_batch_size"] = batch_size
return config
def set_slots_per_trial(config: Dict[Any, Any], slots: int) -> Dict[Any, Any]:
config = config.copy()
config.setdefault("resources", {})
config["resources"]["slots_per_trial"] = slots
return config
def set_max_length(config: Dict[Any, Any], max_length: Dict[str, int]) -> Dict[Any, Any]:
config = config.copy()
config["searcher"]["max_length"] = max_length
return config
def set_min_validation_period(
config: Dict[Any, Any], min_validation_period: Dict[str, int]
) -> Dict[Any, Any]:
config = config.copy()
config["min_validation_period"] = min_validation_period
return config
def set_min_checkpoint_period(
config: Dict[Any, Any], min_checkpoint_period: Dict[str, int]
) -> Dict[Any, Any]:
config = config.copy()
config["min_checkpoint_period"] = min_checkpoint_period
return config
def set_aggregation_frequency(config: Dict[Any, Any], aggregation_frequency: int) -> Dict[Any, Any]:
config = config.copy()
config.setdefault("optimizations", {})
config["optimizations"]["aggregation_frequency"] = aggregation_frequency
return config
def set_tensor_auto_tuning(config: Dict[Any, Any], auto_tune: bool) -> Dict[Any, Any]:
config = config.copy()
config.setdefault("optimizations", {})
config["optimizations"]["auto_tune_tensor_fusion"] = auto_tune
return config
def set_image(config: Dict[Any, Any], cpu_image: str, gpu_image: str) -> Dict[Any, Any]:
config = config.copy()
config.setdefault("environment", {})
config["environment"]["image"] = {"cpu": cpu_image, "gpu": gpu_image}
return config
def set_tf1_image(config: Dict[Any, Any]) -> Dict[Any, Any]:
return set_image(config, TF1_CPU_IMAGE, TF1_GPU_IMAGE)
def set_tf2_image(config: Dict[Any, Any]) -> Dict[Any, Any]:
return set_image(config, TF2_CPU_IMAGE, TF2_GPU_IMAGE)
def set_shared_fs_data_layer(config: Dict[Any, Any]) -> Dict[Any, Any]:
config = config.copy()
config["data_layer"] = {}
config["data_layer"]["type"] = "shared_fs"
return config
def set_s3_data_layer(config: Dict[Any, Any]) -> Dict[Any, Any]:
config = config.copy()
config["data_layer"] = {}
config["data_layer"]["type"] = "s3"
config["data_layer"]["bucket"] = "yogadl-test"
config["data_layer"]["bucket_directory_path"] = "determined_integration_tests"
return config
def set_random_seed(config: Dict[Any, Any], seed: int) -> Dict[Any, Any]:
config = config.copy()
config.setdefault("reproducibility", {})
config["reproducibility"]["experiment_seed"] = seed
return config
def set_hparam(config: Dict[Any, Any], name: str, value: Any) -> Dict[Any, Any]:
config = config.copy()
config["hyperparameters"][name] = {"type": "const", "val": value}
return config
def set_perform_initial_validation(config: Dict[Any, Any], init_val: bool) -> Dict[Any, Any]:
config = config.copy()
config["perform_initial_validation"] = init_val
return config
def set_pod_spec(config: Dict[Any, Any], pod_spec: Dict[Any, Any]) -> Dict[Any, Any]:
config = config.copy()
config.setdefault("environment", {})
config["environment"]["pod_spec"] = pod_spec
return config
def set_profiling_enabled(config: Dict[Any, Any]) -> Dict[Any, Any]:
config = config.copy()
config.setdefault("profiling", {})
config["profiling"]["enabled"] = True
return config
def set_entrypoint(config: Dict[Any, Any], entrypoint: str) -> Dict[Any, Any]:
config = config.copy()
config["entrypoint"] = entrypoint
return config
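# A typical composition of the helpers above in a test (illustrative only; the
# fixture path below is a placeholder, not a real file in this repo):
#
#   config = load_config(fixtures_path("some_experiment/const.yaml"))
#   config = set_global_batch_size(config, 16)
#   config = set_slots_per_trial(config, 1)
#   config = set_max_length(config, {"batches": 100})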
| 2.046875 | 2 |
src/greenbudget/app/subaccount/serializers.py | nickmflorin/django-proper-architecture-testing | 0 | 3083 | from django.contrib.contenttypes.models import ContentType
from rest_framework import serializers, exceptions
from greenbudget.lib.rest_framework_utils.fields import ModelChoiceField
from greenbudget.lib.rest_framework_utils.serializers import (
EnhancedModelSerializer)
from greenbudget.app.budget.models import BaseBudget
from greenbudget.app.common.serializers import (
EntitySerializer,
AbstractBulkUpdateSerializer,
create_bulk_create_serializer
)
from greenbudget.app.fringe.models import Fringe
from greenbudget.app.group.models import (
BudgetSubAccountGroup,
TemplateSubAccountGroup
)
from .models import SubAccount, BudgetSubAccount, TemplateSubAccount
class SubAccountSimpleSerializer(EnhancedModelSerializer):
id = serializers.IntegerField(read_only=True)
type = serializers.CharField(read_only=True)
identifier = serializers.CharField(
required=False,
allow_blank=False,
allow_null=True,
trim_whitespace=False
)
description = serializers.CharField(
required=False,
allow_blank=False,
allow_null=True,
trim_whitespace=False
)
name = serializers.CharField(
required=False,
allow_blank=True,
allow_null=False,
trim_whitespace=False
)
class Meta:
model = SubAccount
fields = ('id', 'name', 'identifier', 'type', 'description')
class SubAccountSerializer(SubAccountSimpleSerializer):
created_by = serializers.PrimaryKeyRelatedField(read_only=True)
updated_by = serializers.PrimaryKeyRelatedField(read_only=True)
created_at = serializers.DateTimeField(read_only=True)
updated_at = serializers.DateTimeField(read_only=True)
quantity = serializers.IntegerField(
required=False,
allow_null=True
)
rate = serializers.FloatField(required=False, allow_null=True)
multiplier = serializers.FloatField(required=False, allow_null=True)
estimated = serializers.FloatField(read_only=True)
unit = ModelChoiceField(
required=False,
choices=SubAccount.UNITS,
allow_null=True
)
budget = serializers.PrimaryKeyRelatedField(read_only=True)
subaccounts = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
ancestors = EntitySerializer(many=True, read_only=True)
siblings = EntitySerializer(many=True, read_only=True)
account = serializers.IntegerField(read_only=True, source='account.pk')
object_id = serializers.IntegerField(read_only=True)
parent_type = serializers.ChoiceField(
choices=["account", "subaccount"],
read_only=True
)
fringes = serializers.PrimaryKeyRelatedField(
many=True,
required=False,
queryset=Fringe.objects.filter(budget__trash=False)
)
class Meta:
model = SubAccount
fields = SubAccountSimpleSerializer.Meta.fields + (
'identifier', 'name', 'created_by', 'updated_by', 'created_at',
'updated_at', 'quantity', 'rate', 'multiplier', 'unit', 'account',
'object_id', 'parent_type', 'ancestors', 'estimated', 'subaccounts',
'budget', 'siblings', 'fringes')
def validate(self, attrs):
if self.instance is not None and self.instance.subaccounts.count() != 0:
if any([field in attrs for field in self.instance.DERIVING_FIELDS]):
raise exceptions.ValidationError(
"Field can only be updated when the sub account is not "
"derived."
)
return super().validate(attrs)
class BudgetSubAccountSerializer(SubAccountSerializer):
actual = serializers.FloatField(read_only=True)
variance = serializers.FloatField(read_only=True)
group = serializers.PrimaryKeyRelatedField(
required=False,
allow_null=True,
queryset=BudgetSubAccountGroup.objects.all()
)
class Meta:
model = BudgetSubAccount
fields = SubAccountSerializer.Meta.fields + (
'actual', 'variance', 'group')
class TemplateSubAccountSerializer(SubAccountSerializer):
group = serializers.PrimaryKeyRelatedField(
required=False,
allow_null=True,
queryset=TemplateSubAccountGroup.objects.all()
)
class Meta:
model = TemplateSubAccount
fields = SubAccountSerializer.Meta.fields + ('group', )
def create_bulk_create_subaccounts_serializer(model_cls):
data_serializer = BudgetSubAccountSerializer
if model_cls is TemplateSubAccount:
data_serializer = TemplateSubAccountSerializer
base_serializer = create_bulk_create_serializer(data_serializer)
class BulkCreateSubAccountsSerializer(base_serializer):
class Meta(base_serializer.Meta):
model = BaseBudget
def get_serializer_context(self, instance):
return {'parent': instance}
def perform_save(self, serializer, instance, validated_data):
# Note that the updated_by argument is the user updating the
# Account by adding new SubAccount(s), so the SubAccount(s)
# should be denoted as having been created by this user.
return serializer.save(
updated_by=validated_data['updated_by'],
created_by=validated_data['updated_by'],
object_id=instance.pk,
content_type=ContentType.objects.get_for_model(model_cls),
parent=instance,
budget=instance.budget
)
return BulkCreateSubAccountsSerializer
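# Rough usage sketch (hypothetical view code, added for illustration): the
# factory is parametrized by the concrete model so the budget and template
# flows can share one bulk-create implementation.
#
#   serializer_cls = create_bulk_create_subaccounts_serializer(BudgetSubAccount)
#   serializer = serializer_cls(instance=account, data=request.data)
#   serializer.is_valid(raise_exception=True)
#   serializer.save(updated_by=request.user)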
def create_subaccount_bulk_change_serializer(model_cls):
base_serializer = BudgetSubAccountSerializer
if model_cls is TemplateSubAccount:
base_serializer = TemplateSubAccountSerializer
class SubAccountBulkChangeSerializer(base_serializer):
id = serializers.PrimaryKeyRelatedField(
required=True,
queryset=model_cls.objects.all()
)
def validate_id(self, instance):
account = self.parent.parent.instance
if account != instance.parent:
raise exceptions.ValidationError(
"The sub-account %s does not belong to account %s."
% (instance.pk, account.pk)
)
return instance
return SubAccountBulkChangeSerializer
def create_bulk_update_subaccounts_serializer(model_cls):
class BulkUpdateSubAccountsSerializer(AbstractBulkUpdateSerializer):
data = create_subaccount_bulk_change_serializer(model_cls)(
many=True, nested=True)
class Meta:
model = BaseBudget
fields = ('data', )
def update(self, instance, validated_data):
for subaccount, change in validated_data['data']:
serializer = SubAccountSerializer(
instance=subaccount,
data=change,
partial=True
)
serializer.is_valid(raise_exception=True)
serializer.save(
updated_by=validated_data['updated_by'],
suppress_budget_update=validated_data.get(
'suppress_budget_update', False)
)
return instance
return BulkUpdateSubAccountsSerializer
| 1.695313 | 2 |
modules/dbnd/src/dbnd/_core/tracking/managers/callable_tracking.py | busunkim96/dbnd | 224 | 3084 | import contextlib
import logging
import typing
from typing import Any, Dict, Tuple
import attr
from dbnd._core.configuration import get_dbnd_project_config
from dbnd._core.constants import (
RESULT_PARAM,
DbndTargetOperationStatus,
DbndTargetOperationType,
TaskRunState,
)
from dbnd._core.current import (
current_task_run,
get_databand_run,
is_verbose,
try_get_current_task,
)
from dbnd._core.errors.errors_utils import log_exception
from dbnd._core.log.external_exception_logging import log_exception_to_server
from dbnd._core.parameter.parameter_definition import ParameterDefinition
from dbnd._core.parameter.parameter_value import ParameterFilters
from dbnd._core.settings import TrackingConfig
from dbnd._core.task.tracking_task import TrackingTask
from dbnd._core.task_build.task_context import try_get_current_task
from dbnd._core.task_build.task_definition import TaskDefinition
from dbnd._core.task_build.task_results import FuncResultParameter
from dbnd._core.task_run.task_run import TaskRun
from dbnd._core.task_run.task_run_error import TaskRunError
from dbnd._core.utils.callable_spec import args_to_kwargs
from dbnd._core.utils.timezone import utcnow
from targets import InMemoryTarget, Target
from targets.value_meta import ValueMetaConf
from targets.values import get_value_type_of_obj
if typing.TYPE_CHECKING:
from dbnd._core.task_build.task_decorator import TaskDecorator
logger = logging.getLogger(__name__)
@attr.s
class TrackedFuncCallWithResult(object):
call_args = attr.ib() # type: Tuple[Any]
call_kwargs = attr.ib() # type: Dict[str,Any]
callable = attr.ib()
result = attr.ib(default=None)
def set_result(self, value):
self.result = value
return value
def invoke(self):
func = self.callable
return func(*self.call_args, **self.call_kwargs)
class CallableTrackingManager(object):
def __init__(self, task_decorator):
# type: (CallableTrackingManager, TaskDecorator) -> None
self.task_decorator = task_decorator
self._tracking_task_definition = None
self._call_count = 0
self._call_as_func = False
self._max_call_count = get_dbnd_project_config().max_calls_per_run
@property
def callable(self):
return self.task_decorator.class_or_func
def get_tracking_task_definition(self):
if not self._tracking_task_definition:
self._tracking_task_definition = self._build_tracking_task_definition()
return self._tracking_task_definition
def _build_tracking_task_definition(self):
return TaskDefinition.from_task_decorator(task_decorator=self.task_decorator)
def _call_count_limit_exceeded(self):
if not self._call_as_func:
self._call_count += 1
if self._call_count > self._max_call_count:
logger.info(
"Reached maximum tracking limit of {} tasks. Running function regularly.".format(
self._max_call_count
)
)
self._call_as_func = True
return self._call_as_func
@contextlib.contextmanager
def tracking_context(self, call_args, call_kwargs):
        user_code_called = False  # whether we got to executing the user code
        user_code_finished = False  # whether we finished executing the user code
func_call = None
try:
# 1. check that we don't have too many calls
if self._call_count_limit_exceeded():
yield _do_nothing_decorator
return
# 2. Start or reuse existing "main tracking task" that is root for tracked tasks
if not try_get_current_task():
"""
                try to get an existing task; if it does not exist - try to get/create an inplace_task_run
"""
from dbnd._core.tracking.script_tracking_manager import (
try_get_inplace_tracking_task_run,
)
                inplace_tracking_task = try_get_inplace_tracking_task_run()
                if not inplace_tracking_task:
                    # we didn't manage to start an inplace tracking task run, so we will not be able to track
yield _do_nothing_decorator
return
tracking_task_definition = self.get_tracking_task_definition()
callable_spec = tracking_task_definition.task_decorator.get_callable_spec()
func_call = TrackedFuncCallWithResult(
callable=self.callable,
call_args=tuple(call_args), # prevent original call_args modification
call_kwargs=dict(call_kwargs), # prevent original kwargs modification
)
            # replace any positional argument with a kwarg where possible
args, kwargs = args_to_kwargs(
callable_spec.args, func_call.call_args, func_call.call_kwargs,
)
# instantiate inline task
task = TrackingTask.for_func(tracking_task_definition, args, kwargs)
# update upstream/downstream relations - needed for correct tracking
            # the task can be set as upstream, since it was already executed
parent_task = current_task_run().task
if not parent_task.task_dag.has_upstream(task):
parent_task.set_upstream(task)
            # check whether any of the inputs are outputs of a previous task;
            # if so, we can add that task as upstream.
dbnd_run = get_databand_run()
call_kwargs_as_targets = dbnd_run.target_origin.get_for_map(kwargs)
for value_origin in call_kwargs_as_targets.values():
up_task = value_origin.origin_target.task
task.set_upstream(up_task)
# creating task_run as a task we found mid-run
task_run = dbnd_run.create_task_run_at_execution_time(
task, task_engine=current_task_run().task_engine
)
should_capture_log = TrackingConfig.current().capture_tracking_log
with task_run.runner.task_run_execution_context(
handle_sigterm=True, capture_log=should_capture_log
):
task_run.set_task_run_state(state=TaskRunState.RUNNING)
_log_inputs(task_run)
# if we reached this line, then all tracking initialization is
# finished successfully, and we're going to execute user code
user_code_called = True
try:
                    # tracking_context is a context manager - user code will run on yield
yield func_call.set_result
# if we reached this line, this means that user code finished
# successfully without any exceptions
user_code_finished = True
except Exception as ex:
task_run.finished_time = utcnow()
error = TaskRunError.build_from_ex(ex, task_run)
task_run.set_task_run_state(TaskRunState.FAILED, error=error)
raise
else:
task_run.finished_time = utcnow()
# func_call.result should contain result, log it
_log_result(task_run, func_call.result)
task_run.set_task_run_state(TaskRunState.SUCCESS)
except Exception:
if user_code_called and not user_code_finished:
                # if we started to call the user code and did not get to the user_code_finished
                # line - it means there was an exception in the user code - so just re-raise it
raise
            # otherwise either we didn't reach the user code call, or we already passed it;
            # in that case it's some dbnd tracking error - just log it
if func_call:
_handle_tracking_error("tracking-init", func_call)
else:
log_exception_to_server()
        # if we didn't reach the user_code_called=True line - there was an error during
        # dbnd tracking initialization, so nothing was done - the user function wasn't called yet
if not user_code_called:
            # tracking_context is a context manager - user code will run on yield
yield _do_nothing_decorator
return
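    # A rough sketch (hypothetical wiring, added for illustration) of how a
    # decorator wrapper is expected to drive tracking_context(): the wrapped
    # user function runs on the yielded callback, which records its result.
    #
    #   manager = CallableTrackingManager(task_decorator)
    #   def wrapper(*args, **kwargs):
    #       with manager.tracking_context(args, kwargs) as track_result:
    #           return track_result(manager.callable(*args, **kwargs))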
def _handle_tracking_error(msg, func_call=None):
log_exception_to_server()
location = " for %s" % func_call.callable if func_call else ""
msg = "Failed during dbnd %s for %s, ignoring, and continue without tracking" % (
msg,
location,
)
if is_verbose():
logger.warning(
msg, exc_info=True,
)
else:
logger.info(msg)
def _do_nothing_decorator(f):
return f
def _log_inputs(task_run):
"""
For tracking mode. Logs InMemoryTarget inputs.
"""
try:
params = task_run.task._params
for param_value in params.get_param_values(ParameterFilters.INPUTS):
param, value = param_value.parameter, param_value.value
if isinstance(param_value, InMemoryTarget):
try:
param = param.modify(
value_meta_conf=ValueMetaConf(
log_preview=True, log_schema=True,
)
)
task_run.tracker.log_parameter_data(
parameter=param,
target=param_value,
value=value,
operation_type=DbndTargetOperationType.read,
operation_status=DbndTargetOperationStatus.OK,
)
except Exception as ex:
log_exception(
"Failed to log input param to tracking store.",
ex=ex,
non_critical=True,
)
except Exception as ex:
log_exception(
"Failed to log input params to tracking store.", ex=ex, non_critical=True
)
def _log_result(task_run, result):
# type: (TaskRun, Any) -> None
"""
For tracking mode. Logs the task result and adds it to the target_origin map to support relationships between
dynamic tasks.
"""
try:
result_param = task_run.task.task_params.get_param_value(RESULT_PARAM)
if not result_param:
logger.debug(
"No result params to log for task {}".format(task_run.task_af_id)
)
return
        # we know the parameter value is a target because this is an output param
        # the target is created during task creation
result_param_def, result_target = result_param.parameter, result_param.value
# spread result into relevant fields.
if isinstance(result_param_def, FuncResultParameter):
# assign all returned values to relevant band Outputs
if result is None:
return
for result_name, value in result_param_def.named_results(result):
                # we know the parameter value is a target because this is an output param
                # the target is created during task creation
parameter_value = task_run.task.task_params.get_param_value(result_name)
_log_parameter_value(
task_run,
parameter_definition=parameter_value.parameter,
target=parameter_value.value,
value=value,
)
else:
_log_parameter_value(
task_run,
parameter_definition=result_param_def,
target=result_target,
value=result,
)
except Exception as ex:
log_exception(
"Failed to log result to tracking store.", ex=ex, non_critical=True
)
def _log_parameter_value(task_run, parameter_definition, target, value):
# type: (TaskRun, ParameterDefinition, Target, Any) -> None
# make sure it will be logged correctly
parameter_definition = parameter_definition.modify(
value_meta_conf=ValueMetaConf(log_preview=True, log_schema=True)
)
try:
        # handle the case where the result is a Proxy
value_type = get_value_type_of_obj(value, parameter_definition.value_type)
task_run.run.target_origin.add(target, value, value_type)
except Exception as ex:
log_exception(
"Failed to register result to target tracking.", ex=ex, non_critical=True
)
try:
task_run.tracker.log_parameter_data(
parameter=parameter_definition, # was: task_run.task.task_definition.task_class.result,
target=target,
value=value,
operation_type=DbndTargetOperationType.write, # is it write? (or log?)
operation_status=DbndTargetOperationStatus.OK,
)
except Exception as ex:
log_exception(
"Failed to log result to tracking store.", ex=ex, non_critical=True
)
| 1.65625 | 2 |
api.py | Benardi/redis-basics | 0 | 3085 | import os
import logging
from json import loads, dumps
from datetime import timedelta
from argparse import ArgumentParser
from redis import Redis
from flask import Response, Flask, request
app = Flask(__name__)
log = logging.getLogger(__name__)
parser = ArgumentParser()
parser.add_argument("-a", "--address",
action="store", dest="address",
type=str, required=True,
help="Address for api")
parser.add_argument("-p", "--port",
action="store", dest="port",
type=str, required=True,
help="Port for api")
parser.add_argument("-c", "--crt",
action="store", dest="cert",
type=str, required=False,
help="Path to certificate for this API")
parser.add_argument("-k", "--key",
action="store", dest="key",
type=str, required=False,
help="Path to key of certificate used by this API")
parser.add_argument("-rp", "--redis-port",
action="store", dest="redis-port",
type=str, required=True,
help="Port for Redis client")
args = vars(parser.parse_args())
api_address = args["address"]
api_port = args["port"]
api_cert = args["cert"]
api_key = args["key"]
redis_port = args["redis-port"]
r = Redis(port=redis_port, charset="utf-8", decode_responses=True)
@app.route("/hash", methods=['POST'])
def create_redis_hash():
data = loads(request.data)
success = r.hmset(data["key"], data["pairs"])
if data.get("expire") is not None:
expiration = timedelta(**data.get("expire"))
r.expire(data["key"], expiration)
response_body = {"success": success}
response_body[data["key"]] = r.hgetall(data["key"])
return Response(dumps(response_body), status=200, mimetype="application/json")
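# Example request body for POST /hash (illustrative only; the optional "expire"
# mapping is forwarded as datetime.timedelta keyword arguments by the handler
# above):
#
#   {"key": "user:1", "pairs": {"name": "ada", "role": "admin"},
#    "expire": {"minutes": 5}}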
@app.route("/hash", methods=['PUT'])
def update_redis_hash():
data = loads(request.data)
success = r.hmset(data["key"], data["pairs"])
if data.get("expire") is not None:
expiration = timedelta(**data.get("expire"))
r.expire(data["key"], expiration)
if data.get("newkey") is not None:
r.rename(data["key"], data["newkey"])
response_body = {"success": success}
if data.get("newkey") is not None:
response_body[data["newkey"]] = r.hgetall(data["newkey"])
else:
response_body[data["key"]] = r.hgetall(data["key"])
return Response(dumps(response_body), status=200, mimetype="application/json")
@app.route("/hash", methods=['GET'])
def get_redis_hash():
response_body = {"success": True}
key = request.headers.get("key")
response_body[key] = r.hgetall(key)
return Response(dumps(response_body), status=200, mimetype="application/json")
@app.route("/key", methods=['DELETE'])
def delete_redis_key():
status = 200
key = request.headers.get("key")
success = r.delete(key)
if not success:
status = 404
response_body = {"success": bool(success)}
return Response(dumps(response_body), status=status, mimetype="application/json")
@app.route("/list", methods=['POST'])
def create_redis_list():
data = loads(request.data)
strat = data.get("strategy")
if strat is not None and strat == "left":
length = r.lpush(data["key"], *data["values"])
else:
length = r.rpush(data["key"], *data["values"])
response_body = {"length": length}
response_body[data["key"]] = r.lrange(data["key"], 0, -1)
return Response(dumps(response_body), status=200, mimetype="application/json")
@app.route("/list", methods=['GET'])
def get_entire_list():
response_body = {"success": True}
key = request.headers.get("key")
response_body[key] = r.lrange(key, 0, -1)
return Response(dumps(response_body), status=200, mimetype="application/json")
@app.route("/list/<idx>", methods=['GET'])
def get_list_at_idx(idx):
response_body = {"success": True}
key = request.headers.get("key")
response_body[key] = {}
response_body[key][str(idx)] = r.lindex(key, idx)
return Response(dumps(response_body), status=200, mimetype="application/json")
@app.route("/set", methods=['POST'])
def create_add_set():
data = loads(request.data)
length = r.sadd(data["key"], *data["values"])
response_body = {"length": length}
response_body[data["key"]] = list(r.smembers(data["key"]))
return Response(dumps(response_body), status=200, mimetype="application/json")
@app.route("/set/<n_items>", methods=['GET'])
def get_n_items_set(n_items):
response_body = {"success": True}
key = request.headers.get("key")
response_body = {key: list(r.srandmember(key, n_items))}
return Response(dumps(response_body), status=200, mimetype="application/json")
@app.route("/set", methods=['GET'])
def get_set():
response_body = {"success": True}
key = request.headers.get("key")
response_body = {key: list(r.smembers(key))}
return Response(dumps(response_body), status=200, mimetype="application/json")
def start_api(address, port, clnt_cert=None, clnt_key=None):
if clnt_cert is None or clnt_key is None:
app.run(host=address, port=port, debug=False)
else:
app.run(host=address, port=port,
ssl_context=(clnt_cert, clnt_key), debug=False)
if api_cert is None or api_key is None:
start_api(api_address, api_port)
else:
start_api(api_address, api_port, api_cert, api_key)
| 2.609375 | 3 |
zhihu_spider/ZhihuSpider/spiders/zhihu.py | Ki-Seki/gadgets | 1 | 3086 | """
Before starting this spider, Chrome must be started manually; the cmd commands are:
cd into the directory containing the Chrome executable
run: chrome.exe --remote-debugging-port=9222
Then visit http://127.0.0.1:9222/json in the browser address bar; if the page shows JSON data, the manual launch succeeded.
After starting this spider, pay attention to interacting with the command line!
What needs to be done in settings:
# ROBOTSTXT_OBEY = False  # if this is not disabled, the parse method cannot run
# COOKIES_ENABLED = True  # so that cookies are passed along automatically when Requests are forwarded
# USER_AGENT = an appropriate value
# DOWNLOADER_MIDDLEWARES configured so that the user agent can be rotated automatically
"""
import re
import json
import datetime
import scrapy
from scrapy.loader import ItemLoader
from urllib import parse
from ZhihuSpider.utils.browsezhihu import get_cookies
from ZhihuSpider import settings
from ZhihuSpider.items import ZhihuQuestionItem, ZhihuAnswerItem
class ZhihuSpider(scrapy.Spider):
name = 'zhihu'
allowed_domains = ['zhihu.com']
start_urls = ['http://zhihu.com/']
    # Generic url for requesting the first page of answers of a question
    # 0: question id, 1: offset, 2: limit
start_answer_urls = 'https://www.zhihu.com/api/v4/questions/{0}/answers?include=data%5B*%5D.is_normal%2Cadmin_closed_comment%2Creward_info%2Cis_collapsed%2Cannotation_action%2Cannotation_detail%2Ccollapse_reason%2Cis_sticky%2Ccollapsed_by%2Csuggest_edit%2Ccomment_count%2Ccan_comment%2Ccontent%2Ceditable_content%2Cattachment%2Cvoteup_count%2Creshipment_settings%2Ccomment_permission%2Ccreated_time%2Cupdated_time%2Creview_info%2Crelevant_info%2Cquestion%2Cexcerpt%2Cis_labeled%2Cpaid_info%2Cpaid_info_content%2Crelationship.is_authorized%2Cis_author%2Cvoting%2Cis_thanked%2Cis_nothelp%2Cis_recognized%3Bdata%5B*%5D.mark_infos%5B*%5D.url%3Bdata%5B*%5D.author.follower_count%2Cvip_info%2Cbadge%5B*%5D.topics%3Bdata%5B*%5D.settings.table_of_content.enabled&offset={1}&limit={2}&sort_by=default&platform=desktop'
headers = {
"HOST": "www.zhihu.com",
"Referer": "https://www.zhihu.com",
"User-Agent": settings.USER_AGENT
}
    # Extract all urls on the home page that point to questions
def parse(self, response, **kwargs):
        # .extract() is a function in parsel.selection, used to extract the data field from a collection of elements
all_urls = response.css("a::attr(href)").extract()
        # urllib.parse.urljoin can merge two partial urls into one
all_urls = [parse.urljoin(response.url, url) for url in all_urls]
all_urls = filter(lambda x: True if x.startswith("https") else False, all_urls)
for url in all_urls:
            # (/|$) matches either / or the end of the string
match_obj = re.match("(.*zhihu.com/question/(\d+))(/|$).*", url)
            if match_obj:  # the url points to a question page
question_url = match_obj.group(1)
question_id = match_obj.group(2)
yield scrapy.Request(question_url, callback=self.parse_question, headers=self.headers
                                     , meta={"question_id": question_id, "url": question_url})  # meta is passed down to the callback
def parse_question(self, response):
"""
        Extract the question item from a question page
"""
        # When using ItemLoader, every field value is a list
item_loader = ItemLoader(item=ZhihuQuestionItem(), response=response)
        item_loader.add_value("question_id", response.meta.get("question_id", 0))  # loaded via meta
item_loader.add_css("topics", "head > meta[name=keywords]::attr(content)")
item_loader.add_value("url", response.meta.get("url", ''))
item_loader.add_css("title", "h1.QuestionHeader-title::text")
item_loader.add_css("content", ".QuestionRichText span:nth-child(1)::text")
item_loader.add_css("answer_num", ".List-headerText > span::text, .ViewAll:nth-child(1) > a::text")
item_loader.add_css("comments_num", ".QuestionHeader-Comment button::text")
item_loader.add_css("watch_user_num", ".NumberBoard-itemValue::attr(title)")
item_loader.add_css("click_num", ".NumberBoard-itemValue::attr(title)")
        # About fetching create_time and update_time:
        # request the question's log url, then pass the item_loader content above down as a meta dict,
        # and finally hand it to get_create_update_of_question to pack the question_item and yield it.
        # The unfinished implementation is sketched below:
# tmp = response.css(".QuestionHeader-menu > a").extract()[0]
# log_url = parse.urljoin(self.start_urls[0], tmp)
# yield scrapy.Request(log_url, callback=self.get_create_update_of_question, headers=self.headers, meta=......)
question_item = item_loader.load_item()
yield question_item
yield scrapy.Request(self.start_answer_urls.format(response.meta.get("question_id", ''), 0, 20)
, callback=self.parse_answer, headers=self.headers)
# def get_create_update_of_question(self, response):
# pass
def parse_answer(self, response):
"""
        Extract answer items from an answer page
"""
answer_json = json.loads(response.text)
is_end = answer_json["paging"]["is_end"]
next_url = answer_json["paging"]["next"]
for answer in answer_json["data"]:
answer_item = ZhihuAnswerItem()
answer_item["answer_id"] = answer["id"]
answer_item["url"] = answer["url"]
answer_item["question_id"] = answer["question"]["id"]
answer_item["author_id"] = answer["author"]["id"]
answer_item["content"] = answer["content"] if "content" in answer else None
answer_item["praise_num"] = answer["voteup_count"]
answer_item["comments_num"] = answer["comment_count"]
answer_item["create_time"] = answer["created_time"]
answer_item["update_time"] = answer["updated_time"]
answer_item["crawl_time"] = datetime.datetime.now()
yield answer_item
if not is_end:
yield scrapy.Request(next_url, callback=self.parse_answer, headers=self.headers)
def start_requests(self):
        # Before using selenium, chrome must be started with the following cmd commands
# cd "C:\Program Files\Google\Chrome\Application"
# chrome.exe --remote-debugging-port=9222
        # The python code below cannot be used because the command waits for a return value, unless multithreading is used
# os.system('"C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe" --remote-debugging-port=9222')
cookies = get_cookies()
yield scrapy.Request(url=self.start_urls[0], dont_filter=True, cookies=cookies)
| 2.640625 | 3 |
tests/test_bindiff.py | Kyle-Kyle/angr | 6,132 | 3087 | import nose
import angr
import logging
l = logging.getLogger("angr.tests.test_bindiff")
import os
test_location = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'binaries', 'tests')
# todo make a better test
def test_bindiff_x86_64():
binary_path_1 = os.path.join(test_location, 'x86_64', 'bindiff_a')
binary_path_2 = os.path.join(test_location, 'x86_64', 'bindiff_b')
b = angr.Project(binary_path_1, load_options={"auto_load_libs": False})
b2 = angr.Project(binary_path_2, load_options={"auto_load_libs": False})
bindiff = b.analyses.BinDiff(b2)
identical_functions = bindiff.identical_functions
differing_functions = bindiff.differing_functions
unmatched_functions = bindiff.unmatched_functions
# check identical functions
nose.tools.assert_in((0x40064c, 0x40066a), identical_functions)
# check differing functions
nose.tools.assert_in((0x400616, 0x400616), differing_functions)
# check unmatched functions
nose.tools.assert_less_equal(len(unmatched_functions[0]), 1)
nose.tools.assert_less_equal(len(unmatched_functions[1]), 2)
# check for no major regressions
nose.tools.assert_greater(len(identical_functions), len(differing_functions))
nose.tools.assert_less(len(differing_functions), 4)
# check a function diff
fdiff = bindiff.get_function_diff(0x400616, 0x400616)
block_matches = { (a.addr, b.addr) for a, b in fdiff.block_matches }
nose.tools.assert_in((0x40064a, 0x400668), block_matches)
nose.tools.assert_in((0x400616, 0x400616), block_matches)
nose.tools.assert_in((0x40061e, 0x40061e), block_matches)
def run_all():
functions = globals()
all_functions = dict(filter((lambda kv: kv[0].startswith('test_')), functions.items()))
for f in sorted(all_functions.keys()):
if hasattr(all_functions[f], '__call__'):
all_functions[f]()
if __name__ == "__main__":
logging.getLogger("angr.analyses.bindiff").setLevel(logging.DEBUG)
import sys
if len(sys.argv) > 1:
globals()['test_' + sys.argv[1]]()
else:
run_all()
| 2.28125 | 2 |
main/handle_file.py | nucluster/us_states | 0 | 3088 | from pathlib import Path
BASE_DIR = Path(__file__).resolve().parent.parent
# def handle_uploaded_file(f):
# with open('screenshot.png', 'wb') as destination:
# # for chunk in f.chunks():
# # destination.write(chunk)
# destination.write(f)
with open(
BASE_DIR/'media'/'Greater_coat_of_arms_of_the_United_States.png', 'rb'
) as file:
flag = file.read()
# handle_uploaded_file(flag)
print(type(flag))
print(len(flag))
# print(flag)
# for place in sys.path:
# print(place) | 2.890625 | 3 |
2018/finals/pwn-gdb-as-a-service/web_challenge/challenge/gaas.py | iicarus-bit/google-ctf | 2,757 | 3089 | #!/usr/bin/env python3
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from aiohttp import web
import capstone
import functools
from gdbproc import GDBProcess
import socketio
import asyncio
import codecs
import os
enable_logging = False
premium = 'PREMIUM' in os.environ
if premium:
access_key = os.getenv('PREMIUM_KEY')
runnable = ['/home/user/printwebflag']
else:
access_key = os.getenv('TRIAL_KEY')
runnable = ['/bin/sleep', '20']
MAX_INSN_LEN = 15
capstone_md = capstone.Cs(capstone.CS_ARCH_X86, capstone.CS_MODE_64)
sio = socketio.AsyncServer()
app = web.Application()
sio.attach(app)
with open('index.html') as f:
index_html = f.read()
async def index(request):
if not 'key' in request.cookies:
return web.Response(status=401, text='permission denied (missing key)', content_type='text/html')
if request.cookies['key'] != access_key:
return web.Response(status=401, text='permission denied (invalid key)', content_type='text/html')
return web.Response(text=index_html, content_type='text/html')
app.add_routes([web.get('/', index),
web.get('/{name}', index)])
gdb_sessions = {}
stop_queue_readers = {}
async def on_shutdown(app):
    await asyncio.gather(*(delete_gdb_process(sid) for sid in list(gdb_sessions.keys())))
app.on_shutdown.append(on_shutdown)
def log(msg):
if enable_logging:
print('[*] {}'.format(msg))
@sio.on('connect')
def connect(sid, environ):
log('connected {}'.format(sid))
if not 'key={}'.format(access_key) in environ['HTTP_COOKIE']:
log('access_key not found {}'.format(environ['HTTP_COOKIE']))
return False
@sio.on('disconnect')
async def disconnect(sid):
log('disconnected {}'.format(sid))
await delete_gdb_process(sid)
async def stop_queue_reader(sid, queue):
while True:
pkt = await queue.get()
await update_all(sid)
async def create_gdb_process(sid):
stop_queue = asyncio.Queue()
gdb_sessions[sid] = await GDBProcess.create(runnable, stop_queue, env={'KEY': access_key}, log_fn=log)
loop = asyncio.get_event_loop()
stop_queue_readers[sid] = loop.create_task(stop_queue_reader(sid, stop_queue))
async def delete_gdb_process(sid):
if sid in gdb_sessions:
stop_queue_readers[sid].cancel()
del stop_queue_readers[sid]
await gdb_sessions[sid].release()
del gdb_sessions[sid]
@sio.on('start')
async def start(sid):
await delete_gdb_process(sid)
await create_gdb_process(sid)
# Reading registers doesn't work on ubuntu 18.04 for some reason.
    # Step once as a workaround.
step(sid)
async def update_all(sid):
log('updating sid {}'.format(sid))
regs_task = getregs(sid)
maps_task = getmaps(sid)
asm_task = getasm(sid, {'addr': await gdb_sessions[sid].get_reg('rip'), 'count': 100})
await asyncio.gather(regs_task, maps_task, asm_task)
log('update done')
@sio.on('step')
def step(sid):
gdb_sessions[sid].step()
@sio.on('cont')
def cont(sid):
gdb_sessions[sid].cont()
@sio.on('stop')
def stop(sid):
gdb_sessions[sid].interrupt()
async def getregs(sid):
regs = await gdb_sessions[sid].get_regs()
await sio.emit('regs', regs, room=sid)
@sio.on('mem')
async def getmem(sid, msg):
addr = msg['addr']
count = msg['count']
data = gdb_sessions[sid].read_mem(addr, count)
await sio.emit('mem', {'addr': addr, 'data': data}, room=sid)
async def getmaps(sid):
maps = gdb_sessions[sid].maps()
await sio.emit('maps', maps, room=sid)
@sio.on('break')
async def setbreakpoint(sid, data):
addr = data['addr']
await gdb_sessions[sid].set_breakpoint(addr)
await sio.emit('breakpoints', gdb_sessions[sid].breakpoints(), room=sid)
@sio.on('unbreak')
async def rmbreakpoint(sid, data):
addr = data['addr']
await gdb_sessions[sid].remove_breakpoint(addr)
await sio.emit('breakpoints', gdb_sessions[sid].breakpoints(), room=sid)
@sio.on('search')
async def search(sid, data):
q = data['q']
qtype = data['type']
await sio.emit('search_result', gdb_sessions[sid].search(q.encode(), qtype), room=sid)
async def getasm(sid, data):
addr = data['addr']
count = data['count']
result = []
for _ in range(count):
data = gdb_sessions[sid].read_mem(addr, MAX_INSN_LEN)
try:
disasm = next(capstone_md.disasm_lite(data, addr))
except StopIteration:
break
result.append(disasm)
addr += disasm[1]
await sio.emit('asm', result, room=sid)
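# Note (added): capstone's disasm_lite() yields lightweight tuples of the form
# (address, size, mnemonic, op_str), so each entry appended to `result` above
# looks like, e.g., (4198400, 1, 'push', 'rbp').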
if __name__ == '__main__':
web.run_app(app)
| 1.882813 | 2 |
examples/multi_physics/piezo_elasticity.py | BubuLK/sfepy | 0 | 3090 | <filename>examples/multi_physics/piezo_elasticity.py
r"""
Piezo-elasticity problem - linear elastic material with piezoelectric
effects.
Find :math:`\ul{u}`, :math:`\phi` such that:
.. math::
- \omega^2 \int_{Y} \rho\ \ul{v} \cdot \ul{u}
+ \int_{Y} D_{ijkl}\ e_{ij}(\ul{v}) e_{kl}(\ul{u})
- \int_{Y_2} g_{kij}\ e_{ij}(\ul{v}) \nabla_k \phi
= 0
\;, \quad \forall \ul{v} \;,
\int_{Y_2} g_{kij}\ e_{ij}(\ul{u}) \nabla_k \psi
+ \int_{Y} K_{ij} \nabla_i \psi \nabla_j \phi
= 0
\;, \quad \forall \psi \;,
where
.. math::
D_{ijkl} = \mu (\delta_{ik} \delta_{jl}+\delta_{il} \delta_{jk}) +
\lambda \ \delta_{ij} \delta_{kl}
\;.
"""
from __future__ import absolute_import
import os
import numpy as nm
from sfepy import data_dir
from sfepy.discrete.fem import MeshIO
from sfepy.mechanics.matcoefs import stiffness_from_lame
import six
def post_process(out, pb, state, extend=False):
"""
Calculate and output the strain and stresses for the given state.
"""
from sfepy.base.base import Struct
from sfepy.discrete.fem import extend_cell_data
ev = pb.evaluate
strain = ev('ev_cauchy_strain.i.Y(u)', mode='el_avg')
stress = ev('ev_cauchy_stress.i.Y(inclusion.D, u)', mode='el_avg')
piezo = -ev('ev_piezo_stress.i.Y2(inclusion.coupling, phi)',
mode='el_avg')
piezo = extend_cell_data(piezo, pb.domain, 'Y2', val=0.0)
piezo_strain = ev('ev_piezo_strain.i.Y(inclusion.coupling, u)',
mode='el_avg')
out['cauchy_strain'] = Struct(name='output_data', mode='cell',
data=strain, dofs=None)
out['elastic_stress'] = Struct(name='output_data', mode='cell',
data=stress, dofs=None)
out['piezo_stress'] = Struct(name='output_data', mode='cell',
data=piezo, dofs=None)
out['piezo_strain'] = Struct(name='output_data', mode='cell',
data=piezo_strain, dofs=None)
out['total_stress'] = Struct(name='output_data', mode='cell',
data=stress + piezo, dofs=None)
return out
filename_mesh = data_dir + '/meshes/2d/special/circle_in_square.mesh'
## filename_mesh = data_dir + '/meshes/2d/special/circle_in_square_small.mesh'
## filename_mesh = data_dir + '/meshes/3d/special/cube_sphere.mesh'
## filename_mesh = data_dir + '/meshes/2d/special/cube_cylinder.mesh'
omega = 1
omega_squared = omega**2
conf_dir = os.path.dirname(__file__)
io = MeshIO.any_from_filename(filename_mesh, prefix_dir=conf_dir)
bbox, dim = io.read_bounding_box(ret_dim=True)
geom = {3 : '3_4', 2 : '2_3'}[dim]
x_left, x_right = bbox[:,0]
options = {
'post_process_hook' : 'post_process',
}
regions = {
'Y' : 'all',
'Y1' : 'cells of group 1',
'Y2' : 'cells of group 2',
'Y2_Surface': ('r.Y1 *v r.Y2', 'facet'),
'Left' : ('vertices in (x < %f)' % (x_left + 1e-3), 'facet'),
'Right' : ('vertices in (x > %f)' % (x_right - 1e-3), 'facet'),
}
fields = {
'displacement' : ('real', dim, 'Y', 1),
'potential' : ('real', 1, 'Y', 1),
}
variables = {
'u' : ('unknown field', 'displacement', 0),
'v' : ('test field', 'displacement', 'u'),
'phi' : ('unknown field', 'potential', 1),
'psi' : ('test field', 'potential', 'phi'),
}
ebcs = {
'u1' : ('Left', {'u.all' : 0.0}),
'u2' : ('Right', {'u.0' : 0.1}),
'phi' : ('Y2_Surface', {'phi.all' : 0.0}),
}
def get_inclusion_pars(ts, coor, mode=None, **kwargs):
"""TODO: implement proper 3D -> 2D transformation of constitutive
matrices."""
if mode == 'qp':
_, dim = coor.shape
sym = (dim + 1) * dim // 2
dielectric = nm.eye(dim, dtype=nm.float64)
# !!!
coupling = nm.ones((dim, sym), dtype=nm.float64)
# coupling[0,1] = 0.2
out = {
# Lame coefficients in 1e+10 Pa.
'D' : stiffness_from_lame(dim=2, lam=0.1798, mu=0.148),
# dielectric tensor
'dielectric' : dielectric,
# piezoelectric coupling
'coupling' : coupling,
'density' : nm.array([[0.1142]]), # in 1e4 kg/m3
}
for key, val in six.iteritems(out):
out[key] = val[None, ...]
return out
materials = {
'inclusion' : (None, 'get_inclusion_pars')
}
functions = {
'get_inclusion_pars' : (get_inclusion_pars,),
}
integrals = {
'i' : 2,
}
equations = {
'1' : """- %f * dw_volume_dot.i.Y(inclusion.density, v, u)
+ dw_lin_elastic.i.Y(inclusion.D, v, u)
- dw_piezo_coupling.i.Y2(inclusion.coupling, v, phi)
= 0""" % omega_squared,
'2' : """dw_piezo_coupling.i.Y2(inclusion.coupling, u, psi)
+ dw_diffusion.i.Y(inclusion.dielectric, psi, phi)
= 0""",
}
solvers = {
'ls' : ('ls.scipy_direct', {}),
'newton' : ('nls.newton',
{'i_max' : 1,
'eps_a' : 1e-10,
}),
}
| 2.421875 | 2 |
01-logica-de-programacao-e-algoritmos/Aula 06/01 Tuplas/1.2 Desempacotamento de parametros em funcoes/ex01.py | rafaelbarretomg/Uninter | 0 | 3091 | <reponame>rafaelbarretomg/Uninter<gh_stars>0
# Unpacking parameters in functions
# summing the values of a tuple
def soma(*num):
soma = 0
print('Tupla: {}' .format(num))
for i in num:
soma += i
return soma
# Main program
print('Resultado: {}\n' .format(soma(1, 2)))
print('Resultado: {}\n' .format(soma(1, 2, 3, 4, 5, 6, 7, 8, 9)))
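# Extra illustration (not part of the original exercise): an existing tuple can
# also be forwarded into the function with the * operator.
# valores = (10, 20, 30)
# print('Resultado: {}\n' .format(soma(*valores)))  # Resultado: 60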
| 3.484375 | 3 |
services/model.py | theallknowng/eKheti | 1 | 3092 | <reponame>theallknowng/eKheti<filename>services/model.py<gh_stars>1-10
import pandas
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline
import keras
import sys
import json
import requests
import numpy as np
# define baseline model
def baseline_model():
# create model
model = Sequential()
model.add(Dense(56, input_dim=28, activation='relu'))
model.add(Dense(112, input_dim=56, activation='relu'))
model.add(Dense(7, activation='softmax'))
# Compile model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
model = baseline_model()
model.load_weights("../model1.h5")
# data = sys.argv[1]
# data = '{"pH min":5.7,"pH max":7,"nitrogen min":109,"nitrogen max":146,"phosphorus min":20,"phosphorus max":30,"potasium min":78,"potasium max":115,"calcium min":270,"calcium max":990,"magnesium min":46,"magnesium max":96,"sulphur min":10,"sulphur max":10,"iron min":44,"iron max":46,"zinc min":3.87,"zinc max":5.87,"manganese min":4.81,"manganese max":4.81,"copper min":21,"copper max":26,"boron min":1.25,"boron max":2.25,"temperature min":25,"temperature max":35,"precipitation min":50,"precipitation max":60,"irrigation":"yes ","region":"barshi"}'
# data = '{"pH min":7.6,"pH max":7.6,"nitrogen min":150.53,"nitrogen max":150.53,"phosphorus min":55.96,"phosphorus max":55.96,"potasium min":728,"potasium max":728,"calcium min":45.56,"calcium max":45.56,"magnesium min":36.46,"magnesium max":36.46,"sulphur min":44.69,"sulphur max":44.69,"iron min":2.7,"iron max":2.7,"zinc min":0.49,"zinc max":0.49,"manganese min":2.16,"manganese max":2.16,"copper min":3.5,"copper max":3.5,"boron min":0.63,"boron max":0.63,"temperature min":21,"temperature max":31,"precipitation min":60.18,"precipitation max":60.18,"irrigation":"yes ","region":"barshi"}'
data= '{"pH min":5.7,"pH max":7,"nitrogen min":109,"nitrogen max":146,"phosphorus min":20,"phosphorus max":30,"potasium min":78,"potasium max":115,"calcium min":270,"calcium max":990,"magnesium min":46,"magnesium max":96,"sulphur min":10,"sulphur max":10,"iron min":44,"iron max":46,"zinc min":3.87,"zinc max":5.87,"manganese min":4.81,"manganese max":4.81,"copper min":21,"copper max":26,"boron min":1.25,"boron max":2.25,"temperature min":25,"temperature max":35,"precipitation min":50,"precipitation max":60,"irrigation":"yes ","region":"barshi"}'
data = json.loads(data)
dataframe = pandas.DataFrame(data,index=[0])
dataset = dataframe.values
X = dataset[:,0:28].astype(float)
op=model.predict(X)
#op = model.predict_classes(X)
#print(op)
#classes = np.argmax(op)
#print(classes)
best_n = np.argsort(op, axis=1)[:,-7:]
print(best_n[0])
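# Added note: np.argsort sorts ascending, so the slice [:, -7:] keeps the
# indices of the 7 highest-probability classes, with the last entry being the
# single most likely one. A tiny standalone illustration:
#
#   probs = np.array([[0.1, 0.05, 0.6, 0.25]])
#   np.argsort(probs, axis=1)[:, -2:]   # -> array([[3, 2]])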
| 2.53125 | 3 |
tests/sentry/api/endpoints/test_project_details.py | erhuabushuo/sentry | 0 | 3093 | from django.core.urlresolvers import reverse
from sentry.models import Project
from sentry.testutils import APITestCase
class ProjectDetailsTest(APITestCase):
def test_simple(self):
project = self.project # force creation
self.login_as(user=self.user)
url = reverse('sentry-api-0-project-details', kwargs={'project_id': project.id})
response = self.client.get(url)
assert response.status_code == 200
assert response.data['id'] == str(project.id)
class ProjectUpdateTest(APITestCase):
def test_simple(self):
project = self.project # force creation
self.login_as(user=self.user)
url = reverse('sentry-api-0-project-details', kwargs={'project_id': project.id})
resp = self.client.put(url, data={
'name': 'hello world',
'slug': 'foobar',
})
assert resp.status_code == 200, resp.content
project = Project.objects.get(id=project.id)
assert project.name == 'hello world'
assert project.slug == 'foobar'
class ProjectDeleteTest(APITestCase):
def test_simple(self):
project = self.create_project()
self.login_as(user=self.user)
url = reverse('sentry-api-0-project-details', kwargs={'project_id': project.id})
with self.settings(SENTRY_PROJECT=0):
response = self.client.delete(url)
assert response.status_code == 204
assert not Project.objects.filter(id=project.id).exists()
def test_internal_project(self):
project = self.create_project()
self.login_as(user=self.user)
url = reverse('sentry-api-0-project-details', kwargs={'project_id': project.id})
with self.settings(SENTRY_PROJECT=project.id):
response = self.client.delete(url)
assert response.status_code == 403
| 2.234375 | 2 |
tests/test_table.py | databook1/python-pptx | 0 | 3094 | # encoding: utf-8
"""Unit-test suite for `pptx.table` module."""
import pytest
from pptx.dml.fill import FillFormat
from pptx.dml.border import BorderFormat
from pptx.enum.text import MSO_ANCHOR
from pptx.oxml.ns import qn
from pptx.oxml.table import CT_Table, CT_TableCell, TcRange
from pptx.shapes.graphfrm import GraphicFrame
from pptx.table import (
_Cell,
_CellCollection,
_Column,
_ColumnCollection,
_Row,
_RowCollection,
Table,
)
from pptx.text.text import TextFrame
from pptx.util import Inches, Length, Pt
from .unitutil.cxml import element, xml
from .unitutil.mock import call, class_mock, instance_mock, property_mock
class DescribeTable(object):
"""Unit-test suite for `pptx.table.Table` objects."""
def it_provides_access_to_its_cells(self, tbl_, tc_, _Cell_, cell_):
row_idx, col_idx = 4, 2
tbl_.tc.return_value = tc_
_Cell_.return_value = cell_
table = Table(tbl_, None)
cell = table.cell(row_idx, col_idx)
tbl_.tc.assert_called_once_with(row_idx, col_idx)
_Cell_.assert_called_once_with(tc_, table)
assert cell is cell_
def it_provides_access_to_its_columns(self, request):
columns_ = instance_mock(request, _ColumnCollection)
_ColumnCollection_ = class_mock(
request, "pptx.table._ColumnCollection", return_value=columns_
)
tbl = element("a:tbl")
table = Table(tbl, None)
columns = table.columns
_ColumnCollection_.assert_called_once_with(tbl, table)
assert columns is columns_
def it_can_iterate_its_grid_cells(self, request, _Cell_):
tbl = element("a:tbl/(a:tr/(a:tc,a:tc),a:tr/(a:tc,a:tc))")
expected_tcs = tbl.xpath(".//a:tc")
expected_cells = _Cell_.side_effect = [
instance_mock(request, _Cell, name="cell%d" % idx) for idx in range(4)
]
table = Table(tbl, None)
cells = list(table.iter_cells())
assert cells == expected_cells
assert _Cell_.call_args_list == [call(tc, table) for tc in expected_tcs]
def it_provides_access_to_its_rows(self, request):
rows_ = instance_mock(request, _RowCollection)
_RowCollection_ = class_mock(
request, "pptx.table._RowCollection", return_value=rows_
)
tbl = element("a:tbl")
table = Table(tbl, None)
rows = table.rows
_RowCollection_.assert_called_once_with(tbl, table)
assert rows is rows_
def it_updates_graphic_frame_width_on_width_change(self, dx_fixture):
table, expected_width = dx_fixture
table.notify_width_changed()
assert table._graphic_frame.width == expected_width
def it_updates_graphic_frame_height_on_height_change(self, dy_fixture):
table, expected_height = dy_fixture
table.notify_height_changed()
assert table._graphic_frame.height == expected_height
# fixtures -------------------------------------------------------
@pytest.fixture
def dx_fixture(self, graphic_frame_):
tbl_cxml = "a:tbl/a:tblGrid/(a:gridCol{w=111},a:gridCol{w=222})"
table = Table(element(tbl_cxml), graphic_frame_)
expected_width = 333
return table, expected_width
@pytest.fixture
def dy_fixture(self, graphic_frame_):
tbl_cxml = "a:tbl/(a:tr{h=100},a:tr{h=200})"
table = Table(element(tbl_cxml), graphic_frame_)
expected_height = 300
return table, expected_height
# fixture components ---------------------------------------------
@pytest.fixture
def _Cell_(self, request):
return class_mock(request, "pptx.table._Cell")
@pytest.fixture
def cell_(self, request):
return instance_mock(request, _Cell)
@pytest.fixture
def graphic_frame_(self, request):
return instance_mock(request, GraphicFrame)
@pytest.fixture
def tbl_(self, request):
return instance_mock(request, CT_Table)
@pytest.fixture
def tc_(self, request):
return instance_mock(request, CT_TableCell)
class DescribeTableBooleanProperties(object):
def it_knows_its_boolean_property_settings(self, boolprop_get_fixture):
table, boolprop_name, expected_value = boolprop_get_fixture
boolprop_value = getattr(table, boolprop_name)
assert boolprop_value is expected_value
def it_can_change_its_boolean_property_settings(self, boolprop_set_fixture):
table, boolprop_name, new_value, expected_xml = boolprop_set_fixture
setattr(table, boolprop_name, new_value)
assert table._tbl.xml == expected_xml
# fixtures -------------------------------------------------------
@pytest.fixture(
params=[
("a:tbl", "first_row", False),
("a:tbl/a:tblPr", "first_row", False),
("a:tbl/a:tblPr{firstRow=1}", "first_row", True),
("a:tbl/a:tblPr{firstRow=0}", "first_row", False),
("a:tbl/a:tblPr{firstRow=true}", "first_row", True),
("a:tbl/a:tblPr{firstRow=false}", "first_row", False),
("a:tbl/a:tblPr{firstCol=1}", "first_col", True),
("a:tbl/a:tblPr{lastRow=0}", "last_row", False),
("a:tbl/a:tblPr{lastCol=true}", "last_col", True),
("a:tbl/a:tblPr{bandRow=false}", "horz_banding", False),
("a:tbl/a:tblPr", "vert_banding", False),
]
)
def boolprop_get_fixture(self, request):
tbl_cxml, boolprop_name, expected_value = request.param
table = Table(element(tbl_cxml), None)
return table, boolprop_name, expected_value
@pytest.fixture(
params=[
("a:tbl", "first_row", True, "a:tbl/a:tblPr{firstRow=1}"),
("a:tbl", "first_row", False, "a:tbl/a:tblPr"),
("a:tbl/a:tblPr", "first_row", True, "a:tbl/a:tblPr{firstRow=1}"),
("a:tbl/a:tblPr", "first_row", False, "a:tbl/a:tblPr"),
(
"a:tbl/a:tblPr{firstRow=true}",
"first_row",
True,
"a:tbl/a:tblPr{firstRow=1}",
),
("a:tbl/a:tblPr{firstRow=false}", "first_row", False, "a:tbl/a:tblPr"),
(
"a:tbl/a:tblPr{bandRow=1}",
"first_row",
True,
"a:tbl/a:tblPr{bandRow=1,firstRow=1}",
),
("a:tbl", "first_col", True, "a:tbl/a:tblPr{firstCol=1}"),
("a:tbl", "last_row", True, "a:tbl/a:tblPr{lastRow=1}"),
("a:tbl", "last_col", True, "a:tbl/a:tblPr{lastCol=1}"),
("a:tbl", "horz_banding", True, "a:tbl/a:tblPr{bandRow=1}"),
("a:tbl", "vert_banding", True, "a:tbl/a:tblPr{bandCol=1}"),
]
)
def boolprop_set_fixture(self, request):
tbl_cxml, boolprop_name, new_value, expected_tbl_cxml = request.param
table = Table(element(tbl_cxml), None)
expected_xml = xml(expected_tbl_cxml)
return table, boolprop_name, new_value, expected_xml
class Describe_Cell(object):
"""Unit-test suite for `pptx.table._Cell` object."""
def it_is_equal_to_other_instance_having_same_tc(self):
tc = element("a:tc")
other_tc = element("a:tc")
cell = _Cell(tc, None)
cell_with_same_tc = _Cell(tc, None)
cell_with_other_tc = _Cell(other_tc, None)
assert cell == cell_with_same_tc
assert cell != cell_with_other_tc
def it_has_a_fill(self, fill_fixture):
cell = fill_fixture
assert isinstance(cell.fill, FillFormat)
def it_knows_whether_it_is_merge_origin_cell(self, origin_fixture):
tc, expected_value = origin_fixture
cell = _Cell(tc, None)
is_merge_origin = cell.is_merge_origin
assert is_merge_origin is expected_value
def it_knows_whether_it_is_spanned(self, spanned_fixture):
tc, expected_value = spanned_fixture
cell = _Cell(tc, None)
is_spanned = cell.is_spanned
assert is_spanned is expected_value
def it_knows_its_margin_settings(self, margin_get_fixture):
cell, margin_prop_name, expected_value = margin_get_fixture
margin_value = getattr(cell, margin_prop_name)
assert margin_value == expected_value
def it_can_change_its_margin_settings(self, margin_set_fixture):
cell, margin_prop_name, new_value, expected_xml = margin_set_fixture
setattr(cell, margin_prop_name, new_value)
assert cell._tc.xml == expected_xml
def it_raises_on_margin_assigned_other_than_int_or_None(
self, margin_raises_fixture
):
cell, margin_attr_name, val_of_invalid_type = margin_raises_fixture
with pytest.raises(TypeError):
setattr(cell, margin_attr_name, val_of_invalid_type)
def it_can_merge_a_range_of_cells(self, TcRange_, tc_range_):
tbl = element("a:tbl/(a:tr/(a:tc,a:tc),a:tr/(a:tc,a:tc))")
tc, other_tc = tbl.tc(0, 0), tbl.tc(1, 1)
TcRange_.return_value = tc_range_
tc_range_.contains_merged_cell = False
tc_range_.dimensions = 2, 2
def tcs(*rowcols):
return (tbl.tc(*rowcol) for rowcol in rowcols)
tc_range_.iter_top_row_tcs.return_value = tcs((0, 0), (0, 1))
tc_range_.iter_left_col_tcs.return_value = tcs((0, 0), (1, 0))
tc_range_.iter_except_left_col_tcs.return_value = tcs((0, 1), (1, 1))
tc_range_.iter_except_top_row_tcs.return_value = tcs((1, 0), (1, 1))
expected_xml = xml(
"a:tbl/(a:tr/(a:tc{gridSpan=2,rowSpan=2},a:tc{rowSpan=2,hMerge=1"
"}),a:tr/(a:tc{gridSpan=2,vMerge=1},a:tc{hMerge=1,vMerge=1}))"
)
cell, other_cell = _Cell(tc, None), _Cell(other_tc, None)
cell.merge(other_cell)
TcRange_.assert_called_once_with(tc, other_tc)
tc_range_.move_content_to_origin.assert_called_once_with()
assert tbl.xml == expected_xml
def but_it_raises_when_cells_are_from_different_tables(self, TcRange_, tc_range_):
TcRange_.return_value = tc_range_
tc_range_.in_same_table = False
cell, other_cell = _Cell(None, None), _Cell(None, None)
with pytest.raises(ValueError) as e:
cell.merge(other_cell)
assert "different table" in str(e.value)
def and_it_raises_when_range_contains_merged_cell(self, TcRange_, tc_range_):
TcRange_.return_value = tc_range_
tc_range_.contains_merged_cell = True
cell, other_cell = _Cell(None, None), _Cell(None, None)
with pytest.raises(ValueError) as e:
cell.merge(other_cell)
assert "contains one or more merged cells" in str(e.value)
def it_knows_how_many_rows_the_merge_spans(self, height_fixture):
tc, expected_value = height_fixture
cell = _Cell(tc, None)
span_height = cell.span_height
assert span_height == expected_value
def it_knows_how_many_columns_the_merge_spans(self, width_fixture):
tc, expected_value = width_fixture
cell = _Cell(tc, None)
span_width = cell.span_width
assert span_width == expected_value
def it_can_split_a_merged_cell(self, split_fixture):
origin_tc, range_tcs = split_fixture
cell = _Cell(origin_tc, None)
cell.split()
assert all(tc.gridSpan == 1 for tc in range_tcs)
assert all(tc.rowSpan == 1 for tc in range_tcs)
assert all(not tc.hMerge for tc in range_tcs)
assert all(not tc.vMerge for tc in range_tcs)
def but_it_raises_when_cell_to_be_split_is_not_merge_origin(self):
tc = element("a:tbl/a:tr/a:tc").xpath("//a:tc")[0]
cell = _Cell(tc, None)
with pytest.raises(ValueError) as e:
cell.split()
assert "not a merge-origin cell" in str(e.value)
def it_knows_what_text_it_contains(self, text_frame_prop_, text_frame_):
text_frame_prop_.return_value = text_frame_
text_frame_.text = "foobar"
cell = _Cell(None, None)
text = cell.text
assert text == "foobar"
def it_can_change_its_text(self, text_frame_prop_, text_frame_):
text_frame_prop_.return_value = text_frame_
cell = _Cell(None, None)
cell.text = "føøbår"
assert text_frame_.text == "føøbår"
def it_knows_its_vertical_anchor_setting(self, anchor_get_fixture):
cell, expected_value = anchor_get_fixture
assert cell.vertical_anchor == expected_value
def it_can_change_its_vertical_anchor(self, anchor_set_fixture):
cell, new_value, expected_xml = anchor_set_fixture
cell.vertical_anchor = new_value
assert cell._tc.xml == expected_xml
def it_knows_it_has_border_settings(self, border_fixture):
cell = border_fixture
assert isinstance(cell.border_left, BorderFormat)
assert isinstance(cell.border_right, BorderFormat)
assert isinstance(cell.border_top, BorderFormat)
assert isinstance(cell.border_bottom, BorderFormat)
assert isinstance(cell.border_tl_br, BorderFormat)
assert isinstance(cell.border_bl_tr, BorderFormat)
# fixtures -------------------------------------------------------
@pytest.fixture(
params=[
("a:tc", None),
("a:tc/a:tcPr", None),
("a:tc/a:tcPr{anchor=t}", MSO_ANCHOR.TOP),
("a:tc/a:tcPr{anchor=ctr}", MSO_ANCHOR.MIDDLE),
("a:tc/a:tcPr{anchor=b}", MSO_ANCHOR.BOTTOM),
]
)
def anchor_get_fixture(self, request):
tc_cxml, expected_value = request.param
cell = _Cell(element(tc_cxml), None)
return cell, expected_value
@pytest.fixture(
params=[
("a:tc", None, "a:tc"),
("a:tc", MSO_ANCHOR.TOP, "a:tc/a:tcPr{anchor=t}"),
("a:tc", MSO_ANCHOR.MIDDLE, "a:tc/a:tcPr{anchor=ctr}"),
("a:tc", MSO_ANCHOR.BOTTOM, "a:tc/a:tcPr{anchor=b}"),
("a:tc/a:tcPr{anchor=t}", MSO_ANCHOR.MIDDLE, "a:tc/a:tcPr{anchor=ctr}"),
("a:tc/a:tcPr{anchor=ctr}", None, "a:tc/a:tcPr"),
]
)
def anchor_set_fixture(self, request):
tc_cxml, new_value, expected_tc_cxml = request.param
cell = _Cell(element(tc_cxml), None)
expected_xml = xml(expected_tc_cxml)
return cell, new_value, expected_xml
@pytest.fixture
def fill_fixture(self, cell):
return cell
@pytest.fixture
def border_fixture(self, cell):
return cell
@pytest.fixture(
params=[("a:tc", 1), ("a:tc{gridSpan=2}", 1), ("a:tc{rowSpan=42}", 42)]
)
def height_fixture(self, request):
tc_cxml, expected_value = request.param
tc = element(tc_cxml)
return tc, expected_value
@pytest.fixture(
params=[
("a:tc/a:tcPr{marL=82296}", "margin_left", Inches(0.09)),
("a:tc/a:tcPr{marR=73152}", "margin_right", Inches(0.08)),
("a:tc/a:tcPr{marT=64008}", "margin_top", Inches(0.07)),
("a:tc/a:tcPr{marB=54864}", "margin_bottom", Inches(0.06)),
("a:tc", "margin_left", Inches(0.1)),
("a:tc/a:tcPr", "margin_right", Inches(0.1)),
("a:tc", "margin_top", Inches(0.05)),
("a:tc/a:tcPr", "margin_bottom", Inches(0.05)),
]
)
def margin_get_fixture(self, request):
tc_cxml, margin_prop_name, expected_value = request.param
cell = _Cell(element(tc_cxml), None)
return cell, margin_prop_name, expected_value
@pytest.fixture(
params=[
("a:tc", "margin_left", Inches(0.08), "a:tc/a:tcPr{marL=73152}"),
("a:tc", "margin_right", Inches(0.08), "a:tc/a:tcPr{marR=73152}"),
("a:tc", "margin_top", Inches(0.08), "a:tc/a:tcPr{marT=73152}"),
("a:tc", "margin_bottom", Inches(0.08), "a:tc/a:tcPr{marB=73152}"),
("a:tc", "margin_left", None, "a:tc"),
("a:tc/a:tcPr{marL=42}", "margin_left", None, "a:tc/a:tcPr"),
]
)
def margin_set_fixture(self, request):
tc_cxml, margin_prop_name, new_value, expected_tc_cxml = request.param
cell = _Cell(element(tc_cxml), None)
expected_xml = xml(expected_tc_cxml)
return cell, margin_prop_name, new_value, expected_xml
@pytest.fixture(
params=["margin_left", "margin_right", "margin_top", "margin_bottom"]
)
def margin_raises_fixture(self, request):
margin_prop_name = request.param
cell = _Cell(element("a:tc"), None)
val_of_invalid_type = "foobar"
return cell, margin_prop_name, val_of_invalid_type
@pytest.fixture(
params=[
("a:tc", False),
("a:tc{gridSpan=1}", False),
("a:tc{hMerge=1}", False),
("a:tc{gridSpan=2,vMerge=1}", False),
("a:tc{gridSpan=2}", True),
("a:tc{rowSpan=2}", True),
("a:tc{gridSpan=2,rowSpan=3}", True),
]
)
def origin_fixture(self, request):
tc_cxml, expected_value = request.param
tc = element(tc_cxml)
return tc, expected_value
@pytest.fixture(
params=[
("a:tc", False),
("a:tc{gridSpan=2}", False),
("a:tc{hMerge=1}", True),
("a:tc{gridSpan=2,vMerge=1}", True),
("a:tc{rowSpan=2,hMerge=true}", True),
("a:tc{gridSpan=2,rowSpan=3}", False),
]
)
def spanned_fixture(self, request):
tc_cxml, expected_value = request.param
tc = element(tc_cxml)
return tc, expected_value
@pytest.fixture(
params=[
(
"a:tbl/(a:tr/(a:tc{gridSpan=2},a:tc{hMerge=1}),a:tr/(a:tc,a:tc))",
0,
[0, 1],
),
(
"a:tbl/(a:tr/(a:tc{rowSpan=2},a:tc),a:tr/(a:tc{vMerge=1},a:tc))",
0,
[0, 2],
),
(
"a:tbl/(a:tr/(a:tc{gridSpan=2,rowSpan=2},a:tc{hMerge=1,rowSpan=2}),"
"a:tr/(a:tc{gridSpan=2,vMerge=1},a:tc{hMerge=1,vMerge=1}))",
0,
[0, 1, 2, 3],
),
]
)
def split_fixture(self, request):
tbl_cxml, origin_tc_idx, range_tc_idxs = request.param
tcs = element(tbl_cxml).xpath("//a:tc")
origin_tc = tcs[origin_tc_idx]
range_tcs = tuple(tcs[idx] for idx in range_tc_idxs)
return origin_tc, range_tcs
@pytest.fixture(
params=[("a:tc", 1), ("a:tc{rowSpan=2}", 1), ("a:tc{gridSpan=24}", 24)]
)
def width_fixture(self, request):
tc_cxml, expected_value = request.param
tc = element(tc_cxml)
return tc, expected_value
# fixture components ---------------------------------------------
@pytest.fixture
def cell(self):
return _Cell(element("a:tc"), None)
@pytest.fixture
def TcRange_(self, request):
return class_mock(request, "pptx.table.TcRange")
@pytest.fixture
def tc_range_(self, request):
return instance_mock(request, TcRange)
@pytest.fixture
def text_frame_(self, request):
return instance_mock(request, TextFrame)
@pytest.fixture
def text_frame_prop_(self, request):
return property_mock(request, _Cell, "text_frame")
class Describe_CellCollection(object):
def it_knows_how_many_cells_it_contains(self, len_fixture):
cells, expected_count = len_fixture
assert len(cells) == expected_count
def it_can_iterate_over_the_cells_it_contains(self, iter_fixture):
cell_collection, _Cell_, calls, expected_cells = iter_fixture
cells = list(cell_collection)
assert _Cell_.call_args_list == calls
assert cells == expected_cells
def it_supports_indexed_access(self, _Cell_, cell_):
tr = element("a:tr/(a:tc, a:tc, a:tc)")
tcs = tr.xpath("//a:tc")
_Cell_.return_value = cell_
cell_collection = _CellCollection(tr, None)
cell = cell_collection[1]
_Cell_.assert_called_once_with(tcs[1], cell_collection)
assert cell is cell_
def it_raises_on_indexed_access_out_of_range(self):
cells = _CellCollection(element("a:tr/a:tc"), None)
with pytest.raises(IndexError):
cells[-1]
with pytest.raises(IndexError):
cells[9]
# fixtures -------------------------------------------------------
@pytest.fixture(params=["a:tr", "a:tr/a:tc", "a:tr/(a:tc, a:tc, a:tc)"])
def iter_fixture(self, request, _Cell_):
tr_cxml = request.param
tr = element(tr_cxml)
tcs = tr.xpath("//a:tc")
cell_collection = _CellCollection(tr, None)
expected_cells = [
instance_mock(request, _Cell, name="cell%d" % idx)
for idx in range(len(tcs))
]
_Cell_.side_effect = expected_cells
calls = [call(tc, cell_collection) for tc in tcs]
return cell_collection, _Cell_, calls, expected_cells
@pytest.fixture(params=[("a:tr", 0), ("a:tr/a:tc", 1), ("a:tr/(a:tc, a:tc)", 2)])
def len_fixture(self, request):
tr_cxml, expected_len = request.param
cells = _CellCollection(element(tr_cxml), None)
return cells, expected_len
# fixture components ---------------------------------------------
@pytest.fixture
def _Cell_(self, request):
return class_mock(request, "pptx.table._Cell")
@pytest.fixture
def cell_(self, request):
return instance_mock(request, _Cell)
class Describe_Column(object):
def it_knows_its_width(self, width_get_fixture):
column, expected_value = width_get_fixture
width = column.width
assert width == expected_value
assert isinstance(width, Length)
def it_can_change_its_width(self, width_set_fixture):
column, new_width, expected_xml, parent_ = width_set_fixture
column.width = new_width
assert column._gridCol.xml == expected_xml
parent_.notify_width_changed.assert_called_once_with()
# fixtures -------------------------------------------------------
@pytest.fixture(
params=[("a:gridCol{w=914400}", Inches(1)), ("a:gridCol{w=10pt}", Pt(10))]
)
def width_get_fixture(self, request):
gridCol_cxml, expected_value = request.param
column = _Column(element(gridCol_cxml), None)
return column, expected_value
@pytest.fixture(
params=[
("a:gridCol{w=12pt}", Inches(1), "a:gridCol{w=914400}"),
("a:gridCol{w=1234}", Inches(1), "a:gridCol{w=914400}"),
]
)
def width_set_fixture(self, request, parent_):
gridCol_cxml, new_width, expected_gridCol_cxml = request.param
column = _Column(element(gridCol_cxml), parent_)
expected_xml = xml(expected_gridCol_cxml)
return column, new_width, expected_xml, parent_
# fixture components ---------------------------------------------
@pytest.fixture
def parent_(self, request):
return instance_mock(request, _ColumnCollection)
class Describe_ColumnCollection(object):
def it_knows_how_many_columns_it_contains(self, len_fixture):
columns, expected_count = len_fixture
assert len(columns) == expected_count
def it_can_iterate_over_the_columns_it_contains(self, iter_fixture):
columns, expected_gridCol_lst = iter_fixture
count = 0
for idx, column in enumerate(columns):
assert isinstance(column, _Column)
assert column._gridCol is expected_gridCol_lst[idx]
count += 1
assert count == len(expected_gridCol_lst)
def it_supports_indexed_access(self, getitem_fixture):
columns, expected_gridCol_lst = getitem_fixture
for idx, gridCol in enumerate(expected_gridCol_lst):
column = columns[idx]
assert isinstance(column, _Column)
assert column._gridCol is gridCol
def it_raises_on_indexed_access_out_of_range(self):
columns = _ColumnCollection(element("a:tbl/a:tblGrid/a:gridCol"), None)
with pytest.raises(IndexError):
columns[-1]
with pytest.raises(IndexError):
columns[9]
# fixtures -------------------------------------------------------
@pytest.fixture(
params=[
"a:tbl/a:tblGrid",
"a:tbl/a:tblGrid/a:gridCol",
"a:tbl/a:tblGrid/(a:gridCol, a:gridCol, a:gridCol)",
]
)
def getitem_fixture(self, request):
tbl_cxml = request.param
tbl = element(tbl_cxml)
columns = _ColumnCollection(tbl, None)
expected_column_lst = tbl.xpath("//a:gridCol")
return columns, expected_column_lst
@pytest.fixture(
params=[
"a:tbl/a:tblGrid",
"a:tbl/a:tblGrid/a:gridCol",
"a:tbl/a:tblGrid/(a:gridCol, a:gridCol, a:gridCol)",
]
)
def iter_fixture(self, request):
tbl_cxml = request.param
tbl = element(tbl_cxml)
columns = _ColumnCollection(tbl, None)
expected_column_lst = tbl.xpath("//a:gridCol")
return columns, expected_column_lst
@pytest.fixture(
params=[
("a:tbl/a:tblGrid", 0),
("a:tbl/a:tblGrid/a:gridCol", 1),
("a:tbl/a:tblGrid/(a:gridCol,a:gridCol)", 2),
]
)
def len_fixture(self, request):
tbl_cxml, expected_len = request.param
columns = _ColumnCollection(element(tbl_cxml), None)
return columns, expected_len
class Describe_Row(object):
def it_knows_its_height(self, height_get_fixture):
row, expected_value = height_get_fixture
height = row.height
assert height == expected_value
assert isinstance(height, Length)
def it_can_change_its_height(self, height_set_fixture):
row, new_height, expected_xml, parent_ = height_set_fixture
row.height = new_height
assert row._tr.xml == expected_xml
parent_.notify_height_changed.assert_called_once_with()
def it_provides_access_to_its_cells(self, cells_fixture):
row, _CellCollection_, cells_ = cells_fixture
cells = row.cells
_CellCollection_.assert_called_once_with(row._tr, row)
assert cells is cells_
# fixtures -------------------------------------------------------
@pytest.fixture
def cells_fixture(self, _CellCollection_, cells_):
row = _Row(element("a:tr"), None)
return row, _CellCollection_, cells_
@pytest.fixture(params=[("a:tr{h=914400}", Inches(1)), ("a:tr{h=10pt}", Pt(10))])
def height_get_fixture(self, request):
tr_cxml, expected_value = request.param
row = _Row(element(tr_cxml), None)
return row, expected_value
@pytest.fixture(
params=[
("a:tr{h=12pt}", Inches(1), "a:tr{h=914400}"),
("a:tr{h=1234}", Inches(1), "a:tr{h=914400}"),
]
)
def height_set_fixture(self, request, parent_):
tr_cxml, new_height, expected_tr_cxml = request.param
row = _Row(element(tr_cxml), parent_)
expected_xml = xml(expected_tr_cxml)
return row, new_height, expected_xml, parent_
# fixture components ---------------------------------------------
@pytest.fixture
def _CellCollection_(self, request, cells_):
return class_mock(request, "pptx.table._CellCollection", return_value=cells_)
@pytest.fixture
def cells_(self, request):
return instance_mock(request, _CellCollection)
@pytest.fixture
def parent_(self, request):
return instance_mock(request, _RowCollection)
class Describe_RowCollection(object):
def it_knows_how_many_rows_it_contains(self, len_fixture):
rows, expected_count = len_fixture
assert len(rows) == expected_count
def it_can_iterate_over_the_rows_it_contains(self, iter_fixture):
rows, expected_tr_lst = iter_fixture
count = 0
for idx, row in enumerate(rows):
assert isinstance(row, _Row)
assert row._tr is expected_tr_lst[idx]
count += 1
assert count == len(expected_tr_lst)
def it_supports_indexed_access(self, getitem_fixture):
rows, expected_tr_lst = getitem_fixture
for idx, tr in enumerate(expected_tr_lst):
row = rows[idx]
assert isinstance(row, _Row)
assert row._tr is tr
def it_raises_on_indexed_access_out_of_range(self):
rows = _RowCollection(element("a:tbl/a:tr"), None)
with pytest.raises(IndexError):
rows[-1]
with pytest.raises(IndexError):
rows[9]
# fixtures -------------------------------------------------------
@pytest.fixture(params=["a:tbl", "a:tbl/a:tr", "a:tbl/(a:tr, a:tr, a:tr)"])
def getitem_fixture(self, request):
tbl_cxml = request.param
tbl = element(tbl_cxml)
rows = _RowCollection(tbl, None)
expected_row_lst = tbl.findall(qn("a:tr"))
return rows, expected_row_lst
@pytest.fixture(params=["a:tbl", "a:tbl/a:tr", "a:tbl/(a:tr, a:tr, a:tr)"])
def iter_fixture(self, request):
tbl_cxml = request.param
tbl = element(tbl_cxml)
rows = _RowCollection(tbl, None)
expected_row_lst = tbl.findall(qn("a:tr"))
return rows, expected_row_lst
@pytest.fixture(params=[("a:tbl", 0), ("a:tbl/a:tr", 1), ("a:tbl/(a:tr, a:tr)", 2)])
def len_fixture(self, request):
tbl_cxml, expected_len = request.param
rows = _RowCollection(element(tbl_cxml), None)
return rows, expected_len
| 2.296875 | 2 |
imread/tests/test_bmp.py | luispedro/imread | 51 | 3095 | <reponame>luispedro/imread<filename>imread/tests/test_bmp.py
import numpy as np
from imread import imread
from . import file_path
def test_read():
im = imread(file_path('star1.bmp'))
assert np.any(im)
assert im.shape == (128, 128, 3)
def test_indexed():
im = imread(file_path('py-installer-indexed.bmp'))
assert np.any(im)
assert im.shape == (352, 162, 3)
assert np.any(im[:,:,0])
assert np.any(im[:,:,1])
assert np.any(im[:,:,2])
| 2.34375 | 2 |
bl60x_flash/main.py | v3l0c1r4pt0r/bl60x-flash | 0 | 3096 | from serial import Serial
from tqdm import tqdm
import binascii
import hashlib
import struct
import time
import sys
import os
def if_read(ser, data_len):
data = bytearray(0)
received = 0
while received < data_len:
tmp = ser.read(data_len - received)
if len(tmp) == 0:
break
else:
data += tmp
received += len(tmp)
if len(data) != data_len:
return (0, data)
return (1, data)
def reset(ser):
ser.setRTS(0)
time.sleep(0.2)
reset_cnt = 2
while reset_cnt > 0:
ser.setRTS(1)
time.sleep(0.005)
ser.setRTS(0)
time.sleep(0.1)
ser.setRTS(1)
time.sleep(0.005)
ser.setRTS(0)
time.sleep(0.005)
reset_cnt -= 1
def handshake(ser):
ser.setRTS(1)
time.sleep(0.2)
ser.setRTS(0)
time.sleep(0.05)
ser.setRTS(1)
ser.setDTR(1)
time.sleep(0.1)
ser.setDTR(0)
time.sleep(0.1)
def expect_ok(ser):
data = ser.read(2)
if data[0] != 0x4f or data[1] != 0x4b:
err = ser.read(2)
raise ValueError(binascii.hexlify(err))
def expect_data(ser):
    expect_ok(ser)
    length = struct.unpack('<h', ser.read(2))[0]  # avoid shadowing built-in len()
    data = ser.read(length)
    return data
def cmd_load_seg_header(ser, file):
header = file.read(0x10)
ser.write(b'\x17\x00\x10\x00' + header)
data = expect_data(ser)
seg_addr, seg_len = struct.unpack('<II', data[0:8])
print(f'{seg_len} bytes @ {hex(seg_addr)}')
return seg_len
def cmd_load_seg_data(ser, data):
ser.write(b'\x18\x00' + struct.pack('<H', len(data)) + data)
expect_ok(ser)
def cmd_load_boot_header(ser, file):
header = file.read(0xb0)
ser.write(b'\x11\x00\xb0\x00' + header)
expect_ok(ser)
def cmd_check_image(ser):
ser.write(b'\x19\x00\x00\x00')
expect_ok(ser)
def cmd_run_image(ser):
ser.write(b'\x1a\x00\x00\x00')
expect_ok(ser)
def load_image(ser, file):
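    # Send the 0xb0-byte boot header, stream the single segment in chunks of up
    # to 4080 bytes, then ask the boot ROM to check the image and jump to it.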
image = open(file, 'rb')
cmd_load_boot_header(ser, image)
total = cmd_load_seg_header(ser, image)
sent = 0
with tqdm(total=total, unit='byte', unit_scale=True) as pbar:
while sent != total:
chunk = image.read(min(total-sent, 4080))
cmd_load_seg_data(ser, chunk)
sent = sent + len(chunk)
pbar.update(len(chunk))
cmd_check_image(ser)
cmd_run_image(ser)
def empty_buffer(ser):
timeout = ser.timeout
ser.timeout = 0.1
if_read(ser, 10000)
ser.timeout = timeout
def send_sync(ser):
empty_buffer(ser)
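    # Roughly 6 ms worth of 0x55 sync bytes (10 bits per byte on the wire),
    # after which the bootloader is expected to answer 'OK'.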
ser.write(b'\x55' * int(0.006 * ser.baudrate / 10))
expect_ok(ser)
def efl_write_cmd(ser, id, payload = b''):
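    # Frame layout: [id][checksum][len lo][len hi][payload...]; the checksum is
    # the low byte of the sum over the length field and payload bytes.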
plen = len(payload)
plen_data = struct.pack('<h', plen)
checksum = struct.pack('<h', sum(plen_data + payload) & 0xff)[0:1]
data = bytes([id]) + checksum + plen_data + payload
ser.write(data)
def efl_cmd_read_memory(ser, addr):
# there is a length parameter here but it doesn't seem to work correctly
efl_write_cmd(ser, 0x51, struct.pack('<II', addr, 0x4))
return expect_data(ser)
def efl_cmd_write_memory(ser, addr, data):
efl_write_cmd(ser, 0x50, struct.pack('<I', len(data)) + data)
expect_ok(ser)
def efl_cmd_read_jid(ser):
efl_write_cmd(ser, 0x36)
return expect_data(ser)
def efl_cmd_flash_erase(ser, addr, size):
    # 'size' instead of 'len' so the built-in is not shadowed
    end_addr = addr + size - 1
    efl_write_cmd(ser, 0x30, struct.pack('<II', addr, end_addr))
    timeout = ser.timeout
    ser.timeout = 10.0
    expect_ok(ser)
    ser.timeout = timeout
    print(f'Erased {size} bytes @ {hex(addr)}')
def efl_cmd_flash_write(ser, addr, data):
efl_write_cmd(ser, 0x31, struct.pack('<I', addr) + data)
expect_ok(ser)
def efl_cmd_flash_write_check(ser):
efl_write_cmd(ser, 0x3a)
expect_ok(ser)
def efl_cmd_flash_xip_read_start(ser):
efl_write_cmd(ser, 0x60)
expect_ok(ser)
def efl_cmd_flash_xip_read_sha(ser, addr, len):
efl_write_cmd(ser, 0x3e, struct.pack('<II', addr, len))
return expect_data(ser)
def efl_cmd_flash_xip_read_finish(ser):
efl_write_cmd(ser, 0x61)
expect_ok(ser)
def efl_cmd_reset(ser):
efl_write_cmd(ser, 0x21)
expect_ok(ser)
def efl_program_img(ser, addr, data):
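    # Erase the target region, write the image back in 2048-byte chunks, then
    # verify by comparing a local SHA-256 with the hash computed on-chip via XIP.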
data_len = len(data)
efl_cmd_flash_erase(ser, addr, data_len)
print(f'Programming {data_len} bytes @ {hex(addr)}')
sent = 0
with tqdm(total=data_len, unit='byte', unit_scale=True) as pbar:
while sent != data_len:
buf_len = min(2048, data_len - sent)
buf = data[sent:sent + buf_len]
efl_cmd_flash_write(ser, addr + sent, buf)
sent = sent + buf_len
pbar.update(buf_len)
efl_cmd_flash_write_check(ser)
sha256sum = hashlib.sha256(data).digest()
efl_cmd_flash_xip_read_start(ser)
device_sum = efl_cmd_flash_xip_read_sha(ser, addr, data_len)
efl_cmd_flash_xip_read_finish(ser)
if device_sum != sha256sum:
print('Verification failed')
print('Host SHA256:', binascii.hexlify(sha256sum))
print('BL SHA256:', binascii.hexlify(device_sum))
return False
print('Verified by XIP SHA256 hash')
return True
def prepend_fw_header(img, header_file):
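    # A flashable image must start with a 'BFNP' boot header; if it is missing,
    # prepend the stock header padded with 0xFF to a full 4096-byte block.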
if img[0:4] == b'BFNP':
print('Image already has FW header')
return img
with open(header_file, 'rb') as f:
header = f.read()
img = header + (b'\xFF' * (4096-len(header))) + img
return img
def get_contrib_path(name):
sep = os.path.sep
return os.path.dirname(os.path.realpath(__file__)) + sep + 'contrib' + sep + name
def main():
if len(sys.argv) < 3:
print(f'Usage: {sys.argv[0]} <serial port> <firmware bin>')
sys.exit(1)
ser = Serial(sys.argv[1], baudrate=500000, timeout=2)
handshake(ser)
reset(ser)
send_sync(ser)
time.sleep(0.1)
print('Loading helper binary')
load_image(ser, get_contrib_path('eflash_loader_40m.bin'))
time.sleep(0.2)
print()
# at this point, the eflash loader binary is running with efl_ commands
# (which seems to work with a higher baudrate)
ser.baudrate = 2000000
send_sync(ser)
with open(sys.argv[2], 'rb') as f:
data = f.read()
data = prepend_fw_header(data, get_contrib_path('bootheader.bin'))
efl_program_img(ser, 0x10000, data)
efl_cmd_reset(ser)
if __name__ == "__main__":
main()
| 2.484375 | 2 |
lang/py/test/test_avro_builder.py | zerofox-oss/yelp-avro | 0 | 3097 | <filename>lang/py/test/test_avro_builder.py
# -*- coding: utf-8 -*-
import unittest
from avro import avro_builder
from avro import schema
class TestAvroSchemaBuilder(unittest.TestCase):
def setUp(self):
self.builder = avro_builder.AvroSchemaBuilder()
def tearDown(self):
del self.builder
@property
def name(self):
return 'foo'
@property
def namespace(self):
return 'ns'
@property
def aliases(self):
return ['new_foo']
@property
def doc(self):
return 'sample doc'
@property
def metadata(self):
return {'key1': 'val1', 'key2': 'val2'}
@property
def enum_symbols(self):
return ['a', 'b']
@property
def fixed_size(self):
return 16
@property
def another_name(self):
return 'bar'
@property
def invalid_schemas(self):
undefined_schema_name = 'unknown'
yield undefined_schema_name
non_avro_schema = {'foo': 'bar'}
yield non_avro_schema
named_schema_without_name = {'name': '', 'type': 'fixed', 'size': 16}
yield named_schema_without_name
invalid_schema = {'name': 'foo', 'type': 'enum', 'symbols': ['a', 'a']}
yield invalid_schema
none_schema = None
yield none_schema
@property
def invalid_names(self):
missing_name = None
yield missing_name
reserved_name = 'int'
yield reserved_name
non_string_name = 100
yield non_string_name
@property
def duplicate_name_err(self):
return '"{0}" is already in use.'
def test_create_primitive_types(self):
self.assertEqual('null', self.builder.create_null())
self.assertEqual('boolean', self.builder.create_boolean())
self.assertEqual('int', self.builder.create_int())
self.assertEqual('long', self.builder.create_long())
self.assertEqual('float', self.builder.create_float())
self.assertEqual('double', self.builder.create_double())
self.assertEqual('bytes', self.builder.create_bytes())
self.assertEqual('string', self.builder.create_string())
def test_create_enum(self):
actual_json = self.builder.begin_enum(self.name, self.enum_symbols).end()
expected_json = {
'type': 'enum',
'name': self.name,
'symbols': self.enum_symbols
}
self.assertEqual(expected_json, actual_json)
def test_create_enum_with_optional_attributes(self):
actual_json = self.builder.begin_enum(
self.name,
self.enum_symbols,
self.namespace,
self.aliases,
self.doc,
**self.metadata
).end()
expected_json = {
'type': 'enum',
'name': self.name,
'symbols': self.enum_symbols,
'namespace': self.namespace,
'aliases': self.aliases,
'doc': self.doc
}
expected_json.update(self.metadata)
self.assertEqual(expected_json, actual_json)
def test_create_enum_with_invalid_name(self):
for invalid_name in self.invalid_names:
self.builder.clear()
with self.assertRaises(schema.SchemaParseException):
self.builder.begin_enum(invalid_name, self.enum_symbols).end()
def test_create_enum_with_dup_name(self):
with self.assertRaisesRegexp(
schema.SchemaParseException,
self.duplicate_name_err.format(self.name)
):
self.builder.begin_record(self.name)
self.builder.add_field(
self.another_name,
self.builder.begin_enum(self.name, self.enum_symbols).end()
)
self.builder.end()
def test_create_enum_with_invalid_symbols(self):
self.single_test_create_enum_with_invalid_symbols(None)
self.single_test_create_enum_with_invalid_symbols('')
self.single_test_create_enum_with_invalid_symbols('a')
self.single_test_create_enum_with_invalid_symbols(['a', 1])
self.single_test_create_enum_with_invalid_symbols([1, 2, 3])
self.single_test_create_enum_with_invalid_symbols(['a', 'a'])
def single_test_create_enum_with_invalid_symbols(self, invalid_symbols):
self.builder.clear()
with self.assertRaises(schema.AvroException):
self.builder.begin_enum(self.name, invalid_symbols).end()
def test_create_fixed(self):
actual_json = self.builder.begin_fixed(self.name, self.fixed_size).end()
expected_json = {
'type': 'fixed',
'name': self.name,
'size': self.fixed_size
}
self.assertEqual(expected_json, actual_json)
def test_create_fixed_with_optional_attributes(self):
actual_json = self.builder.begin_fixed(
self.name,
self.fixed_size,
self.namespace,
self.aliases,
**self.metadata
).end()
expected_json = {
'type': 'fixed',
'name': self.name,
'size': self.fixed_size,
'namespace': self.namespace,
'aliases': self.aliases,
}
expected_json.update(self.metadata)
self.assertEqual(expected_json, actual_json)
def test_create_fixed_with_invalid_name(self):
for invalid_name in self.invalid_names:
self.builder.clear()
with self.assertRaises(schema.SchemaParseException):
self.builder.begin_fixed(invalid_name, self.fixed_size).end()
def test_create_fixed_with_dup_name(self):
with self.assertRaisesRegexp(
schema.SchemaParseException,
self.duplicate_name_err.format(self.name)
):
self.builder.begin_record(self.name)
self.builder.add_field(
self.another_name,
self.builder.begin_fixed(self.name, self.fixed_size).end()
)
self.builder.end()
def test_create_fixed_with_invalid_size(self):
self.single_test_create_fixed_with_invalid_size(None)
self.single_test_create_fixed_with_invalid_size('ten')
def single_test_create_fixed_with_invalid_size(self, invalid_size):
self.builder.clear()
with self.assertRaises(schema.AvroException):
self.builder.begin_fixed(self.name, invalid_size).end()
def test_create_array(self):
actual_json = self.builder.begin_array(self.builder.create_int()).end()
expected_json = {'type': 'array', 'items': 'int'}
self.assertEqual(expected_json, actual_json)
def test_create_array_with_optional_attributes(self):
actual_json = self.builder.begin_array(
self.builder.create_int(),
**self.metadata
).end()
expected_json = {'type': 'array', 'items': 'int'}
expected_json.update(self.metadata)
self.assertEqual(expected_json, actual_json)
def test_create_array_with_complex_type(self):
actual_json = self.builder.begin_array(
self.builder.begin_enum(self.name, self.enum_symbols).end()
).end()
expected_json = {
'type': 'array',
'items': {
'type': 'enum',
'name': self.name,
'symbols': self.enum_symbols
}
}
self.assertEqual(expected_json, actual_json)
def test_create_array_with_invalid_items_type(self):
for invalid_schema in self.invalid_schemas:
self.builder.clear()
with self.assertRaises(schema.AvroException):
self.builder.begin_array(invalid_schema).end()
def test_create_map(self):
actual_json = self.builder.begin_map(self.builder.create_string()).end()
expected_json = {'type': 'map', 'values': 'string'}
self.assertEqual(expected_json, actual_json)
def test_create_map_with_optional_attributes(self):
actual_json = self.builder.begin_map(
self.builder.create_string(),
**self.metadata
).end()
expected_json = {'type': 'map', 'values': 'string'}
expected_json.update(self.metadata)
self.assertEqual(expected_json, actual_json)
def test_create_map_with_complex_type(self):
actual_json = self.builder.begin_map(
self.builder.begin_fixed(self.name, self.fixed_size).end()
).end()
expected_json = {
'type': 'map',
'values': {
'type': 'fixed',
'name': self.name,
'size': self.fixed_size
}
}
self.assertEqual(expected_json, actual_json)
def test_create_map_with_invalid_values_type(self):
for invalid_schema in self.invalid_schemas:
self.builder.clear()
with self.assertRaises(schema.AvroException):
self.builder.begin_map(invalid_schema).end()
def test_create_record(self):
self.builder.begin_record(self.name)
self.builder.add_field(
'bar1',
self.builder.create_int()
)
self.builder.add_field(
'bar2',
self.builder.begin_map(self.builder.create_double()).end()
)
actual_json = self.builder.end()
expected_json = {
'type': 'record',
'name': self.name,
'fields': [
{'name': 'bar1', 'type': 'int'},
{'name': 'bar2', 'type': {'type': 'map', 'values': 'double'}}
]
}
self.assertEqual(expected_json, actual_json)
def test_create_record_with_optional_attributes(self):
self.builder.begin_record(
self.name,
namespace=self.namespace,
aliases=self.aliases,
doc=self.doc,
**self.metadata
)
self.builder.add_field(
self.another_name,
self.builder.create_int()
)
actual_json = self.builder.end()
expected_json = {
'type': 'record',
'name': self.name,
'fields': [{'name': self.another_name, 'type': 'int'}],
'namespace': self.namespace,
'aliases': self.aliases,
'doc': self.doc
}
expected_json.update(self.metadata)
self.assertEqual(expected_json, actual_json)
def test_create_field_with_optional_attributes(self):
self.builder.begin_record(self.name)
self.builder.add_field(
self.another_name,
self.builder.create_boolean(),
has_default=True,
default_value=True,
sort_order='ascending',
aliases=self.aliases,
doc=self.doc,
**self.metadata
)
actual_json = self.builder.end()
expected_field = {
'name': self.another_name,
'type': 'boolean',
'default': True,
'order': 'ascending',
'aliases': self.aliases,
'doc': self.doc
}
expected_field.update(self.metadata)
expected_json = {
'type': 'record',
'name': self.name,
'fields': [expected_field]
}
self.assertEqual(expected_json, actual_json)
def test_create_record_with_no_field(self):
actual_json = self.builder.begin_record(self.name).end()
expected_json = {'type': 'record', 'name': self.name, 'fields': []}
self.assertEqual(expected_json, actual_json)
def test_create_record_with_invalid_name(self):
for invalid_name in self.invalid_names:
self.builder.clear()
with self.assertRaises(schema.SchemaParseException):
self.builder.begin_record(invalid_name)
self.builder.add_field(
self.another_name,
self.builder.create_int()
)
self.builder.end()
def test_create_record_with_dup_name(self):
with self.assertRaisesRegexp(
schema.SchemaParseException,
self.duplicate_name_err.format(self.name)
):
self.builder.begin_record(self.another_name)
self.builder.add_field(
'bar1',
self.builder.begin_enum(self.name, self.enum_symbols).end()
)
self.builder.add_field(
'bar2',
self.builder.begin_record(self.name).end()
)
self.builder.end()
def test_create_record_with_dup_field_name(self):
with self.assertRaisesRegexp(
schema.SchemaParseException,
"{0} already in use.".format(self.another_name)
):
self.builder.begin_record(self.name)
self.builder.add_field(
self.another_name,
self.builder.create_int()
)
self.builder.add_field(
self.another_name,
self.builder.create_string()
)
self.builder.end()
def test_create_field_with_invalid_type(self):
for invalid_schema in self.invalid_schemas:
self.builder.clear()
with self.assertRaises(schema.SchemaParseException):
self.builder.begin_record(self.name)
self.builder.add_field(
self.another_name,
invalid_schema
)
self.builder.end()
def test_create_field_with_invalid_sort_order(self):
with self.assertRaises(schema.SchemaParseException):
self.builder.begin_record(self.name)
self.builder.add_field(
self.another_name,
self.builder.create_int(),
sort_order='asc'
)
self.builder.end()
def test_create_union(self):
actual_json = self.builder.begin_union(
self.builder.create_null(),
self.builder.create_string(),
self.builder.begin_enum(self.name, self.enum_symbols).end()
).end()
expected_json = [
'null',
'string',
{'type': 'enum', 'name': self.name, 'symbols': self.enum_symbols}
]
self.assertEqual(expected_json, actual_json)
def test_create_union_with_empty_sub_schemas(self):
actual_json = self.builder.begin_union().end()
expected_json = []
self.assertEqual(expected_json, actual_json)
def test_create_union_with_nested_union_schema(self):
with self.assertRaises(schema.SchemaParseException):
self.builder.begin_union(
self.builder.begin_union(self.builder.create_int()).end()
).end()
def test_create_union_with_invalid_schema(self):
for invalid_schema in self.invalid_schemas:
self.builder.clear()
with self.assertRaises(schema.SchemaParseException):
self.builder.begin_union(invalid_schema).end()
def test_create_union_with_dup_primitive_schemas(self):
with self.assertRaises(schema.SchemaParseException):
self.builder.begin_union(
self.builder.create_int(),
self.builder.create_int()
).end()
def test_create_union_with_dup_named_schemas(self):
with self.assertRaises(schema.SchemaParseException):
self.builder.begin_union(
self.builder.begin_enum(self.name, self.enum_symbols).end(),
self.builder.begin_fixed(self.name, self.fixed_size).end()
).end()
def test_create_union_with_dup_complex_schemas(self):
with self.assertRaises(schema.SchemaParseException):
self.builder.begin_union(
self.builder.begin_map(self.builder.create_int()).end(),
self.builder.begin_map(self.builder.create_int()).end()
).end()
def test_create_nullable_type(self):
# non-union schema type
actual_json = self.builder.begin_nullable_type(
self.builder.create_int()
).end()
expected_json = ['null', 'int']
self.assertEqual(expected_json, actual_json)
# union schema type
actual_json = self.builder.begin_nullable_type(
[self.builder.create_int()]
).end()
expected_json = ['null', 'int']
self.assertEqual(expected_json, actual_json)
def test_create_nullable_type_with_default_value(self):
# non-union schema type
actual_json = self.builder.begin_nullable_type(
self.builder.create_int(),
10
).end()
expected_json = ['int', 'null']
self.assertEqual(expected_json, actual_json)
# union schema type
actual_json = self.builder.begin_nullable_type(
[self.builder.create_int()],
10
).end()
expected_json = ['int', 'null']
self.assertEqual(expected_json, actual_json)
def test_create_nullable_type_with_null_type(self):
actual_json = self.builder.begin_nullable_type(
self.builder.create_null()
).end()
expected_json = 'null'
self.assertEqual(expected_json, actual_json)
def test_create_nullable_type_with_nullable_type(self):
actual_json = self.builder.begin_nullable_type(
self.builder.begin_union(
self.builder.create_null(),
self.builder.create_long()
).end(),
10
).end()
expected_json = ['null', 'long']
self.assertEqual(expected_json, actual_json)
def test_create_nullable_type_with_invalid_type(self):
for invalid_schema in self.invalid_schemas:
self.builder.clear()
with self.assertRaises(schema.SchemaParseException):
self.builder.begin_nullable_type(invalid_schema)
def test_create_schema_with_preloaded_json(self):
schema_json = {
'type': 'record',
'name': self.name,
'fields': [
{'name': 'field', 'type': {'type': 'map', 'values': 'double'}}
]
}
self.builder.begin_with_schema_json(schema_json)
self.builder.add_field(
'field_new',
self.builder.create_int()
)
actual_json = self.builder.end()
expected_json = schema_json.copy()
expected_json['fields'].append({'name': 'field_new', 'type': 'int'})
self.assertEqual(expected_json, actual_json)
def test_removed_field(self):
self.builder.begin_record(self.name)
self.builder.add_field('bar1', self.builder.create_int())
self.builder.add_field('bar2', self.builder.create_int())
self.builder.remove_field('bar1')
actual_json = self.builder.end()
expected_json = {
'type': 'record',
'name': self.name,
'fields': [{'name': 'bar2', 'type': 'int'}]
}
self.assertEqual(expected_json, actual_json)
def test_removed_nonexistent_field(self):
schema_json = {
'type': 'record',
'name': self.name,
'fields': [{'name': 'bar2', 'type': 'int'}]
}
with self.assertRaises(avro_builder.AvroBuildInvalidOperation):
self.builder.begin_with_schema_json(schema_json)
self.builder.remove_field('bar1')
self.builder.end()
if __name__ == '__main__':
unittest.main()
| 2.390625 | 2 |
monte_py/__init__.py | domluna/fun_with_ffi | 1 | 3098 | import random
def estimate_pi(sims, needles):
trials = []
for _ in xrange(sims):
trials.append(simulate_pi(needles))
mean = sum(trials) / sims
return mean
# Sample points uniformly from the square [-1, 1] x [-1, 1]; the inscribed unit
# circle covers pi/4 of that area, so 4 * (hits / needles) estimates pi.
def simulate_pi(needles):
    hits = 0  # points that land inside the unit circle
    for _ in xrange(needles):
        x = random.uniform(-1.0, 1.0)
        y = random.uniform(-1.0, 1.0)
        if x*x + y*y <= 1.0:
            hits += 1
    return 4. * (hits / float(needles))
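# Example: estimate_pi(20, 100000) averages 20 runs of 100000 samples and should
# land near 3.14159, with the error shrinking as the number of needles grows.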
| 3.34375 | 3 |
tools/mpy_ld.py | UVA-DSI/circuitpython | 1 | 3099 | #!/usr/bin/env python3
#
# This file is part of the MicroPython project, http://micropython.org/
#
# The MIT License (MIT)
#
# Copyright (c) 2019 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Link .o files to .mpy
"""
import sys, os, struct, re
from elftools.elf import elffile
sys.path.append(os.path.dirname(__file__) + "/../py")
import makeqstrdata as qstrutil
# MicroPython constants
MPY_VERSION = 5
MP_NATIVE_ARCH_X86 = 1
MP_NATIVE_ARCH_X64 = 2
MP_NATIVE_ARCH_ARMV7M = 5
MP_NATIVE_ARCH_ARMV7EMSP = 7
MP_NATIVE_ARCH_ARMV7EMDP = 8
MP_NATIVE_ARCH_XTENSA = 9
MP_NATIVE_ARCH_XTENSAWIN = 10
MP_CODE_BYTECODE = 2
MP_CODE_NATIVE_VIPER = 4
MP_SCOPE_FLAG_VIPERRELOC = 0x20
MP_SCOPE_FLAG_VIPERRODATA = 0x40
MP_SCOPE_FLAG_VIPERBSS = 0x80
MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE = 1
MICROPY_PY_BUILTINS_STR_UNICODE = 2
MP_SMALL_INT_BITS = 31
QSTR_WINDOW_SIZE = 32
# ELF constants
R_386_32 = 1
R_X86_64_64 = 1
R_XTENSA_32 = 1
R_386_PC32 = 2
R_X86_64_PC32 = 2
R_ARM_ABS32 = 2
R_386_GOT32 = 3
R_ARM_REL32 = 3
R_386_PLT32 = 4
R_X86_64_PLT32 = 4
R_XTENSA_PLT = 6
R_386_GOTOFF = 9
R_386_GOTPC = 10
R_ARM_THM_CALL = 10
R_XTENSA_DIFF32 = 19
R_XTENSA_SLOT0_OP = 20
R_ARM_BASE_PREL = 25 # aka R_ARM_GOTPC
R_ARM_GOT_BREL = 26 # aka R_ARM_GOT32
R_ARM_THM_JUMP24 = 30
R_X86_64_REX_GOTPCRELX = 42
R_386_GOT32X = 43
################################################################################
# Architecture configuration
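# Each asm_jump_* helper below emits a machine-code unconditional jump to
# 'entry', where 'entry' is an offset measured from the start of the jump
# instruction itself.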
def asm_jump_x86(entry):
return struct.pack("<BI", 0xE9, entry - 5)
def asm_jump_arm(entry):
b_off = entry - 4
if b_off >> 11 == 0 or b_off >> 11 == -1:
# Signed value fits in 12 bits
b0 = 0xE000 | (b_off >> 1 & 0x07FF)
b1 = 0
else:
# Use large jump
b0 = 0xF000 | (b_off >> 12 & 0x07FF)
b1 = 0xB800 | (b_off >> 1 & 0x7FF)
return struct.pack("<HH", b0, b1)
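# Pack a 3-byte Xtensa unconditional jump: the code below places opcode 6 in the
# low bits and the (entry - 4) offset shifted left by 6, emitted little-endian.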
def asm_jump_xtensa(entry):
jump_offset = entry - 4
jump_op = jump_offset << 6 | 6
return struct.pack("<BH", jump_op & 0xFF, jump_op >> 8)
class ArchData:
def __init__(self, name, mpy_feature, qstr_entry_size, word_size, arch_got, asm_jump):
self.name = name
self.mpy_feature = mpy_feature
self.qstr_entry_size = qstr_entry_size
self.word_size = word_size
self.arch_got = arch_got
self.asm_jump = asm_jump
self.separate_rodata = name == "EM_XTENSA" and qstr_entry_size == 4
ARCH_DATA = {
"x86": ArchData(
"EM_386",
MP_NATIVE_ARCH_X86 << 2
| MICROPY_PY_BUILTINS_STR_UNICODE
| MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE,
2,
4,
(R_386_PC32, R_386_GOT32, R_386_GOT32X),
asm_jump_x86,
),
"x64": ArchData(
"EM_X86_64",
MP_NATIVE_ARCH_X64 << 2
| MICROPY_PY_BUILTINS_STR_UNICODE
| MICROPY_OPT_CACHE_MAP_LOOKUP_IN_BYTECODE,
2,
8,
(R_X86_64_REX_GOTPCRELX,),
asm_jump_x86,
),
"armv7m": ArchData(
"EM_ARM",
MP_NATIVE_ARCH_ARMV7M << 2 | MICROPY_PY_BUILTINS_STR_UNICODE,
2,
4,
(R_ARM_GOT_BREL,),
asm_jump_arm,
),
"armv7emsp": ArchData(
"EM_ARM",
MP_NATIVE_ARCH_ARMV7EMSP << 2 | MICROPY_PY_BUILTINS_STR_UNICODE,
2,
4,
(R_ARM_GOT_BREL,),
asm_jump_arm,
),
"armv7emdp": ArchData(
"EM_ARM",
MP_NATIVE_ARCH_ARMV7EMDP << 2 | MICROPY_PY_BUILTINS_STR_UNICODE,
2,
4,
(R_ARM_GOT_BREL,),
asm_jump_arm,
),
"xtensa": ArchData(
"EM_XTENSA",
MP_NATIVE_ARCH_XTENSA << 2 | MICROPY_PY_BUILTINS_STR_UNICODE,
2,
4,
(R_XTENSA_32, R_XTENSA_PLT),
asm_jump_xtensa,
),
"xtensawin": ArchData(
"EM_XTENSA",
MP_NATIVE_ARCH_XTENSAWIN << 2 | MICROPY_PY_BUILTINS_STR_UNICODE,
4,
4,
(R_XTENSA_32, R_XTENSA_PLT),
asm_jump_xtensa,
),
}
################################################################################
# Helper functions
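# align_to rounds value up to the next multiple of align (a power of two),
# e.g. align_to(5, 4) == 8 and align_to(8, 4) == 8.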
def align_to(value, align):
return (value + align - 1) & ~(align - 1)
def unpack_u24le(data, offset):
return data[offset] | data[offset + 1] << 8 | data[offset + 2] << 16
def pack_u24le(data, offset, value):
data[offset] = value & 0xFF
data[offset + 1] = value >> 8 & 0xFF
data[offset + 2] = value >> 16 & 0xFF
def xxd(text):
for i in range(0, len(text), 16):
print("{:08x}:".format(i), end="")
for j in range(4):
off = i + j * 4
if off < len(text):
d = int.from_bytes(text[off : off + 4], "little")
print(" {:08x}".format(d), end="")
print()
# Smaller numbers are enabled first
LOG_LEVEL_1 = 1
LOG_LEVEL_2 = 2
LOG_LEVEL_3 = 3
log_level = LOG_LEVEL_1
def log(level, msg):
if level <= log_level:
print(msg)
################################################################################
# Qstr extraction
def extract_qstrs(source_files):
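    # Scan the source files for MP_QSTR_xxx identifiers, recording those wrapped
    # in MP_OBJ_NEW_QSTR(...) separately from plain qstr uses.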
def read_qstrs(f):
with open(f) as f:
vals = set()
objs = set()
for line in f:
while line:
m = re.search(r"MP_OBJ_NEW_QSTR\((MP_QSTR_[A-Za-z0-9_]*)\)", line)
if m:
objs.add(m.group(1))
else:
m = re.search(r"MP_QSTR_[A-Za-z0-9_]*", line)
if m:
vals.add(m.group())
if m:
s = m.span()
line = line[: s[0]] + line[s[1] :]
else:
line = ""
return vals, objs
static_qstrs = ["MP_QSTR_" + qstrutil.qstr_escape(q) for q in qstrutil.static_qstr_list]
qstr_vals = set()
qstr_objs = set()
for f in source_files:
vals, objs = read_qstrs(f)
qstr_vals.update(vals)
qstr_objs.update(objs)
qstr_vals.difference_update(static_qstrs)
return static_qstrs, qstr_vals, qstr_objs
################################################################################
# Linker
class LinkError(Exception):
pass
class Section:
def __init__(self, name, data, alignment, filename=None):
self.filename = filename
self.name = name
self.data = data
self.alignment = alignment
self.addr = 0
self.reloc = []
@staticmethod
def from_elfsec(elfsec, filename):
assert elfsec.header.sh_addr == 0
return Section(elfsec.name, elfsec.data(), elfsec.data_alignment, filename)
class GOTEntry:
def __init__(self, name, sym, link_addr=0):
self.name = name
self.sym = sym
self.offset = None
self.link_addr = link_addr
def isexternal(self):
return self.sec_name.startswith(".external")
def istext(self):
return self.sec_name.startswith(".text")
def isrodata(self):
return self.sec_name.startswith((".rodata", ".data.rel.ro"))
def isbss(self):
return self.sec_name.startswith(".bss")
class LiteralEntry:
def __init__(self, value, offset):
self.value = value
self.offset = offset
class LinkEnv:
def __init__(self, arch):
self.arch = ARCH_DATA[arch]
self.sections = [] # list of sections in order of output
self.literal_sections = [] # list of literal sections (xtensa only)
self.known_syms = {} # dict of symbols that are defined
self.unresolved_syms = [] # list of unresolved symbols
self.mpy_relocs = [] # list of relocations needed in the output .mpy file
def check_arch(self, arch_name):
if arch_name != self.arch.name:
raise LinkError("incompatible arch")
def print_sections(self):
log(LOG_LEVEL_2, "sections:")
for sec in self.sections:
log(LOG_LEVEL_2, " {:08x} {} size={}".format(sec.addr, sec.name, len(sec.data)))
def find_addr(self, name):
if name in self.known_syms:
s = self.known_syms[name]
return s.section.addr + s["st_value"]
raise LinkError("unknown symbol: {}".format(name))
def build_got_generic(env):
env.got_entries = {}
for sec in env.sections:
for r in sec.reloc:
s = r.sym
if not (
s.entry["st_info"]["bind"] == "STB_GLOBAL"
and r["r_info_type"] in env.arch.arch_got
):
continue
s_type = s.entry["st_info"]["type"]
assert s_type in ("STT_NOTYPE", "STT_FUNC", "STT_OBJECT"), s_type
assert s.name
if s.name in env.got_entries:
continue
env.got_entries[s.name] = GOTEntry(s.name, s)
def build_got_xtensa(env):
env.got_entries = {}
env.lit_entries = {}
env.xt_literals = {}
# Extract the values from the literal table
for sec in env.literal_sections:
assert len(sec.data) % env.arch.word_size == 0
# Look through literal relocations to find any global pointers that should be GOT entries
for r in sec.reloc:
s = r.sym
s_type = s.entry["st_info"]["type"]
assert s_type in ("STT_NOTYPE", "STT_FUNC", "STT_OBJECT", "STT_SECTION"), s_type
assert r["r_info_type"] in env.arch.arch_got
assert r["r_offset"] % env.arch.word_size == 0
# This entry is a global pointer
existing = struct.unpack_from("<I", sec.data, r["r_offset"])[0]
if s_type == "STT_SECTION":
assert r["r_addend"] == 0
name = "{}+0x{:x}".format(s.section.name, existing)
else:
assert existing == 0
name = s.name
if r["r_addend"] != 0:
name = "{}+0x{:x}".format(name, r["r_addend"])
idx = "{}+0x{:x}".format(sec.filename, r["r_offset"])
env.xt_literals[idx] = name
if name in env.got_entries:
# Deduplicate GOT entries
continue
env.got_entries[name] = GOTEntry(name, s, existing)
# Go through all literal entries finding those that aren't global pointers so must be actual literals
for i in range(0, len(sec.data), env.arch.word_size):
idx = "{}+0x{:x}".format(sec.filename, i)
if idx not in env.xt_literals:
# This entry is an actual literal
value = struct.unpack_from("<I", sec.data, i)[0]
env.xt_literals[idx] = value
if value in env.lit_entries:
# Deduplicate literals
continue
env.lit_entries[value] = LiteralEntry(
value, len(env.lit_entries) * env.arch.word_size
)
def populate_got(env):
# Compute GOT destination addresses
for got_entry in env.got_entries.values():
sym = got_entry.sym
if hasattr(sym, "resolved"):
sym = sym.resolved
sec = sym.section
addr = sym["st_value"]
got_entry.sec_name = sec.name
got_entry.link_addr += sec.addr + addr
    # Sort the GOT by external, text, rodata, bss so relocations can be combined
got_list = sorted(
env.got_entries.values(),
key=lambda g: g.isexternal() + 2 * g.istext() + 3 * g.isrodata() + 4 * g.isbss(),
)
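    # (in practice each entry matches exactly one predicate, so the key orders
    # external -> 1, text -> 2, rodata -> 3, bss -> 4)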
# Layout and populate the GOT
offset = 0
for got_entry in got_list:
got_entry.offset = offset
offset += env.arch.word_size
o = env.got_section.addr + got_entry.offset
env.full_text[o : o + env.arch.word_size] = got_entry.link_addr.to_bytes(
env.arch.word_size, "little"
)
# Create a relocation for each GOT entry
for got_entry in got_list:
if got_entry.name == "mp_fun_table":
dest = "mp_fun_table"
elif got_entry.name.startswith("mp_fun_table+0x"):
dest = int(got_entry.name.split("+")[1], 16) // env.arch.word_size
elif got_entry.sec_name.startswith(".text"):
dest = ".text"
elif got_entry.sec_name.startswith(".rodata"):
dest = ".rodata"
elif got_entry.sec_name.startswith(".data.rel.ro"):
dest = ".data.rel.ro"
elif got_entry.sec_name.startswith(".bss"):
dest = ".bss"
else:
assert 0, (got_entry.name, got_entry.sec_name)
env.mpy_relocs.append((".text", env.got_section.addr + got_entry.offset, dest))
# Print out the final GOT
log(LOG_LEVEL_2, "GOT: {:08x}".format(env.got_section.addr))
for g in got_list:
log(
LOG_LEVEL_2,
" {:08x} {} -> {}+{:08x}".format(g.offset, g.name, g.sec_name, g.link_addr),
)
def populate_lit(env):
log(LOG_LEVEL_2, "LIT: {:08x}".format(env.lit_section.addr))
for lit_entry in env.lit_entries.values():
value = lit_entry.value
log(LOG_LEVEL_2, " {:08x} = {:08x}".format(lit_entry.offset, value))
o = env.lit_section.addr + lit_entry.offset
env.full_text[o : o + env.arch.word_size] = value.to_bytes(env.arch.word_size, "little")
def do_relocation_text(env, text_addr, r):
# Extract relevant info about symbol that's being relocated
s = r.sym
s_bind = s.entry["st_info"]["bind"]
s_shndx = s.entry["st_shndx"]
s_type = s.entry["st_info"]["type"]
r_offset = r["r_offset"] + text_addr
r_info_type = r["r_info_type"]
try:
# only for RELA sections
r_addend = r["r_addend"]
except KeyError:
r_addend = 0
# Default relocation type and name for logging
reloc_type = "le32"
log_name = None
if (
env.arch.name == "EM_386"
and r_info_type in (R_386_PC32, R_386_PLT32)
or env.arch.name == "EM_X86_64"
and r_info_type in (R_X86_64_PC32, R_X86_64_PLT32)
or env.arch.name == "EM_ARM"
and r_info_type in (R_ARM_REL32, R_ARM_THM_CALL, R_ARM_THM_JUMP24)
or s_bind == "STB_LOCAL"
and env.arch.name == "EM_XTENSA"
and r_info_type == R_XTENSA_32 # not GOT
):
# Standard relocation to fixed location within text/rodata
if hasattr(s, "resolved"):
s = s.resolved
sec = s.section
if env.arch.separate_rodata and sec.name.startswith(".rodata"):
raise LinkError("fixed relocation to rodata with rodata referenced via GOT")
if sec.name.startswith(".bss"):
raise LinkError(
"{}: fixed relocation to bss (bss variables can't be static)".format(s.filename)
)
if sec.name.startswith(".external"):
raise LinkError(
"{}: fixed relocation to external symbol: {}".format(s.filename, s.name)
)
addr = sec.addr + s["st_value"]
reloc = addr - r_offset + r_addend
if r_info_type in (R_ARM_THM_CALL, R_ARM_THM_JUMP24):
# Both relocations have the same bit pattern to rewrite:
# R_ARM_THM_CALL: bl
# R_ARM_THM_JUMP24: b.w
reloc_type = "thumb_b"
elif (
env.arch.name == "EM_386"
and r_info_type == R_386_GOTPC
or env.arch.name == "EM_ARM"
and r_info_type == R_ARM_BASE_PREL
):
# Relocation to GOT address itself
assert s.name == "_GLOBAL_OFFSET_TABLE_"
addr = env.got_section.addr
reloc = addr - r_offset + r_addend
elif (
env.arch.name == "EM_386"
and r_info_type in (R_386_GOT32, R_386_GOT32X)
or env.arch.name == "EM_ARM"
and r_info_type == R_ARM_GOT_BREL
):
        # Relocation pointing to GOT
reloc = addr = env.got_entries[s.name].offset
elif env.arch.name == "EM_X86_64" and r_info_type == R_X86_64_REX_GOTPCRELX:
        # Relocation pointing to GOT
got_entry = env.got_entries[s.name]
addr = env.got_section.addr + got_entry.offset
reloc = addr - r_offset + r_addend
elif env.arch.name == "EM_386" and r_info_type == R_386_GOTOFF:
# Relocation relative to GOT
addr = s.section.addr + s["st_value"]
reloc = addr - env.got_section.addr + r_addend
elif env.arch.name == "EM_XTENSA" and r_info_type == R_XTENSA_SLOT0_OP:
# Relocation pointing to GOT, xtensa specific
sec = s.section
if sec.name.startswith(".text"):
# it looks like R_XTENSA_SLOT0_OP into .text is already correctly relocated
return
assert sec.name.startswith(".literal"), sec.name
lit_idx = "{}+0x{:x}".format(sec.filename, r_addend)
lit_ptr = env.xt_literals[lit_idx]
if isinstance(lit_ptr, str):
addr = env.got_section.addr + env.got_entries[lit_ptr].offset
log_name = "GOT {}".format(lit_ptr)
else:
addr = env.lit_section.addr + env.lit_entries[lit_ptr].offset
log_name = "LIT"
reloc = addr - r_offset
reloc_type = "xtensa_l32r"
elif env.arch.name == "EM_XTENSA" and r_info_type == R_XTENSA_DIFF32:
if s.section.name.startswith(".text"):
# it looks like R_XTENSA_DIFF32 into .text is already correctly relocated
return
assert 0
else:
# Unknown/unsupported relocation
assert 0, r_info_type
# Write relocation
if reloc_type == "le32":
(existing,) = struct.unpack_from("<I", env.full_text, r_offset)
struct.pack_into("<I", env.full_text, r_offset, (existing + reloc) & 0xFFFFFFFF)
elif reloc_type == "thumb_b":
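        # As decoded here, the low 11 bits of the first halfword hold bits [22:12]
        # of the signed byte offset and the low 11 bits of the second halfword
        # hold bits [11:1].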
b_h, b_l = struct.unpack_from("<HH", env.full_text, r_offset)
existing = (b_h & 0x7FF) << 12 | (b_l & 0x7FF) << 1
if existing >= 0x400000: # 2's complement
existing -= 0x800000
new = existing + reloc
b_h = (b_h & 0xF800) | (new >> 12) & 0x7FF
b_l = (b_l & 0xF800) | (new >> 1) & 0x7FF
struct.pack_into("<HH", env.full_text, r_offset, b_h, b_l)
elif reloc_type == "xtensa_l32r":
l32r = unpack_u24le(env.full_text, r_offset)
assert l32r & 0xF == 1 # RI16 encoded l32r
l32r_imm16 = l32r >> 8
l32r_imm16 = (l32r_imm16 + reloc >> 2) & 0xFFFF
l32r = l32r & 0xFF | l32r_imm16 << 8
pack_u24le(env.full_text, r_offset, l32r)
else:
assert 0, reloc_type
# Log information about relocation
if log_name is None:
if s_type == "STT_SECTION":
log_name = s.section.name
else:
log_name = s.name
log(LOG_LEVEL_3, " {:08x} {} -> {:08x}".format(r_offset, log_name, addr))
def do_relocation_data(env, text_addr, r):
s = r.sym
s_type = s.entry["st_info"]["type"]
r_offset = r["r_offset"] + text_addr
r_info_type = r["r_info_type"]
try:
# only for RELA sections
r_addend = r["r_addend"]
except KeyError:
r_addend = 0
if (
env.arch.name == "EM_386"
and r_info_type == R_386_32
or env.arch.name == "EM_X86_64"
and r_info_type == R_X86_64_64
or env.arch.name == "EM_ARM"
and r_info_type == R_ARM_ABS32
or env.arch.name == "EM_XTENSA"
and r_info_type == R_XTENSA_32
):
# Relocation in data.rel.ro to internal/external symbol
if env.arch.word_size == 4:
struct_type = "<I"
elif env.arch.word_size == 8:
struct_type = "<Q"
sec = s.section
assert r_offset % env.arch.word_size == 0
addr = sec.addr + s["st_value"] + r_addend
if s_type == "STT_SECTION":
log_name = sec.name
else:
log_name = s.name
log(LOG_LEVEL_3, " {:08x} -> {} {:08x}".format(r_offset, log_name, addr))
if env.arch.separate_rodata:
data = env.full_rodata
else:
data = env.full_text
(existing,) = struct.unpack_from(struct_type, data, r_offset)
if sec.name.startswith((".text", ".rodata", ".data.rel.ro", ".bss")):
struct.pack_into(struct_type, data, r_offset, existing + addr)
kind = sec.name
elif sec.name == ".external.mp_fun_table":
assert addr == 0
kind = s.mp_fun_table_offset
else:
assert 0, sec.name
if env.arch.separate_rodata:
base = ".rodata"
else:
base = ".text"
env.mpy_relocs.append((base, r_offset, kind))
else:
# Unknown/unsupported relocation
assert 0, r_info_type
def load_object_file(env, felf):
with open(felf, "rb") as f:
elf = elffile.ELFFile(f)
env.check_arch(elf["e_machine"])
# Get symbol table
symtab = list(elf.get_section_by_name(".symtab").iter_symbols())
# Load needed sections from ELF file
sections_shndx = {} # maps elf shndx to Section object
for idx, s in enumerate(elf.iter_sections()):
if s.header.sh_type in ("SHT_PROGBITS", "SHT_NOBITS"):
if s.data_size == 0:
# Ignore empty sections
pass
elif s.name.startswith((".literal", ".text", ".rodata", ".data.rel.ro", ".bss")):
sec = Section.from_elfsec(s, felf)
sections_shndx[idx] = sec
if s.name.startswith(".literal"):
env.literal_sections.append(sec)
else:
env.sections.append(sec)
elif s.name.startswith(".data"):
raise LinkError("{}: {} non-empty".format(felf, s.name))
else:
# Ignore section
pass
elif s.header.sh_type in ("SHT_REL", "SHT_RELA"):
shndx = s.header.sh_info
if shndx in sections_shndx:
sec = sections_shndx[shndx]
sec.reloc_name = s.name
sec.reloc = list(s.iter_relocations())
for r in sec.reloc:
r.sym = symtab[r["r_info_sym"]]
# Link symbols to their sections, and update known and unresolved symbols
for sym in symtab:
sym.filename = felf
shndx = sym.entry["st_shndx"]
if shndx in sections_shndx:
# Symbol with associated section
sym.section = sections_shndx[shndx]
if sym["st_info"]["bind"] == "STB_GLOBAL":
# Defined global symbol
if sym.name in env.known_syms and not sym.name.startswith(
"__x86.get_pc_thunk."
):
raise LinkError("duplicate symbol: {}".format(sym.name))
env.known_syms[sym.name] = sym
elif sym.entry["st_shndx"] == "SHN_UNDEF" and sym["st_info"]["bind"] == "STB_GLOBAL":
# Undefined global symbol, needs resolving
env.unresolved_syms.append(sym)
def link_objects(env, native_qstr_vals_len, native_qstr_objs_len):
# Build GOT information
if env.arch.name == "EM_XTENSA":
build_got_xtensa(env)
else:
build_got_generic(env)
    # Create the GOT section
got_size = len(env.got_entries) * env.arch.word_size
env.got_section = Section("GOT", bytearray(got_size), env.arch.word_size)
if env.arch.name == "EM_XTENSA":
env.sections.insert(0, env.got_section)
else:
env.sections.append(env.got_section)
# Create optional literal section
if env.arch.name == "EM_XTENSA":
lit_size = len(env.lit_entries) * env.arch.word_size
env.lit_section = Section("LIT", bytearray(lit_size), env.arch.word_size)
env.sections.insert(1, env.lit_section)
# Create section to contain mp_native_qstr_val_table
env.qstr_val_section = Section(
".text.QSTR_VAL",
bytearray(native_qstr_vals_len * env.arch.qstr_entry_size),
env.arch.qstr_entry_size,
)
env.sections.append(env.qstr_val_section)
# Create section to contain mp_native_qstr_obj_table
env.qstr_obj_section = Section(
".text.QSTR_OBJ", bytearray(native_qstr_objs_len * env.arch.word_size), env.arch.word_size
)
env.sections.append(env.qstr_obj_section)
# Resolve unknown symbols
mp_fun_table_sec = Section(".external.mp_fun_table", b"", 0)
fun_table = {
key: 68 + idx
for idx, key in enumerate(
[
"mp_type_type",
"mp_type_str",
"mp_type_list",
"mp_type_dict",
"mp_type_fun_builtin_0",
"mp_type_fun_builtin_1",
"mp_type_fun_builtin_2",
"mp_type_fun_builtin_3",
"mp_type_fun_builtin_var",
"mp_stream_read_obj",
"mp_stream_readinto_obj",
"mp_stream_unbuffered_readline_obj",
"mp_stream_write_obj",
]
)
}
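    # Unresolved references to these runtime objects are resolved to fixed
    # offsets into mp_fun_table (starting at index 68, as encoded above).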
for sym in env.unresolved_syms:
assert sym["st_value"] == 0
if sym.name == "_GLOBAL_OFFSET_TABLE_":
pass
elif sym.name == "mp_fun_table":
sym.section = Section(".external", b"", 0)
elif sym.name == "mp_native_qstr_val_table":
sym.section = env.qstr_val_section
elif sym.name == "mp_native_qstr_obj_table":
sym.section = env.qstr_obj_section
elif sym.name in env.known_syms:
sym.resolved = env.known_syms[sym.name]
else:
if sym.name in fun_table:
sym.section = mp_fun_table_sec
sym.mp_fun_table_offset = fun_table[sym.name]
else:
raise LinkError("{}: undefined symbol: {}".format(sym.filename, sym.name))
# Align sections, assign their addresses, and create full_text
env.full_text = bytearray(env.arch.asm_jump(8)) # dummy, to be filled in later
env.full_rodata = bytearray(0)
env.full_bss = bytearray(0)
for sec in env.sections:
if env.arch.separate_rodata and sec.name.startswith((".rodata", ".data.rel.ro")):
data = env.full_rodata
elif sec.name.startswith(".bss"):
data = env.full_bss
else:
data = env.full_text
sec.addr = align_to(len(data), sec.alignment)
data.extend(b"\x00" * (sec.addr - len(data)))
data.extend(sec.data)
env.print_sections()
populate_got(env)
if env.arch.name == "EM_XTENSA":
populate_lit(env)
# Fill in relocations
for sec in env.sections:
if not sec.reloc:
continue
log(
LOG_LEVEL_3,
"{}: {} relocations via {}:".format(sec.filename, sec.name, sec.reloc_name),
)
for r in sec.reloc:
if sec.name.startswith((".text", ".rodata")):
do_relocation_text(env, sec.addr, r)
elif sec.name.startswith(".data.rel.ro"):
do_relocation_data(env, sec.addr, r)
else:
assert 0, sec.name
################################################################################
# .mpy output
class MPYOutput:
def open(self, fname):
self.f = open(fname, "wb")
self.prev_base = -1
self.prev_offset = -1
def close(self):
self.f.close()
def write_bytes(self, buf):
self.f.write(buf)
def write_uint(self, val):
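        # Variable-length encoding: 7-bit groups, most significant group first,
        # with the continuation bit set on all but the last byte.
        # For illustration, write_uint(0x1234) emits b"\xa4\x34".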
b = bytearray()
b.insert(0, val & 0x7F)
val >>= 7
while val:
b.insert(0, 0x80 | (val & 0x7F))
val >>= 7
self.write_bytes(b)
def write_qstr(self, s):
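        # A qstr in the static list is written as the two bytes 0x00 and its
        # index + 1; otherwise it is written as write_uint(len << 1) followed by
        # the ASCII bytes (e.g. "foo" -> b"\x06foo").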
if s in qstrutil.static_qstr_list:
self.write_bytes(bytes([0, qstrutil.static_qstr_list.index(s) + 1]))
else:
s = bytes(s, "ascii")
self.write_uint(len(s) << 1)
self.write_bytes(s)
def write_reloc(self, base, offset, dest, n):
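        # Relocations are delta-compressed: a run of n consecutive words with the
        # same destination becomes a single entry, and the (base, offset) pair is
        # only emitted when the run does not directly follow the previous entry.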
need_offset = not (base == self.prev_base and offset == self.prev_offset + 1)
self.prev_offset = offset + n - 1
if dest <= 2:
dest = (dest << 1) | (n > 1)
else:
assert 6 <= dest <= 127
assert n == 1
dest = dest << 1 | need_offset
assert 0 <= dest <= 0xFE, dest
self.write_bytes(bytes([dest]))
if need_offset:
if base == ".text":
base = 0
elif base == ".rodata":
base = 1
self.write_uint(offset << 1 | base)
if n > 1:
self.write_uint(n)
def build_mpy(env, entry_offset, fmpy, native_qstr_vals, native_qstr_objs):
# Write jump instruction to start of text
jump = env.arch.asm_jump(entry_offset)
env.full_text[: len(jump)] = jump
log(LOG_LEVEL_1, "arch: {}".format(env.arch.name))
log(LOG_LEVEL_1, "text size: {}".format(len(env.full_text)))
if len(env.full_rodata):
log(LOG_LEVEL_1, "rodata size: {}".format(len(env.full_rodata)))
log(LOG_LEVEL_1, "bss size: {}".format(len(env.full_bss)))
log(LOG_LEVEL_1, "GOT entries: {}".format(len(env.got_entries)))
# xxd(env.full_text)
out = MPYOutput()
out.open(fmpy)
# MPY: header
out.write_bytes(
bytearray(
[
ord("C"),
MPY_VERSION,
env.arch.mpy_feature,
MP_SMALL_INT_BITS,
QSTR_WINDOW_SIZE,
]
)
)
# MPY: kind/len
out.write_uint(len(env.full_text) << 2 | (MP_CODE_NATIVE_VIPER - MP_CODE_BYTECODE))
# MPY: machine code
out.write_bytes(env.full_text)
    # MPY: n_qstr_link
out.write_uint(len(native_qstr_vals) + len(native_qstr_objs))
for q in range(len(native_qstr_vals)):
off = env.qstr_val_section.addr + q * env.arch.qstr_entry_size
out.write_uint(off << 2)
out.write_qstr(native_qstr_vals[q])
for q in range(len(native_qstr_objs)):
off = env.qstr_obj_section.addr + q * env.arch.word_size
out.write_uint(off << 2 | 3)
out.write_qstr(native_qstr_objs[q])
# MPY: scope_flags
scope_flags = MP_SCOPE_FLAG_VIPERRELOC
if len(env.full_rodata):
scope_flags |= MP_SCOPE_FLAG_VIPERRODATA
if len(env.full_bss):
scope_flags |= MP_SCOPE_FLAG_VIPERBSS
out.write_uint(scope_flags)
# MPY: n_obj
out.write_uint(0)
# MPY: n_raw_code
out.write_uint(0)
# MPY: rodata and/or bss
if len(env.full_rodata):
rodata_const_table_idx = 1
out.write_uint(len(env.full_rodata))
out.write_bytes(env.full_rodata)
if len(env.full_bss):
bss_const_table_idx = bool(env.full_rodata) + 1
out.write_uint(len(env.full_bss))
# MPY: relocation information
prev_kind = None
for base, addr, kind in env.mpy_relocs:
if isinstance(kind, str) and kind.startswith(".text"):
kind = 0
elif kind in (".rodata", ".data.rel.ro"):
if env.arch.separate_rodata:
kind = rodata_const_table_idx
else:
kind = 0
elif isinstance(kind, str) and kind.startswith(".bss"):
kind = bss_const_table_idx
elif kind == "mp_fun_table":
kind = 6
else:
kind = 7 + kind
assert addr % env.arch.word_size == 0, addr
offset = addr // env.arch.word_size
if kind == prev_kind and base == prev_base and offset == prev_offset + 1:
prev_n += 1
prev_offset += 1
else:
if prev_kind is not None:
out.write_reloc(prev_base, prev_offset - prev_n + 1, prev_kind, prev_n)
prev_kind = kind
prev_base = base
prev_offset = offset
prev_n = 1
if prev_kind is not None:
out.write_reloc(prev_base, prev_offset - prev_n + 1, prev_kind, prev_n)
# MPY: sentinel for end of relocations
out.write_bytes(b"\xff")
out.close()
################################################################################
# main
def do_preprocess(args):
if args.output is None:
assert args.files[0].endswith(".c")
args.output = args.files[0][:-1] + "config.h"
static_qstrs, qstr_vals, qstr_objs = extract_qstrs(args.files)
with open(args.output, "w") as f:
print(
"#include <stdint.h>\n"
"typedef uintptr_t mp_uint_t;\n"
"typedef intptr_t mp_int_t;\n"
"typedef uintptr_t mp_off_t;",
file=f,
)
for i, q in enumerate(static_qstrs):
print("#define %s (%u)" % (q, i + 1), file=f)
for i, q in enumerate(sorted(qstr_vals)):
print("#define %s (mp_native_qstr_val_table[%d])" % (q, i), file=f)
for i, q in enumerate(sorted(qstr_objs)):
print(
"#define MP_OBJ_NEW_QSTR_%s ((mp_obj_t)mp_native_qstr_obj_table[%d])" % (q, i),
file=f,
)
if args.arch == "xtensawin":
qstr_type = "uint32_t" # esp32 can only read 32-bit values from IRAM
else:
qstr_type = "uint16_t"
print("extern const {} mp_native_qstr_val_table[];".format(qstr_type), file=f)
print("extern const mp_uint_t mp_native_qstr_obj_table[];", file=f)
def do_link(args):
if args.output is None:
assert args.files[0].endswith(".o")
args.output = args.files[0][:-1] + "mpy"
native_qstr_vals = []
native_qstr_objs = []
if args.qstrs is not None:
with open(args.qstrs) as f:
for l in f:
m = re.match(r"#define MP_QSTR_([A-Za-z0-9_]*) \(mp_native_", l)
if m:
native_qstr_vals.append(m.group(1))
else:
m = re.match(r"#define MP_OBJ_NEW_QSTR_MP_QSTR_([A-Za-z0-9_]*)", l)
if m:
native_qstr_objs.append(m.group(1))
log(LOG_LEVEL_2, "qstr vals: " + ", ".join(native_qstr_vals))
log(LOG_LEVEL_2, "qstr objs: " + ", ".join(native_qstr_objs))
env = LinkEnv(args.arch)
try:
for file in args.files:
load_object_file(env, file)
link_objects(env, len(native_qstr_vals), len(native_qstr_objs))
build_mpy(env, env.find_addr("mpy_init"), args.output, native_qstr_vals, native_qstr_objs)
except LinkError as er:
print("LinkError:", er.args[0])
sys.exit(1)
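# Illustrative invocations (the script filename here is assumed; flags are those
# defined in main() below):
#   python mpy_ld.py --arch x64 --preprocess -o module.config.h module.c
#   python mpy_ld.py --arch x64 --qstrs module.config.h -o module.mpy module.o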
def main():
import argparse
    cmd_parser = argparse.ArgumentParser(description="Link native object files into a .mpy file.")
cmd_parser.add_argument(
"--verbose", "-v", action="count", default=1, help="increase verbosity"
)
cmd_parser.add_argument("--arch", default="x64", help="architecture")
cmd_parser.add_argument("--preprocess", action="store_true", help="preprocess source files")
cmd_parser.add_argument("--qstrs", default=None, help="file defining additional qstrs")
cmd_parser.add_argument(
"--output", "-o", default=None, help="output .mpy file (default to input with .o->.mpy)"
)
cmd_parser.add_argument("files", nargs="+", help="input files")
args = cmd_parser.parse_args()
global log_level
log_level = args.verbose
if args.preprocess:
do_preprocess(args)
else:
do_link(args)
if __name__ == "__main__":
main()
| 1.539063 | 2 |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.