code (stringlengths 2–1.05M) | repo_name (stringlengths 5–104) | path (stringlengths 4–251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int32 2–1.05M)
---|---|---|---|---|---
import hashlib
from core.analytics import InlineAnalytics
from core.observables import Hash
HASH_TYPES_DICT = {
"md5": hashlib.md5,
"sha1": hashlib.sha1,
"sha256": hashlib.sha256,
"sha512": hashlib.sha512,
}
class HashFile(InlineAnalytics):
default_values = {
"name": "HashFile",
"description": "Extracts MD5, SHA1, SHA256, SHA512 hashes from file",
}
ACTS_ON = ["File", "Certificate"]
@staticmethod
def each(f):
if f.body:
f.hashes = []
for hash_type, h in HashFile.extract_hashes(f.body.contents):
hash_object = Hash.get_or_create(value=h.hexdigest())
hash_object.add_source("analytics")
hash_object.save()
f.active_link_to(
hash_object,
"{} hash".format(hash_type.upper()),
"HashFile",
clean_old=False,
)
f.hashes.append({"hash": hash_type, "value": h.hexdigest()})
f.save()
@staticmethod
def extract_hashes(body_contents):
hashers = {k: HASH_TYPES_DICT[k]() for k in HASH_TYPES_DICT}
while True:
chunk = body_contents.read(512 * 16)
if not chunk:
break
for h in hashers.values():
h.update(chunk)
return hashers.items()
| yeti-platform/yeti | plugins/analytics/public/hash_file.py | Python | apache-2.0 | 1,405 |
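# --- Illustrative usage (editor's addition, not part of the file above) ---
# A minimal, self-contained sketch of the chunked-hashing pattern used by
# HashFile.extract_hashes: stream a file-like object through several hashlib
# hashers in 8 KiB chunks. The BytesIO payload and function name are made up.
import hashlib
import io

def chunked_hashes(stream, algorithms=("md5", "sha1", "sha256", "sha512")):
    hashers = {name: hashlib.new(name) for name in algorithms}
    while True:
        chunk = stream.read(512 * 16)  # same 8 KiB chunk size as above
        if not chunk:
            break
        for h in hashers.values():
            h.update(chunk)
    return {name: h.hexdigest() for name, h in hashers.items()}

print(chunked_hashes(io.BytesIO(b"example payload")))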
import zerorpc
import gevent.queue
import logging
import sys
logging.basicConfig()
# root logger
logger = logging.getLogger()
# set the minimum level for root logger so it will be possible for a client
# to subscribe and receive logs for any log level
logger.setLevel(0)
class QueueingLogHandler(logging.Handler):
""" A simple logging handler which puts all emitted logs into a
gevent queue.
"""
def __init__(self, queue, level, formatter):
super(QueueingLogHandler, self).__init__()
self._queue = queue
self.setLevel(level)
self.setFormatter(formatter)
def emit(self, record):
msg = self.format(record)
self._queue.put_nowait(msg)
def close(self):
super(QueueingLogHandler, self).close()
self._queue.put_nowait(None)
@property
def emitted(self):
return self._queue
class TestService(object):
_HANDLER_CLASS = QueueingLogHandler
_DEFAULT_FORMAT = '%(name)s - %(levelname)s - %(asctime)s - %(message)s'
logger = logging.getLogger("service")
def __init__(self):
self._logging_handlers = set()
def test(self, logger_name, logger_level, message):
logger = logging.getLogger(logger_name)
getattr(logger, logger_level.lower())(message)
def available_loggers(self):
""" List of initalized loggers """
return logging.getLogger().manager.loggerDict.keys()
def close_log_streams(self):
""" Closes all log_stream streams. """
while self._logging_handlers:
self._logging_handlers.pop().close()
@zerorpc.stream
def log_stream(self, logger_name, level_name, format_str):
""" Attaches a log handler to the specified logger and sends emitted logs
        back as a stream.
"""
if logger_name != "" and logger_name not in self.available_loggers():
raise ValueError("logger {0} is not available".format(logger_name))
level_name_upper = level_name.upper() if level_name else "NOTSET"
try:
level = getattr(logging, level_name_upper)
        except AttributeError:
raise AttributeError("log level {0} is not available".format(level_name_upper))
q = gevent.queue.Queue()
fmt = format_str if format_str.strip() else self._DEFAULT_FORMAT
logger = logging.getLogger(logger_name)
formatter = logging.Formatter(fmt)
handler = self._HANDLER_CLASS(q, level, formatter)
logger.addHandler(handler)
self._logging_handlers.add(handler)
self.logger.debug("new subscriber for {0}/{1}".format(logger_name or "root", level_name_upper))
try:
for msg in handler.emitted:
if msg is None:
return
yield msg
finally:
self._logging_handlers.discard(handler)
handler.close()
self.logger.debug("subscription finished for {0}/{1}".format(logger_name or "root", level_name_upper))
if __name__ == "__main__":
service = TestService()
server = zerorpc.Server(service)
server.bind(sys.argv[1])
logger.warning("starting service")
try:
server.run()
    except BaseException as e:
logger.error(str(e))
finally:
logger.warning("shutting down")
| benctamas/zerorpc-logging | logstream_test.py | Python | apache-2.0 | 3,399 |
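# --- Illustrative usage (editor's addition, not part of the file above) ---
# A sketch of a zerorpc client consuming the streamed logs from TestService.
# The tcp endpoint is an assumption; the server side would be started with
# something like `python logstream_test.py tcp://127.0.0.1:4242`. zerorpc
# exposes @zerorpc.stream methods as plain iterables on the client.
import zerorpc

client = zerorpc.Client()
client.connect("tcp://127.0.0.1:4242")  # hypothetical endpoint
# Another process could trigger records via client.test("service", "info", "hi").
for line in client.log_stream("", "info", ""):
    # Blocks until the service emits a record; an empty format string falls
    # back to TestService._DEFAULT_FORMAT on the server side.
    print(line)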
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.gaming.v1",
manifest={
"OperationMetadata",
"OperationStatus",
"LabelSelector",
"RealmSelector",
"Schedule",
"SpecSource",
"TargetDetails",
"TargetState",
"DeployedFleetDetails",
},
)
class OperationMetadata(proto.Message):
r"""Represents the metadata of the long-running operation.
Attributes:
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. The time the operation was
created.
end_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. The time the operation finished
running.
target (str):
Output only. Server-defined resource path for
the target of the operation.
verb (str):
Output only. Name of the verb executed by the
operation.
status_message (str):
Output only. Human-readable status of the
operation, if any.
requested_cancellation (bool):
Output only. Identifies whether the user has requested
cancellation of the operation. Operations that have
successfully been cancelled have [Operation.error][] value
with a [google.rpc.Status.code][google.rpc.Status.code] of
1, corresponding to ``Code.CANCELLED``.
api_version (str):
Output only. API version used to start the
operation.
unreachable (Sequence[str]):
Output only. List of Locations that could not
be reached.
operation_status (Sequence[google.cloud.gaming_v1.types.OperationMetadata.OperationStatusEntry]):
Output only. Operation status for Game
Services API operations. Operation status is in
the form of key-value pairs where keys are
resource IDs and the values show the status of
the operation. In case of failures, the value
includes an error code and error message.
"""
create_time = proto.Field(proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp,)
end_time = proto.Field(proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,)
target = proto.Field(proto.STRING, number=3,)
verb = proto.Field(proto.STRING, number=4,)
status_message = proto.Field(proto.STRING, number=5,)
requested_cancellation = proto.Field(proto.BOOL, number=6,)
api_version = proto.Field(proto.STRING, number=7,)
unreachable = proto.RepeatedField(proto.STRING, number=8,)
operation_status = proto.MapField(
proto.STRING, proto.MESSAGE, number=9, message="OperationStatus",
)
class OperationStatus(proto.Message):
r"""
Attributes:
done (bool):
Output only. Whether the operation is done or
still in progress.
error_code (google.cloud.gaming_v1.types.OperationStatus.ErrorCode):
The error code in case of failures.
error_message (str):
The human-readable error message.
"""
class ErrorCode(proto.Enum):
r""""""
ERROR_CODE_UNSPECIFIED = 0
INTERNAL_ERROR = 1
PERMISSION_DENIED = 2
CLUSTER_CONNECTION = 3
done = proto.Field(proto.BOOL, number=1,)
error_code = proto.Field(proto.ENUM, number=2, enum=ErrorCode,)
error_message = proto.Field(proto.STRING, number=3,)
class LabelSelector(proto.Message):
r"""The label selector, used to group labels on the resources.
Attributes:
labels (Sequence[google.cloud.gaming_v1.types.LabelSelector.LabelsEntry]):
Resource labels for this selector.
"""
labels = proto.MapField(proto.STRING, proto.STRING, number=1,)
class RealmSelector(proto.Message):
r"""The realm selector, used to match realm resources.
Attributes:
realms (Sequence[str]):
List of realms to match.
"""
realms = proto.RepeatedField(proto.STRING, number=1,)
class Schedule(proto.Message):
r"""The schedule of a recurring or one time event. The event's time span
is specified by start_time and end_time. If the scheduled event's
timespan is larger than the cron_spec + cron_job_duration, the event
will be recurring. If only cron_spec + cron_job_duration are
specified, the event is effective starting at the local time
specified by cron_spec, and is recurring.
::
start_time|-------[cron job]-------[cron job]-------[cron job]---|end_time
cron job: cron spec start time + duration
Attributes:
start_time (google.protobuf.timestamp_pb2.Timestamp):
The start time of the event.
end_time (google.protobuf.timestamp_pb2.Timestamp):
The end time of the event.
cron_job_duration (google.protobuf.duration_pb2.Duration):
The duration for the cron job event. The
duration of the event is effective after the
cron job's start time.
cron_spec (str):
The cron definition of the scheduled event.
See https://en.wikipedia.org/wiki/Cron. Cron
spec specifies the local time as defined by the
realm.
"""
start_time = proto.Field(proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp,)
end_time = proto.Field(proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,)
cron_job_duration = proto.Field(
proto.MESSAGE, number=3, message=duration_pb2.Duration,
)
cron_spec = proto.Field(proto.STRING, number=4,)
class SpecSource(proto.Message):
r"""Encapsulates Agones fleet spec and Agones autoscaler spec
sources.
Attributes:
game_server_config_name (str):
The game server config resource. Uses the form:
``projects/{project}/locations/{location}/gameServerDeployments/{deployment_id}/configs/{config_id}``.
name (str):
            The name of the Agones fleet config or Agones
scaling config used to derive the Agones fleet
or Agones autoscaler spec.
"""
game_server_config_name = proto.Field(proto.STRING, number=1,)
name = proto.Field(proto.STRING, number=2,)
class TargetDetails(proto.Message):
r"""Details about the Agones resources.
Attributes:
game_server_cluster_name (str):
The game server cluster name. Uses the form:
``projects/{project}/locations/{location}/realms/{realm}/gameServerClusters/{cluster}``.
game_server_deployment_name (str):
The game server deployment name. Uses the form:
``projects/{project}/locations/{location}/gameServerDeployments/{deployment_id}``.
fleet_details (Sequence[google.cloud.gaming_v1.types.TargetDetails.TargetFleetDetails]):
Agones fleet details for game server clusters
and game server deployments.
"""
class TargetFleetDetails(proto.Message):
r"""Details of the target Agones fleet.
Attributes:
fleet (google.cloud.gaming_v1.types.TargetDetails.TargetFleetDetails.TargetFleet):
Reference to target Agones fleet.
autoscaler (google.cloud.gaming_v1.types.TargetDetails.TargetFleetDetails.TargetFleetAutoscaler):
Reference to target Agones fleet autoscaling
policy.
"""
class TargetFleet(proto.Message):
r"""Target Agones fleet specification.
Attributes:
name (str):
The name of the Agones fleet.
spec_source (google.cloud.gaming_v1.types.SpecSource):
Encapsulates the source of the Agones fleet
spec. The Agones fleet spec source.
"""
name = proto.Field(proto.STRING, number=1,)
spec_source = proto.Field(proto.MESSAGE, number=2, message="SpecSource",)
class TargetFleetAutoscaler(proto.Message):
r"""Target Agones autoscaler policy reference.
Attributes:
name (str):
The name of the Agones autoscaler.
spec_source (google.cloud.gaming_v1.types.SpecSource):
Encapsulates the source of the Agones fleet
spec. Details about the Agones autoscaler spec.
"""
name = proto.Field(proto.STRING, number=1,)
spec_source = proto.Field(proto.MESSAGE, number=2, message="SpecSource",)
fleet = proto.Field(
proto.MESSAGE,
number=1,
message="TargetDetails.TargetFleetDetails.TargetFleet",
)
autoscaler = proto.Field(
proto.MESSAGE,
number=2,
message="TargetDetails.TargetFleetDetails.TargetFleetAutoscaler",
)
game_server_cluster_name = proto.Field(proto.STRING, number=1,)
game_server_deployment_name = proto.Field(proto.STRING, number=2,)
fleet_details = proto.RepeatedField(
proto.MESSAGE, number=3, message=TargetFleetDetails,
)
class TargetState(proto.Message):
r"""Encapsulates the Target state.
Attributes:
details (Sequence[google.cloud.gaming_v1.types.TargetDetails]):
Details about Agones fleets.
"""
details = proto.RepeatedField(proto.MESSAGE, number=1, message="TargetDetails",)
class DeployedFleetDetails(proto.Message):
r"""Details of the deployed Agones fleet.
Attributes:
deployed_fleet (google.cloud.gaming_v1.types.DeployedFleetDetails.DeployedFleet):
Information about the Agones fleet.
deployed_autoscaler (google.cloud.gaming_v1.types.DeployedFleetDetails.DeployedFleetAutoscaler):
Information about the Agones autoscaler for
that fleet.
"""
class DeployedFleet(proto.Message):
r"""Agones fleet specification and details.
Attributes:
fleet (str):
The name of the Agones fleet.
fleet_spec (str):
The fleet spec retrieved from the Agones
fleet.
spec_source (google.cloud.gaming_v1.types.SpecSource):
The source spec that is used to create the
Agones fleet. The GameServerConfig resource may
no longer exist in the system.
status (google.cloud.gaming_v1.types.DeployedFleetDetails.DeployedFleet.DeployedFleetStatus):
The current status of the Agones fleet.
Includes count of game servers in various
states.
"""
class DeployedFleetStatus(proto.Message):
r"""DeployedFleetStatus has details about the Agones fleets such
as how many are running, how many allocated, and so on.
Attributes:
ready_replicas (int):
The number of GameServer replicas in the
READY state in this fleet.
allocated_replicas (int):
The number of GameServer replicas in the
ALLOCATED state in this fleet.
reserved_replicas (int):
The number of GameServer replicas in the
RESERVED state in this fleet. Reserved instances
won't be deleted on scale down, but won't cause
an autoscaler to scale up.
replicas (int):
The total number of current GameServer
replicas in this fleet.
"""
ready_replicas = proto.Field(proto.INT64, number=1,)
allocated_replicas = proto.Field(proto.INT64, number=2,)
reserved_replicas = proto.Field(proto.INT64, number=3,)
replicas = proto.Field(proto.INT64, number=4,)
fleet = proto.Field(proto.STRING, number=1,)
fleet_spec = proto.Field(proto.STRING, number=2,)
spec_source = proto.Field(proto.MESSAGE, number=3, message="SpecSource",)
status = proto.Field(
proto.MESSAGE,
number=5,
message="DeployedFleetDetails.DeployedFleet.DeployedFleetStatus",
)
class DeployedFleetAutoscaler(proto.Message):
r"""Details about the Agones autoscaler.
Attributes:
autoscaler (str):
The name of the Agones autoscaler.
spec_source (google.cloud.gaming_v1.types.SpecSource):
The source spec that is used to create the
autoscaler. The GameServerConfig resource may no
longer exist in the system.
fleet_autoscaler_spec (str):
The autoscaler spec retrieved from Agones.
"""
autoscaler = proto.Field(proto.STRING, number=1,)
spec_source = proto.Field(proto.MESSAGE, number=4, message="SpecSource",)
fleet_autoscaler_spec = proto.Field(proto.STRING, number=3,)
deployed_fleet = proto.Field(proto.MESSAGE, number=1, message=DeployedFleet,)
deployed_autoscaler = proto.Field(
proto.MESSAGE, number=2, message=DeployedFleetAutoscaler,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| googleapis/python-game-servers | google/cloud/gaming_v1/types/common.py | Python | apache-2.0 | 13,962 |
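# --- Illustrative usage (editor's addition, not part of the file above) ---
# A small sketch of constructing the proto-plus messages defined in this
# module; field values here are made up. proto-plus messages accept their
# fields as keyword arguments and expose them as attributes.
from google.protobuf import duration_pb2
from google.cloud.gaming_v1.types import common

schedule = common.Schedule(
    cron_spec="0 0 * * *",                                  # daily, realm-local time
    cron_job_duration=duration_pb2.Duration(seconds=3600),  # one-hour event
)
status = common.OperationStatus(
    done=True,
    error_code=common.OperationStatus.ErrorCode.ERROR_CODE_UNSPECIFIED,
)
print(schedule.cron_spec, status.done)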
from changes.api.serializer import Crumbler, register
from changes.models.node import Cluster
@register(Cluster)
class ClusterCrumbler(Crumbler):
def crumble(self, instance, attrs):
return {
'id': instance.id.hex,
'name': instance.label,
'dateCreated': instance.date_created,
}
| dropbox/changes | changes/api/serializer/models/cluster.py | Python | apache-2.0 | 336 |
"""Tests for the CSRF helper."""
import unittest
import mock
import webapp2
import webtest
from ctc.helpers import csrf
from ctc.testing import testutil
MOCKED_TIME = 123
# Tests don't need docstrings, so pylint: disable=C0111
# Tests can test protected members, so pylint: disable=W0212
class CsrfTests(testutil.CtcTestCase):
# Helpers
class TestHandler(csrf.CsrfHandler):
"""A handler for testing whether or not requests are CSRF protected."""
def get(self):
self.response.write('CSRF Token:%s' % self.csrf_token)
def post(self):
pass
def put(self):
pass
def delete(self):
pass
def setUp(self):
super(CsrfTests, self).setUp()
# The CSRF library uses the time, so we mock it out.
self.time_mock = mock.Mock()
csrf.time = self.time_mock
self.time_mock.time = mock.Mock(return_value=MOCKED_TIME)
# The handler tests need a WSGIApplication.
app = webapp2.WSGIApplication([('/', self.TestHandler)])
self.testapp = webtest.TestApp(app)
def test_get_secret_key(self):
first_key = csrf._get_secret_key()
self.assertEqual(len(first_key), 32)
second_key = csrf._get_secret_key()
self.assertEqual(first_key, second_key)
def test_tokens_are_equal(self):
# It should fail if the tokens aren't equal length.
self.assertFalse(csrf._tokens_are_equal('a', 'ab'))
# It should fail if the tokens are different.
self.assertFalse(csrf._tokens_are_equal('abcde', 'abcdf'))
# It should succeed if the tokens are the same.
self.assertTrue(csrf._tokens_are_equal('abcde', 'abcde'))
# Make Token
def test_make_token_includes_time(self):
self.login()
# It should get the current time.
token1 = csrf.make_token()
self.assertEqual(token1.split()[-1], str(MOCKED_TIME))
# It should use the provided time.
token2 = csrf.make_token(token_time='456')
self.assertEqual(token2.split()[-1], '456')
# Different time should cause the digest to be different.
self.assertNotEqual(token1.split()[0], token2.split()[0])
token3 = csrf.make_token(token_time='456')
self.assertEqual(token2, token3)
def test_make_token_requires_login(self):
token1 = csrf.make_token()
self.assertIsNone(token1)
self.login()
token2 = csrf.make_token()
self.assertIsNotNone(token2)
def test_make_token_includes_path(self):
self.login()
# It should get the current path.
self.testbed.setup_env(PATH_INFO='/action/1', overwrite=True)
token1 = csrf.make_token(token_time='123')
self.testbed.setup_env(PATH_INFO='/action/23', overwrite=True)
token2 = csrf.make_token(token_time='123')
token3 = csrf.make_token(token_time='123')
self.assertNotEqual(token1, token2)
self.assertEqual(token2, token3)
# It should let the client pass in a path.
token4 = csrf.make_token(path='/action/4', token_time='123')
token5 = csrf.make_token(path='/action/56', token_time='123')
token6 = csrf.make_token(path='/action/56', token_time='123')
self.assertNotEqual(token4, token5)
self.assertEqual(token5, token6)
# Token Is Valid
def test_token_is_valid(self):
self.login()
# Token is required.
self.assertFalse(csrf.token_is_valid(None))
# Token needs to have a timestamp on it.
self.assertFalse(csrf.token_is_valid('hello'))
# The timestamp needs to be within the current date range.
self.time_mock.time = mock.Mock(return_value=9999999999999)
self.assertFalse(csrf.token_is_valid('hello 123'))
# The user needs to be logged in.
token = csrf.make_token()
self.logout()
self.assertFalse(csrf.token_is_valid(token))
self.login()
# Modifying the token should break everything.
modified_token = '0' + token[1:]
if token == modified_token:
modified_token = '1' + token[1:]
self.assertFalse(csrf.token_is_valid(modified_token))
# The original token that we got should work.
self.assertTrue(csrf.token_is_valid(token))
def test_get_has_csrf_token(self):
self.login()
response = self.testapp.get('/', status=200).body
self.assertIn('CSRF Token:', response)
self.assertEqual(response.split(':')[-1], csrf.make_token())
def test_mutators_require_csrf_token(self):
self.login()
self.testapp.put('/', status=403)
self.testapp.post('/', status=403)
self.testapp.delete('/', status=403)
csrf_param = 'csrf_token=' + csrf.make_token(path='/')
self.testapp.put('/', params=csrf_param, status=200)
self.testapp.post('/', params=csrf_param, status=200)
# Though the spec allows DELETE to have a body, it tends to be ignored
# by servers (http://stackoverflow.com/questions/299628), and webapp2
# ignores it as well, so we have to put the params in the URL.
self.testapp.delete('/?' + csrf_param, status=200)
if __name__ == '__main__':
unittest.main()
| samking/code-the-change-projects | ctc/helpers/csrf_test.py | Python | apache-2.0 | 5,296 |
# encoding: utf-8
u'''MCL — Publication Folder'''
from ._base import IIngestableFolder, Ingestor, IngestableFolderView
from .interfaces import IPublication
from five import grok
class IPublicationFolder(IIngestableFolder):
u'''Folder containing publications.'''
class PublicationIngestor(Ingestor):
u'''RDF ingestor for publication.'''
grok.context(IPublicationFolder)
def getContainedObjectInterface(self):
return IPublication
class View(IngestableFolderView):
    u'''View for a publication folder'''
grok.context(IPublicationFolder)
| MCLConsortium/mcl-site | src/jpl.mcl.site.knowledge/src/jpl/mcl/site/knowledge/publicationfolder.py | Python | apache-2.0 | 575 |
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.exterior_equipment import ExteriorFuelEquipment
log = logging.getLogger(__name__)
class TestExteriorFuelEquipment(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_exteriorfuelequipment(self):
pyidf.validation_level = ValidationLevel.error
obj = ExteriorFuelEquipment()
# alpha
var_name = "Name"
obj.name = var_name
# alpha
var_fuel_use_type = "Electricity"
obj.fuel_use_type = var_fuel_use_type
# object-list
var_schedule_name = "object-list|Schedule Name"
obj.schedule_name = var_schedule_name
# real
var_design_level = 0.0
obj.design_level = var_design_level
# alpha
var_enduse_subcategory = "End-Use Subcategory"
obj.enduse_subcategory = var_enduse_subcategory
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.exteriorfuelequipments[0].name, var_name)
self.assertEqual(idf2.exteriorfuelequipments[0].fuel_use_type, var_fuel_use_type)
self.assertEqual(idf2.exteriorfuelequipments[0].schedule_name, var_schedule_name)
self.assertAlmostEqual(idf2.exteriorfuelequipments[0].design_level, var_design_level)
self.assertEqual(idf2.exteriorfuelequipments[0].enduse_subcategory, var_enduse_subcategory) | rbuffat/pyidf | tests/test_exteriorfuelequipment.py | Python | apache-2.0 | 1,733 |
from collections import defaultdict
import codecs
def count(corpus, output_file):
debug = False
dic = defaultdict(int)
other = set()
fout = codecs.open(output_file, 'w', 'utf8')
for line in open(corpus, 'r'):
words = line.split()
for word in words:
if len(word) % 3 == 0:
for i in xrange(len(word) / 3):
                    # count consecutive (non-overlapping) 3-byte chunks, one per character
                    dic[word[3 * i:3 * i + 3]] += 1
else:
other.add(word)
fout.write('%i %i\n' % (len(dic), len(other)))
record_list = [(y, x) for x, y in dic.items()]
record_list.sort()
record_list.reverse()
i = 0
for x, y in record_list:
#print y.decode('utf8'), x
try:
yy = y.decode('GBK')
except:
print y
yy = 'N/A'
fout.write('%s %i\n' % (yy, x))
i += 1
if i > 10 and debug:
break
other_list = list(other)
other_list.sort()
for item in other_list:
#print item.decode('utf8')
item2 = item.decode('utf8')
fout.write(item2)
fout.write('\n')
i += 1
if i > 20 and debug:
break
fout.close()
if __name__ =='__main__':
count('data/train.zh_parsed', 'output/count.zh')
count('data/train.ja_parsed', 'output/count.ja')
| jileiwang/CJ-Glo | tools/character_count.py | Python | apache-2.0 | 1,312 |
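# --- Illustrative note (editor's addition, not part of the file above) ---
# The chunking above is meant to split a word into consecutive 3-byte pieces
# (one per encoded character), not overlapping windows. A tiny Python 3
# sketch with a made-up ASCII token:
from collections import defaultdict

counts = defaultdict(int)
word = "ABCDEF"  # stands in for a 6-byte, two-character corpus token
if len(word) % 3 == 0:
    for i in range(len(word) // 3):
        counts[word[3 * i:3 * i + 3]] += 1
print(dict(counts))  # {'ABC': 1, 'DEF': 1}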
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import collections
import io
import json
import time
try:
import fastavro
except ImportError: # pragma: NO COVER
fastavro = None
import google.api_core.exceptions
import google.rpc.error_details_pb2
try:
import pandas
except ImportError: # pragma: NO COVER
pandas = None
try:
import pyarrow
except ImportError: # pragma: NO COVER
pyarrow = None
_STREAM_RESUMPTION_EXCEPTIONS = (
google.api_core.exceptions.ServiceUnavailable,
# Caused by transport-level error. No status code was received.
# https://github.com/googleapis/python-bigquery-storage/issues/262
google.api_core.exceptions.Unknown,
)
# The Google API endpoint can unexpectedly close long-running HTTP/2 streams.
# Unfortunately, this condition is surfaced to the caller as an internal error
# by gRPC. We don't want to resume on all internal errors, so instead we look
# for error message that we know are caused by problems that are safe to
# reconnect.
_STREAM_RESUMPTION_INTERNAL_ERROR_MESSAGES = (
# See: https://github.com/googleapis/google-cloud-python/pull/9994
"RST_STREAM",
)
_FASTAVRO_REQUIRED = (
"fastavro is required to parse ReadRowResponse messages with Avro bytes."
)
_PANDAS_REQUIRED = "pandas is required to create a DataFrame"
_PYARROW_REQUIRED = (
"pyarrow is required to parse ReadRowResponse messages with Arrow bytes."
)
class ReadRowsStream(object):
"""A stream of results from a read rows request.
This stream is an iterable of
:class:`~google.cloud.bigquery_storage_v1.types.ReadRowsResponse`.
Iterate over it to fetch all row messages.
If the fastavro library is installed, use the
:func:`~google.cloud.bigquery_storage_v1.reader.ReadRowsStream.rows()`
method to parse all messages into a stream of row dictionaries.
If the pandas and fastavro libraries are installed, use the
:func:`~google.cloud.bigquery_storage_v1.reader.ReadRowsStream.to_dataframe()`
method to parse all messages into a :class:`pandas.DataFrame`.
This object should not be created directly, but is returned by
other methods in this library.
"""
def __init__(
self, client, name, offset, read_rows_kwargs, retry_delay_callback=None
):
"""Construct a ReadRowsStream.
Args:
client ( \
~google.cloud.bigquery_storage_v1.services. \
big_query_read.BigQueryReadClient \
):
A GAPIC client used to reconnect to a ReadRows stream. This
must be the GAPIC client to avoid a circular dependency on
this class.
name (str):
Required. Stream ID from which rows are being read.
offset (int):
Required. Position in the stream to start
reading from. The offset requested must be less than the last
row read from ReadRows. Requesting a larger offset is
undefined.
read_rows_kwargs (dict):
Keyword arguments to use when reconnecting to a ReadRows
stream.
retry_delay_callback (Optional[Callable[[float], None]]):
If the client receives a retryable error that asks the client to
delay its next attempt and retry_delay_callback is not None,
ReadRowsStream will call retry_delay_callback with the delay
duration (in seconds) before it starts sleeping until the next
attempt.
Returns:
Iterable[ \
~google.cloud.bigquery_storage.types.ReadRowsResponse \
]:
A sequence of row messages.
"""
# Make a copy of the read position so that we can update it without
# mutating the original input.
self._client = client
self._name = name
self._offset = offset
self._read_rows_kwargs = read_rows_kwargs
self._retry_delay_callback = retry_delay_callback
self._wrapped = None
def __iter__(self):
"""An iterable of messages.
Returns:
Iterable[ \
~google.cloud.bigquery_storage_v1.types.ReadRowsResponse \
]:
A sequence of row messages.
"""
# Infinite loop to reconnect on reconnectable errors while processing
# the row stream.
if self._wrapped is None:
self._reconnect()
while True:
try:
for message in self._wrapped:
rowcount = message.row_count
self._offset += rowcount
yield message
return # Made it through the whole stream.
except google.api_core.exceptions.InternalServerError as exc:
resumable_error = any(
resumable_message in exc.message
for resumable_message in _STREAM_RESUMPTION_INTERNAL_ERROR_MESSAGES
)
if not resumable_error:
raise
except _STREAM_RESUMPTION_EXCEPTIONS:
# Transient error, so reconnect to the stream.
pass
except Exception as exc:
if not self._resource_exhausted_exception_is_retryable(exc):
raise
self._reconnect()
def _reconnect(self):
"""Reconnect to the ReadRows stream using the most recent offset."""
while True:
try:
self._wrapped = self._client.read_rows(
read_stream=self._name,
offset=self._offset,
**self._read_rows_kwargs
)
break
except Exception as exc:
if not self._resource_exhausted_exception_is_retryable(exc):
raise
def _resource_exhausted_exception_is_retryable(self, exc):
if isinstance(exc, google.api_core.exceptions.ResourceExhausted):
# ResourceExhausted errors are only retried if a valid
# RetryInfo is provided with the error.
#
# TODO: Remove hasattr logic when we require google-api-core >= 2.2.0.
# ResourceExhausted added details/_details in google-api-core 2.2.0.
details = None
if hasattr(exc, "details"):
details = exc.details
elif hasattr(exc, "_details"):
details = exc._details
if details is not None:
for detail in details:
if isinstance(detail, google.rpc.error_details_pb2.RetryInfo):
retry_delay = detail.retry_delay
if retry_delay is not None:
delay = max(
0,
float(retry_delay.seconds)
+ (float(retry_delay.nanos) / 1e9),
)
if self._retry_delay_callback:
self._retry_delay_callback(delay)
time.sleep(delay)
return True
return False
def rows(self, read_session=None):
"""Iterate over all rows in the stream.
This method requires the fastavro library in order to parse row
messages in avro format. For arrow format messages, the pyarrow
library is required.
.. warning::
DATETIME columns are not supported. They are currently parsed as
strings in the fastavro library.
Args:
read_session ( \
Optional[~google.cloud.bigquery_storage_v1.types.ReadSession] \
):
DEPRECATED.
This argument was used to specify the schema of the rows in the
stream, but now the first message in a read stream contains
this information.
Returns:
Iterable[Mapping]:
A sequence of rows, represented as dictionaries.
"""
return ReadRowsIterable(self, read_session=read_session)
def to_arrow(self, read_session=None):
"""Create a :class:`pyarrow.Table` of all rows in the stream.
This method requires the pyarrow library and a stream using the Arrow
format.
Args:
read_session ( \
~google.cloud.bigquery_storage_v1.types.ReadSession \
):
DEPRECATED.
This argument was used to specify the schema of the rows in the
stream, but now the first message in a read stream contains
this information.
Returns:
pyarrow.Table:
A table of all rows in the stream.
"""
return self.rows(read_session=read_session).to_arrow()
def to_dataframe(self, read_session=None, dtypes=None):
"""Create a :class:`pandas.DataFrame` of all rows in the stream.
        This method requires the pandas library to create a data frame and the
fastavro library to parse row messages.
.. warning::
DATETIME columns are not supported. They are currently parsed as
strings.
Args:
read_session ( \
~google.cloud.bigquery_storage_v1.types.ReadSession \
):
DEPRECATED.
This argument was used to specify the schema of the rows in the
stream, but now the first message in a read stream contains
this information.
dtypes ( \
Map[str, Union[str, pandas.Series.dtype]] \
):
Optional. A dictionary of column names pandas ``dtype``s. The
provided ``dtype`` is used when constructing the series for
the column specified. Otherwise, the default pandas behavior
is used.
Returns:
pandas.DataFrame:
A data frame of all rows in the stream.
"""
if pandas is None:
raise ImportError(_PANDAS_REQUIRED)
return self.rows(read_session=read_session).to_dataframe(dtypes=dtypes)
class ReadRowsIterable(object):
"""An iterable of rows from a read session.
Args:
reader (google.cloud.bigquery_storage_v1.reader.ReadRowsStream):
A read rows stream.
read_session ( \
Optional[~google.cloud.bigquery_storage_v1.types.ReadSession] \
):
DEPRECATED.
This argument was used to specify the schema of the rows in the
stream, but now the first message in a read stream contains
this information.
"""
# This class is modelled after the google.cloud.bigquery.table.RowIterator
# and aims to be API compatible where possible.
def __init__(self, reader, read_session=None):
self._reader = reader
if read_session is not None:
self._stream_parser = _StreamParser.from_read_session(read_session)
else:
self._stream_parser = None
@property
def pages(self):
"""A generator of all pages in the stream.
Returns:
types.GeneratorType[google.cloud.bigquery_storage_v1.ReadRowsPage]:
A generator of pages.
"""
# Each page is an iterator of rows. But also has num_items, remaining,
# and to_dataframe.
for message in self._reader:
# Only the first message contains the schema, which is needed to
# decode the messages.
if not self._stream_parser:
self._stream_parser = _StreamParser.from_read_rows_response(message)
yield ReadRowsPage(self._stream_parser, message)
def __iter__(self):
"""Iterator for each row in all pages."""
for page in self.pages:
for row in page:
yield row
def to_arrow(self):
"""Create a :class:`pyarrow.Table` of all rows in the stream.
This method requires the pyarrow library and a stream using the Arrow
format.
Returns:
pyarrow.Table:
A table of all rows in the stream.
"""
record_batches = []
for page in self.pages:
record_batches.append(page.to_arrow())
if record_batches:
return pyarrow.Table.from_batches(record_batches)
# No data, return an empty Table.
self._stream_parser._parse_arrow_schema()
return pyarrow.Table.from_batches([], schema=self._stream_parser._schema)
def to_dataframe(self, dtypes=None):
"""Create a :class:`pandas.DataFrame` of all rows in the stream.
        This method requires the pandas library to create a data frame and the
fastavro library to parse row messages.
.. warning::
DATETIME columns are not supported. They are currently parsed as
strings in the fastavro library.
Args:
dtypes ( \
Map[str, Union[str, pandas.Series.dtype]] \
):
Optional. A dictionary of column names pandas ``dtype``s. The
provided ``dtype`` is used when constructing the series for
the column specified. Otherwise, the default pandas behavior
is used.
Returns:
pandas.DataFrame:
A data frame of all rows in the stream.
"""
if pandas is None:
raise ImportError(_PANDAS_REQUIRED)
if dtypes is None:
dtypes = {}
# If it's an Arrow stream, calling to_arrow, then converting to a
# pandas dataframe is about 2x faster. This is because pandas.concat is
# rarely no-copy, whereas pyarrow.Table.from_batches + to_pandas is
# usually no-copy.
try:
record_batch = self.to_arrow()
except NotImplementedError:
pass
else:
df = record_batch.to_pandas()
for column in dtypes:
df[column] = pandas.Series(df[column], dtype=dtypes[column])
return df
frames = [page.to_dataframe(dtypes=dtypes) for page in self.pages]
if frames:
return pandas.concat(frames)
# No data, construct an empty dataframe with columns matching the schema.
# The result should be consistent with what an empty ARROW stream would produce.
self._stream_parser._parse_avro_schema()
schema = self._stream_parser._avro_schema_json
column_dtypes = self._dtypes_from_avro(schema["fields"])
column_dtypes.update(dtypes)
df = pandas.DataFrame(columns=column_dtypes.keys())
for column in df:
df[column] = pandas.Series([], dtype=column_dtypes[column])
return df
def _dtypes_from_avro(self, avro_fields):
"""Determine Pandas dtypes for columns in Avro schema.
Args:
avro_fields (Iterable[Mapping[str, Any]]):
Avro fields' metadata.
Returns:
            collections.OrderedDict[str, str]:
Column names with their corresponding Pandas dtypes.
"""
result = collections.OrderedDict()
type_map = {"long": "int64", "double": "float64", "boolean": "bool"}
for field_info in avro_fields:
            # If a type is a union of multiple types, pick the first type
# that is not "null".
if isinstance(field_info["type"], list):
type_info = next(item for item in field_info["type"] if item != "null")
if isinstance(type_info, str):
field_dtype = type_map.get(type_info, "object")
else:
logical_type = type_info.get("logicalType")
if logical_type == "timestamp-micros":
field_dtype = "datetime64[ns, UTC]"
else:
field_dtype = "object"
result[field_info["name"]] = field_dtype
return result
class ReadRowsPage(object):
"""An iterator of rows from a read session message.
Args:
stream_parser (google.cloud.bigquery_storage_v1.reader._StreamParser):
A helper for parsing messages into rows.
message (google.cloud.bigquery_storage_v1.types.ReadRowsResponse):
A message of data from a read rows stream.
"""
# This class is modeled after google.api_core.page_iterator.Page and aims
# to provide API compatibility where possible.
def __init__(self, stream_parser, message):
self._stream_parser = stream_parser
self._message = message
self._iter_rows = None
self._num_items = self._message.row_count
self._remaining = self._message.row_count
def _parse_rows(self):
"""Parse rows from the message only once."""
if self._iter_rows is not None:
return
rows = self._stream_parser.to_rows(self._message)
self._iter_rows = iter(rows)
@property
def num_items(self):
"""int: Total items in the page."""
return self._num_items
@property
def remaining(self):
"""int: Remaining items in the page."""
return self._remaining
def __iter__(self):
"""A ``ReadRowsPage`` is an iterator."""
return self
def next(self):
"""Get the next row in the page."""
self._parse_rows()
if self._remaining > 0:
self._remaining -= 1
return next(self._iter_rows)
# Alias needed for Python 2/3 support.
__next__ = next
def to_arrow(self):
"""Create an :class:`pyarrow.RecordBatch` of rows in the page.
Returns:
pyarrow.RecordBatch:
Rows from the message, as an Arrow record batch.
"""
return self._stream_parser.to_arrow(self._message)
def to_dataframe(self, dtypes=None):
"""Create a :class:`pandas.DataFrame` of rows in the page.
        This method requires the pandas library to create a data frame and the
fastavro library to parse row messages.
.. warning::
DATETIME columns are not supported. They are currently parsed as
strings in the fastavro library.
Args:
dtypes ( \
Map[str, Union[str, pandas.Series.dtype]] \
):
Optional. A dictionary of column names pandas ``dtype``s. The
provided ``dtype`` is used when constructing the series for
the column specified. Otherwise, the default pandas behavior
is used.
Returns:
pandas.DataFrame:
A data frame of all rows in the stream.
"""
if pandas is None:
raise ImportError(_PANDAS_REQUIRED)
return self._stream_parser.to_dataframe(self._message, dtypes=dtypes)
class _StreamParser(object):
def to_arrow(self, message):
raise NotImplementedError("Not implemented.")
def to_dataframe(self, message, dtypes=None):
raise NotImplementedError("Not implemented.")
def to_rows(self, message):
raise NotImplementedError("Not implemented.")
def _parse_avro_schema(self):
raise NotImplementedError("Not implemented.")
def _parse_arrow_schema(self):
raise NotImplementedError("Not implemented.")
@staticmethod
def from_read_session(read_session):
schema_type = read_session._pb.WhichOneof("schema")
if schema_type == "avro_schema":
return _AvroStreamParser(read_session)
elif schema_type == "arrow_schema":
return _ArrowStreamParser(read_session)
else:
raise TypeError(
"Unsupported schema type in read_session: {0}".format(schema_type)
)
@staticmethod
def from_read_rows_response(message):
schema_type = message._pb.WhichOneof("schema")
if schema_type == "avro_schema":
return _AvroStreamParser(message)
elif schema_type == "arrow_schema":
return _ArrowStreamParser(message)
else:
raise TypeError(
"Unsupported schema type in message: {0}".format(schema_type)
)
class _AvroStreamParser(_StreamParser):
"""Helper to parse Avro messages into useful representations."""
def __init__(self, message):
"""Construct an _AvroStreamParser.
Args:
message (Union[
google.cloud.bigquery_storage_v1.types.ReadSession, \
google.cloud.bigquery_storage_v1.types.ReadRowsResponse, \
]):
Either the first message of data from a read rows stream or a
read session. Both types contain a oneof "schema" field, which
can be used to determine how to deserialize rows.
"""
if fastavro is None:
raise ImportError(_FASTAVRO_REQUIRED)
self._first_message = message
self._avro_schema_json = None
self._fastavro_schema = None
self._column_names = None
def to_arrow(self, message):
"""Create an :class:`pyarrow.RecordBatch` of rows in the page.
Args:
message (google.cloud.bigquery_storage_v1.types.ReadRowsResponse):
Protocol buffer from the read rows stream, to convert into an
Arrow record batch.
Returns:
pyarrow.RecordBatch:
Rows from the message, as an Arrow record batch.
"""
raise NotImplementedError("to_arrow not implemented for Avro streams.")
def to_dataframe(self, message, dtypes=None):
"""Create a :class:`pandas.DataFrame` of rows in the page.
        This method requires the pandas library to create a data frame and the
fastavro library to parse row messages.
.. warning::
DATETIME columns are not supported. They are currently parsed as
strings in the fastavro library.
Args:
message ( \
~google.cloud.bigquery_storage_v1.types.ReadRowsResponse \
):
A message containing Avro bytes to parse into a pandas DataFrame.
dtypes ( \
Map[str, Union[str, pandas.Series.dtype]] \
):
Optional. A dictionary of column names pandas ``dtype``s. The
provided ``dtype`` is used when constructing the series for
the column specified. Otherwise, the default pandas behavior
is used.
Returns:
pandas.DataFrame:
A data frame of all rows in the stream.
"""
self._parse_avro_schema()
if dtypes is None:
dtypes = {}
columns = collections.defaultdict(list)
for row in self.to_rows(message):
for column in row:
columns[column].append(row[column])
for column in dtypes:
columns[column] = pandas.Series(columns[column], dtype=dtypes[column])
return pandas.DataFrame(columns, columns=self._column_names)
def _parse_avro_schema(self):
"""Extract and parse Avro schema from a read session."""
if self._avro_schema_json:
return
self._avro_schema_json = json.loads(self._first_message.avro_schema.schema)
self._column_names = tuple(
(field["name"] for field in self._avro_schema_json["fields"])
)
self._first_message = None
def _parse_fastavro(self):
"""Convert parsed Avro schema to fastavro format."""
self._parse_avro_schema()
self._fastavro_schema = fastavro.parse_schema(self._avro_schema_json)
def to_rows(self, message):
"""Parse all rows in a stream message.
Args:
message ( \
~google.cloud.bigquery_storage_v1.types.ReadRowsResponse \
):
A message containing Avro bytes to parse into rows.
Returns:
Iterable[Mapping]:
A sequence of rows, represented as dictionaries.
"""
self._parse_fastavro()
messageio = io.BytesIO(message.avro_rows.serialized_binary_rows)
while True:
# Loop in a while loop because schemaless_reader can only read
# a single record.
try:
# TODO: Parse DATETIME into datetime.datetime (no timezone),
# instead of as a string.
yield fastavro.schemaless_reader(messageio, self._fastavro_schema)
except StopIteration:
break # Finished with message
class _ArrowStreamParser(_StreamParser):
def __init__(self, message):
"""Construct an _ArrowStreamParser.
Args:
message (Union[
google.cloud.bigquery_storage_v1.types.ReadSession, \
google.cloud.bigquery_storage_v1.types.ReadRowsResponse, \
]):
Either the first message of data from a read rows stream or a
read session. Both types contain a oneof "schema" field, which
can be used to determine how to deserialize rows.
"""
if pyarrow is None:
raise ImportError(_PYARROW_REQUIRED)
self._first_message = message
self._schema = None
def to_arrow(self, message):
return self._parse_arrow_message(message)
def to_rows(self, message):
record_batch = self._parse_arrow_message(message)
# Iterate through each column simultaneously, and make a dict from the
# row values
for row in zip(*record_batch.columns):
yield dict(zip(self._column_names, row))
def to_dataframe(self, message, dtypes=None):
record_batch = self._parse_arrow_message(message)
if dtypes is None:
dtypes = {}
df = record_batch.to_pandas()
for column in dtypes:
df[column] = pandas.Series(df[column], dtype=dtypes[column])
return df
def _parse_arrow_message(self, message):
self._parse_arrow_schema()
return pyarrow.ipc.read_record_batch(
pyarrow.py_buffer(message.arrow_record_batch.serialized_record_batch),
self._schema,
)
def _parse_arrow_schema(self):
if self._schema:
return
self._schema = pyarrow.ipc.read_schema(
pyarrow.py_buffer(self._first_message.arrow_schema.serialized_schema)
)
self._column_names = [field.name for field in self._schema]
self._first_message = None
| googleapis/python-bigquery-storage | google/cloud/bigquery_storage_v1/reader.py | Python | apache-2.0 | 27,503 |
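# --- Illustrative usage (editor's addition, not part of the file above) ---
# A sketch of how a ReadRowsStream is normally obtained and consumed via the
# hand-written BigQueryReadClient; project, dataset and table names are
# placeholders, and the table is assumed to be non-empty.
from google.cloud.bigquery_storage import BigQueryReadClient, types

client = BigQueryReadClient()
requested_session = types.ReadSession(
    table="projects/my-project/datasets/my_dataset/tables/my_table",
    data_format=types.DataFormat.ARROW,
)
session = client.create_read_session(
    parent="projects/my-project",
    read_session=requested_session,
    max_stream_count=1,
)
reader = client.read_rows(session.streams[0].name)  # returns a ReadRowsStream
for row in reader.rows():  # one dict per row
    print(row)
# Alternatively: df = reader.to_dataframe()  # needs pandas (and pyarrow here)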
from capstone import *
from .architecture import Architecture
from avatar2.installer.config import GDB_X86, OPENOCD
class X86(Architecture):
get_gdb_executable = Architecture.resolve(GDB_X86)
get_oocd_executable = Architecture.resolve(OPENOCD)
qemu_name = 'i386'
gdb_name = 'i386'
registers = {'eax': 0,
'ecx': 1,
'edx': 2,
'ebx': 3,
'esp': 4,
'ebp': 5,
'esi': 6,
'edi': 7,
'eip': 8,
'pc': 8,
'eflags': 9,
'cs': 10,
'ss': 11,
'ds': 12,
'es': 13,
'fs': 14,
'gs': 15, }
special_registers = {
#SSE
'xmm0': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm0.v4_int32',
},
'xmm1': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm1.v4_int32',
},
'xmm2': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm2.v4_int32',
},
'xmm3': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm3.v4_int32',
},
'xmm4': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm4.v4_int32',
},
'xmm5': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm5.v4_int32',
},
'xmm6': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm6.v4_int32',
},
'xmm7': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm7.v4_int32',
},
'xmm8': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm8.v4_int32',
},
'xmm9': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm9.v4_int32',
},
'xmm10': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm10.v4_int32',
},
'xmm11': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm11.v4_int32',
},
'xmm12': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm12.v4_int32',
},
'xmm13': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm13.v4_int32',
},
'xmm14': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm14.v4_int32',
},
'xmm15': {'format': '{{{:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$xmm15.v4_int32',
},
#AVX
'ymm0': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm0.v8_int32',
},
'ymm1': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm1.v8_int32',
},
'ymm2': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm2.v8_int32',
},
'ymm3': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm3.v8_int32',
},
'ymm4': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm4.v8_int32',
},
'ymm5': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm5.v8_int32',
},
'ymm6': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm6.v8_int32',
},
'ymm7': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm7.v8_int32',
},
'ymm8': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm8.v8_int32',
},
'ymm9': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm9.v8_int32',
},
'ymm10': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm10.v8_int32',
},
'ymm11': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm11.v8_int32',
},
'ymm12': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm12.v8_int32',
},
'ymm13': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm13.v8_int32',
},
'ymm14': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm14.v8_int32',
},
'ymm15': {'format': '{{{:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}, {:d}}}',
'gdb_expression': '$ymm15.v8_int32',
},
}
sr_name = 'eflags'
unemulated_instructions = []
capstone_arch = CS_ARCH_X86
capstone_mode = CS_MODE_32
word_size = 32
class X86_64(X86):
qemu_name = 'x86_64'
gdb_name = 'i386:x86-64'
registers = {'rax': 0,
'rbx': 1,
'rcx': 2,
'rdx': 3,
'rsi': 4,
'rdi': 5,
'rbp': 6,
'rsp': 7,
'r8': 8,
'r9': 9,
'r10': 10,
'r11': 11,
'r12': 12,
'r13': 13,
'r14': 14,
'r15': 15,
'rip': 16,
'pc': 16,
'eflags': 17,
'cs': 18,
'ss': 19,
'ds': 20,
'es': 21,
'fs': 22,
'gs': 23,
}
    capstone_mode = CS_MODE_64
    unemulated_instructions = []
word_size = 64
| avatartwo/avatar2 | avatar2/archs/x86.py | Python | apache-2.0 | 6,401 |
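# --- Illustrative usage (editor's addition, not part of the file above) ---
# A small sketch showing how the class attributes of X86_64 (defined above)
# can drive a Capstone disassembler; the byte string is an arbitrary example
# (xor rax, rax; ret).
from capstone import Cs

md = Cs(X86_64.capstone_arch, X86_64.capstone_mode)
for insn in md.disasm(b"\x48\x31\xc0\xc3", 0x1000):
    print("0x%x:\t%s\t%s" % (insn.address, insn.mnemonic, insn.op_str))
print(X86_64.registers["pc"])  # GDB register index for the program counter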
# Copyright (c) 2015 Intel Research and Development Ireland Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import instantiation_validation_benchmark as base
from experimental_framework import common
NUM_OF_NEIGHBORS = 'num_of_neighbours'
AMOUNT_OF_RAM = 'amount_of_ram'
NUMBER_OF_CORES = 'number_of_cores'
NETWORK_NAME = 'network'
SUBNET_NAME = 'subnet'
class InstantiationValidationNoisyNeighborsBenchmark(
base.InstantiationValidationBenchmark):
def __init__(self, name, params):
base.InstantiationValidationBenchmark.__init__(self, name, params)
if common.RELEASE == 'liberty':
temp_name = 'stress_workload_liberty.yaml'
else:
temp_name = 'stress_workload.yaml'
self.template_file = common.get_template_dir() + \
temp_name
self.stack_name = 'neighbour'
self.neighbor_stack_names = list()
def get_features(self):
features = super(InstantiationValidationNoisyNeighborsBenchmark,
self).get_features()
features['description'] = 'Instantiation Validation Benchmark ' \
                                  'with noisy neighbors'
features['parameters'].append(NUM_OF_NEIGHBORS)
features['parameters'].append(AMOUNT_OF_RAM)
features['parameters'].append(NUMBER_OF_CORES)
features['parameters'].append(NETWORK_NAME)
features['parameters'].append(SUBNET_NAME)
features['allowed_values'][NUM_OF_NEIGHBORS] = \
['1', '2', '3', '4', '5', '6', '7', '8', '9', '10']
features['allowed_values'][NUMBER_OF_CORES] = \
['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10']
features['allowed_values'][AMOUNT_OF_RAM] = \
['256M', '1G', '2G', '3G', '4G', '5G', '6G', '7G', '8G', '9G',
'10G']
features['default_values'][NUM_OF_NEIGHBORS] = '1'
features['default_values'][NUMBER_OF_CORES] = '1'
features['default_values'][AMOUNT_OF_RAM] = '256M'
features['default_values'][NETWORK_NAME] = ''
features['default_values'][SUBNET_NAME] = ''
return features
def init(self):
super(InstantiationValidationNoisyNeighborsBenchmark, self).init()
common.replace_in_file(self.lua_file, 'local out_file = ""',
'local out_file = "' +
self.results_file + '"')
heat_param = dict()
heat_param['network'] = self.params[NETWORK_NAME]
heat_param['subnet'] = self.params[SUBNET_NAME]
heat_param['cores'] = self.params['number_of_cores']
heat_param['memory'] = self.params['amount_of_ram']
for i in range(0, int(self.params['num_of_neighbours'])):
stack_name = self.stack_name + str(i)
common.DEPLOYMENT_UNIT.deploy_heat_template(self.template_file,
stack_name,
heat_param)
self.neighbor_stack_names.append(stack_name)
def finalize(self):
common.replace_in_file(self.lua_file, 'local out_file = "' +
self.results_file + '"',
'local out_file = ""')
# destroy neighbor stacks
for stack_name in self.neighbor_stack_names:
common.DEPLOYMENT_UNIT.destroy_heat_template(stack_name)
self.neighbor_stack_names = list()
| dtudares/hello-world | yardstick/yardstick/vTC/apexlake/experimental_framework/benchmarks/instantiation_validation_noisy_neighbors_benchmark.py | Python | apache-2.0 | 3,974 |
## Copyright 2022 Google LLC
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## https://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
"""Sends a text mesage to the user with a suggestion action to dial a phone number.
Read more: https://developers.google.com/business-communications/business-messages/guides/how-to/message/send?hl=en#dial_action
This code is based on the https://github.com/google-business-communications/python-businessmessages
Python Business Messages client library.
"""
import uuid
from businessmessages import businessmessages_v1_client as bm_client
from businessmessages.businessmessages_v1_messages import BusinessmessagesConversationsMessagesCreateRequest
from businessmessages.businessmessages_v1_messages import BusinessMessagesDialAction
from businessmessages.businessmessages_v1_messages import BusinessMessagesMessage
from businessmessages.businessmessages_v1_messages import BusinessMessagesRepresentative
from businessmessages.businessmessages_v1_messages import BusinessMessagesSuggestedAction
from businessmessages.businessmessages_v1_messages import BusinessMessagesSuggestion
from oauth2client.service_account import ServiceAccountCredentials
# Edit the values below:
path_to_service_account_key = './service_account_key.json'
conversation_id = 'EDIT_HERE'
credentials = ServiceAccountCredentials.from_json_keyfile_name(
path_to_service_account_key,
scopes=['https://www.googleapis.com/auth/businessmessages'])
client = bm_client.BusinessmessagesV1(credentials=credentials)
representative_type_as_string = 'BOT'
if representative_type_as_string == 'BOT':
representative_type = BusinessMessagesRepresentative.RepresentativeTypeValueValuesEnum.BOT
else:
representative_type = BusinessMessagesRepresentative.RepresentativeTypeValueValuesEnum.HUMAN
# Create a text message with a dial action and fallback text
message = BusinessMessagesMessage(
messageId=str(uuid.uuid4().int),
representative=BusinessMessagesRepresentative(
representativeType=representative_type
),
text='Contact support for help with this issue.',
fallback='Give us a call at +12223334444.',
suggestions=[
BusinessMessagesSuggestion(
action=BusinessMessagesSuggestedAction(
text='Call support',
postbackData='call-support',
dialAction=BusinessMessagesDialAction(
phoneNumber='+12223334444'))
),
])
# Create the message request
create_request = BusinessmessagesConversationsMessagesCreateRequest(
businessMessagesMessage=message,
parent='conversations/' + conversation_id)
# Send the message
bm_client.BusinessmessagesV1.ConversationsMessagesService(
client=client).Create(request=create_request)
| google-business-communications/bm-snippets-python | send-message-suggested-action-dial.py | Python | apache-2.0 | 3,227 |
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2014 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from world import world
from bigml.api import HTTP_OK
def i_get_the_project(step, resource):
resource = world.api.get_project(resource)
world.status = resource['code']
assert world.status == HTTP_OK
world.project = resource['object']
| ShaguptaS/python | bigml/tests/read_project_steps.py | Python | apache-2.0 | 869 |
# automate/server/user/views.py
#################
#### imports ####
#################
#from flask import render_template, Blueprint, url_for, \
# redirect, flash, request
#from flask_login import login_user, logout_user, login_required
#from automate.server import bcrypt, db
#from automate.server import db
#from automate.server.models import User
#from automate.server.user.forms import LoginForm, RegisterForm
################
#### config ####
################
#user_blueprint = Blueprint('user', __name__,)
################
#### routes ####
################
#@user_blueprint.route('/register', methods=['GET', 'POST'])
#def register():
# form = RegisterForm(request.form)
# if form.validate_on_submit():
# user = User(
# email=form.email.data,
# password=form.password.data
# )
# db.session.add(user)
# db.session.commit()
#
# login_user(user)
#
# flash('Thank you for registering.', 'success')
# return redirect(url_for("user.members"))
#
# return render_template('user/register.html', form=form)
#
#
#@user_blueprint.route('/login', methods=['GET', 'POST'])
#def login():
# form = LoginForm(request.form)
# if form.validate_on_submit():
# user = User.query.filter_by(email=form.email.data).first()
# if user:
# #if user and bcrypt.check_password_hash(
# # user.password, request.form['password']):
# # login_user(user)
# flash('You are logged in. Welcome!', 'success')
# return redirect(url_for('user.members'))
# else:
# flash('Invalid email and/or password.', 'danger')
# return render_template('user/login.html', form=form)
# return render_template('user/login.html', title='Please Login', form=form)
#
#
#@user_blueprint.route('/logout')
#@login_required
#def logout():
# logout_user()
# flash('You were logged out. Bye!', 'success')
# return redirect(url_for('main.home'))
#
#
#@user_blueprint.route('/members')
#@login_required
#def members():
# return render_template('user/members.html')
# | JeromeErasmus/browserstack_automate | automate/server/user/views.py | Python | apache-2.0 | 2,112 |
# Copyright 2015 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
from sqlalchemy.orm import exc
from sqlalchemy.sql import expression as expr
from neutron.db import models_v2
from neutron.extensions import l3
from neutron_lib import constants as l3_constants
from neutron_lib import exceptions as n_exc
from networking_cisco._i18n import _, _LW
from networking_cisco import backwards_compatibility as bc
from networking_cisco.plugins.cisco.common import cisco_constants
from networking_cisco.plugins.cisco.db.l3 import ha_db
from networking_cisco.plugins.cisco.db.l3 import l3_models
from networking_cisco.plugins.cisco.db.l3.l3_router_appliance_db import (
L3RouterApplianceDBMixin)
from networking_cisco.plugins.cisco.extensions import routerhostingdevice
from networking_cisco.plugins.cisco.extensions import routerrole
from networking_cisco.plugins.cisco.extensions import routertype
from networking_cisco.plugins.cisco.extensions import routertypeawarescheduler
from networking_cisco.plugins.cisco.l3 import drivers
LOG = logging.getLogger(__name__)
DEVICE_OWNER_GLOBAL_ROUTER_GW = cisco_constants.DEVICE_OWNER_GLOBAL_ROUTER_GW
HOSTING_DEVICE_ATTR = routerhostingdevice.HOSTING_DEVICE_ATTR
ROUTER_ROLE_GLOBAL = cisco_constants.ROUTER_ROLE_GLOBAL
ROUTER_ROLE_LOGICAL_GLOBAL = cisco_constants.ROUTER_ROLE_LOGICAL_GLOBAL
ROUTER_ROLE_HA_REDUNDANCY = cisco_constants.ROUTER_ROLE_HA_REDUNDANCY
TENANT_HSRP_GRP_RANGE = 1
TENANT_HSRP_GRP_OFFSET = 1064
EXT_HSRP_GRP_RANGE = 1
EXT_HSRP_GRP_OFFSET = 1064
N_ROUTER_PREFIX = 'nrouter-'
DEV_NAME_LEN = 14
class TopologyNotSupportedByRouterError(n_exc.Conflict):
message = _("Requested topology cannot be supported by router.")
class ASR1kL3RouterDriver(drivers.L3RouterBaseDriver):
def create_router_precommit(self, context, router_context):
pass
def create_router_postcommit(self, context, router_context):
pass
def update_router_precommit(self, context, router_context):
pass
def update_router_postcommit(self, context, router_context):
# Whenever a gateway is added to, or removed from, a router hosted on
# a hosting device, we must ensure that a global router is running
# (for add operation) or not running (for remove operation) on that
# hosting device.
current = router_context.current
if current[HOSTING_DEVICE_ATTR] is None:
return
e_context = context.elevated()
if current['gw_port_id']:
self._conditionally_add_global_router(e_context, current)
else:
self._conditionally_remove_global_router(
e_context, router_context.original, True)
def delete_router_precommit(self, context, router_context):
pass
def delete_router_postcommit(self, context, router_context):
pass
def schedule_router_precommit(self, context, router_context):
pass
def schedule_router_postcommit(self, context, router_context):
# When the hosting device hosts a Neutron router with external
# connectivity, a "global" router (modeled as a Neutron router) must
# also run on the hosting device (outside of any VRF) to enable the
# connectivity.
current = router_context.current
if current['gw_port_id'] and current[HOSTING_DEVICE_ATTR] is not None:
self._conditionally_add_global_router(context.elevated(), current)
def unschedule_router_precommit(self, context, router_context):
pass
def unschedule_router_postcommit(self, context, router_context):
# When there is no longer any router with external gateway hosted on
# a hosting device, the global router on that hosting device can also
# be removed.
current = router_context.current
hd_id = current[HOSTING_DEVICE_ATTR]
if current['gw_port_id'] and hd_id is not None:
self._conditionally_remove_global_router(context.elevated(),
current)
def add_router_interface_precommit(self, context, r_port_context):
# Inside an ASR1k, VLAN sub-interfaces are used to connect to internal
# neutron networks. Only one such sub-interface can be created for each
# VLAN. As the VLAN sub-interface is added to the VRF representing the
# Neutron router, we must only allow one Neutron router to attach to a
# particular Neutron subnet/network.
if (r_port_context.router_context.current[routerrole.ROUTER_ROLE_ATTR]
== ROUTER_ROLE_HA_REDUNDANCY):
# redundancy routers can be exempt as we check the user visible
# routers and the request will be rejected there.
return
e_context = context.elevated()
if r_port_context.current is None:
sn = self._core_plugin.get_subnet(e_context,
r_port_context.current_subnet_id)
net_id = sn['network_id']
else:
net_id = r_port_context.current['network_id']
filters = {'network_id': [net_id],
'device_owner': [bc.constants.DEVICE_OWNER_ROUTER_INTF]}
for port in self._core_plugin.get_ports(e_context,
filters=filters):
router_id = port['device_id']
if router_id is None:
continue
router = self._l3_plugin.get_router(e_context, router_id)
if router[routerrole.ROUTER_ROLE_ATTR] is None:
raise TopologyNotSupportedByRouterError()
def add_router_interface_postcommit(self, context, r_port_context):
pass
def remove_router_interface_precommit(self, context, r_port_context):
pass
def remove_router_interface_postcommit(self, context, r_port_context):
pass
def create_floatingip_precommit(self, context, fip_context):
pass
def create_floatingip_postcommit(self, context, fip_context):
pass
def update_floatingip_precommit(self, context, fip_context):
pass
def update_floatingip_postcommit(self, context, fip_context):
pass
def delete_floatingip_precommit(self, context, fip_context):
pass
def delete_floatingip_postcommit(self, context, fip_context):
pass
def ha_interface_ip_address_needed(self, context, router, port,
ha_settings_db, ha_group_uuid):
if port['device_owner'] == bc.constants.DEVICE_OWNER_ROUTER_GW:
return False
else:
return True
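    # HSRP group ids are derived deterministically: gateway ports hash hex
    # digits of the router name (its uuid), other ports hash the leading hex
    # digits of the network id; both results are folded into a configured
    # range and shifted by an offset.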
def generate_ha_group_id(self, context, router, port, ha_settings_db,
ha_group_uuid):
if port['device_owner'] in {bc.constants.DEVICE_OWNER_ROUTER_GW,
DEVICE_OWNER_GLOBAL_ROUTER_GW}:
ri_name = self._router_name(router['id'])[8:DEV_NAME_LEN]
group_id = int(ri_name, 16) % TENANT_HSRP_GRP_RANGE
group_id += TENANT_HSRP_GRP_OFFSET
return group_id
else:
net_id_digits = port['network_id'][:6]
group_id = int(net_id_digits, 16) % EXT_HSRP_GRP_RANGE
group_id += EXT_HSRP_GRP_OFFSET
return group_id
def pre_backlog_processing(self, context):
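        # Clean-up pass run before backlog processing: delete global routers
        # (and, once none remain, the logical global router) that no longer
        # serve any tenant or redundancy router with a gateway set on the
        # same hosting device.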
filters = {routerrole.ROUTER_ROLE_ATTR: [ROUTER_ROLE_GLOBAL]}
global_routers = self._l3_plugin.get_routers(context, filters=filters)
if not global_routers:
LOG.debug("There are no global routers")
return
for gr in global_routers:
filters = {
HOSTING_DEVICE_ATTR: [gr[HOSTING_DEVICE_ATTR]],
routerrole.ROUTER_ROLE_ATTR: [ROUTER_ROLE_HA_REDUNDANCY, None]
}
invert_filters = {'gw_port_id': [None]}
num_rtrs = self._l3_plugin.get_routers_count_extended(
context, filters=filters, invert_filters=invert_filters)
LOG.debug("Global router %(name)s[%(id)s] with hosting_device "
"%(hd)s has %(num)d routers with gw_port set on that "
"device",
{'name': gr['name'], 'id': gr['id'],
'hd': gr[HOSTING_DEVICE_ATTR], 'num': num_rtrs, })
if num_rtrs == 0:
LOG.warning(
_LW("Global router:%(name)s[id:%(id)s] is present for "
"hosting device:%(hd)s but there are no tenant or "
"redundancy routers with gateway set on that hosting "
"device. Proceeding to delete global router."),
{'name': gr['name'], 'id': gr['id'],
'hd': gr[HOSTING_DEVICE_ATTR]})
self._delete_global_router(context, gr['id'])
filters = {
#TODO(bmelande): Filter on routertype of global router
#routertype.TYPE_ATTR: [routertype_id],
routerrole.ROUTER_ROLE_ATTR: [ROUTER_ROLE_LOGICAL_GLOBAL]}
log_global_routers = self._l3_plugin.get_routers(
context, filters=filters)
if log_global_routers:
log_global_router_id = log_global_routers[0]['id']
self._delete_global_router(context, log_global_router_id,
logical=True)
def post_backlog_processing(self, context):
pass
# ---------------- Create workflow functions -----------------
def _conditionally_add_global_router(self, context, tenant_router):
# We could filter on hosting device id but we don't so we get all
# global routers for this router type. We can then use that count to
# determine which ha priority a new global router should get.
filters = {
routertype.TYPE_ATTR: [tenant_router[routertype.TYPE_ATTR]],
routerrole.ROUTER_ROLE_ATTR: [ROUTER_ROLE_GLOBAL]}
global_routers = self._l3_plugin.get_routers(
context, filters=filters)
hd_to_gr_dict = {r[HOSTING_DEVICE_ATTR]: r for r in global_routers}
hosting_device_id = tenant_router[HOSTING_DEVICE_ATTR]
ext_nw_id = tenant_router[l3.EXTERNAL_GW_INFO]['network_id']
global_router = hd_to_gr_dict.get(hosting_device_id)
logical_global_router = self._get_logical_global_router(context,
tenant_router)
self._conditionally_add_auxiliary_external_gateway_port(
context, logical_global_router, ext_nw_id, tenant_router, True)
if global_router is None:
# must create global router on hosting device
global_router = self._create_global_router(
context, hosting_device_id, hd_to_gr_dict, tenant_router,
logical_global_router)
self._conditionally_add_auxiliary_external_gateway_port(
context, global_router, ext_nw_id, tenant_router)
self._l3_plugin.add_type_and_hosting_device_info(context,
global_router)
for ni in self._l3_plugin.get_notifiers(context, [global_router]):
if ni['notifier']:
ni['notifier'].routers_updated(context, ni['routers'])
def _conditionally_add_auxiliary_external_gateway_port(
self, context, global_router, ext_net_id, tenant_router,
provision_ha=False, port_type=DEVICE_OWNER_GLOBAL_ROUTER_GW):
        # the global router may or may not have an interface on the
# external network that the tenant router uses
filters = {
'device_id': [global_router['id']],
'device_owner': [port_type]}
connected_nets = {
p['network_id']: p['fixed_ips'] for p in
self._core_plugin.get_ports(context, filters=filters)}
if ext_net_id in connected_nets:
# already connected to the external network so we're done
return
else:
# not connected to the external network, so let's fix that
aux_gw_port = self._create_auxiliary_external_gateway_port(
context, global_router, ext_net_id, tenant_router, port_type)
if provision_ha:
self._provision_port_ha(context, aux_gw_port, global_router)
def _create_auxiliary_external_gateway_port(
self, context, global_router, ext_net_id, tenant_router,
port_type=DEVICE_OWNER_GLOBAL_ROUTER_GW):
# When a global router is connected to an external network then a
# special type of gateway port is created on that network. Such a
# port is called auxiliary gateway ports. It has an ip address on
# each subnet of the external network. A (logical) global router
# never has a traditional Neutron gateway port.
filters = {
'device_id': [tenant_router['id']],
'device_owner': [l3_constants.DEVICE_OWNER_ROUTER_GW]}
# fetch the gateway port of the *tenant* router so we can determine
# the CIDR of that port's subnet
gw_port = self._core_plugin.get_ports(context,
filters=filters)[0]
fixed_ips = self._get_fixed_ips_subnets(context, gw_port)
global_router_id = global_router['id']
with context.session.begin(subtransactions=True):
aux_gw_port = self._core_plugin.create_port(context, {
'port': {
'tenant_id': '', # intentionally not set
'network_id': ext_net_id,
'mac_address': bc.constants.ATTR_NOT_SPECIFIED,
'fixed_ips': fixed_ips,
'device_id': global_router_id,
'device_owner': port_type,
'admin_state_up': True,
'name': ''}})
router_port = bc.RouterPort(
port_id=aux_gw_port['id'],
router_id=global_router_id,
port_type=port_type)
context.session.add(router_port)
return aux_gw_port
def _create_global_router(
self, context, hosting_device_id, hd_to_gr_dict, tenant_router,
logical_global_router):
r_spec = {'router': {
# global routers are not tied to any tenant
'tenant_id': '',
'name': self._global_router_name(hosting_device_id),
'admin_state_up': True}}
global_router, r_hd_b_db = self._l3_plugin.do_create_router(
context, r_spec, tenant_router[routertype.TYPE_ATTR], False,
True, hosting_device_id, ROUTER_ROLE_GLOBAL)
# make the global router a redundancy router for the logical
# global router (which we treat as a hidden "user visible
# router" (how's that for a contradiction of terms! :-) )
with context.session.begin(subtransactions=True):
ha_priority = (
ha_db.DEFAULT_MASTER_PRIORITY -
len(hd_to_gr_dict) * ha_db.PRIORITY_INCREASE_STEP)
r_b_b = ha_db.RouterRedundancyBinding(
redundancy_router_id=global_router['id'],
priority=ha_priority,
user_router_id=logical_global_router['id'])
context.session.add(r_b_b)
return global_router
def _get_logical_global_router(self, context, tenant_router):
# Since HA is also enabled on the global routers on each hosting device
# those global routers need HA settings and VIPs. We represent that
# using a Neutron router that is never instantiated/hosted. That
# Neutron router is referred to as the "logical global" router.
filters = {routertype.TYPE_ATTR: [tenant_router[routertype.TYPE_ATTR]],
routerrole.ROUTER_ROLE_ATTR: [ROUTER_ROLE_LOGICAL_GLOBAL]}
logical_global_routers = self._l3_plugin.get_routers(
context, filters=filters)
if not logical_global_routers:
# must create logical global router
logical_global_router = self._create_logical_global_router(
context, tenant_router)
else:
logical_global_router = logical_global_routers[0]
self._update_ha_redundancy_level(context, logical_global_router, 1)
return logical_global_router
def _create_logical_global_router(self, context, tenant_router):
r_spec = {'router': {
# global routers are not tied to any tenant
'tenant_id': '',
'name': self._global_router_name('', logical=True),
'admin_state_up': True,
# set auto-schedule to false to keep this router un-hosted
routertypeawarescheduler.AUTO_SCHEDULE_ATTR: False}}
# notifications should never be sent for this logical router!
logical_global_router, r_hd_b_db = (
self._l3_plugin.do_create_router(
context, r_spec, tenant_router[routertype.TYPE_ATTR],
False, True, None, ROUTER_ROLE_LOGICAL_GLOBAL))
with context.session.begin(subtransactions=True):
r_ha_s_db = ha_db.RouterHASetting(
router_id=logical_global_router['id'],
ha_type=cfg.CONF.ha.default_ha_mechanism,
redundancy_level=1,
priority=ha_db.DEFAULT_MASTER_PRIORITY,
probe_connectivity=False,
probe_target=None,
probe_interval=None)
context.session.add(r_ha_s_db)
return logical_global_router
def _get_fixed_ips_subnets(self, context, gw_port):
nw = self._core_plugin.get_network(context, gw_port['network_id'])
subnets = [{'subnet_id': s} for s in nw['subnets']]
return subnets
def _provision_port_ha(self, context, ha_port, router, ha_binding_db=None):
ha_group_uuid = uuidutils.generate_uuid()
router_id = router['id']
with context.session.begin(subtransactions=True):
if ha_binding_db is None:
ha_binding_db = self._get_ha_binding(context, router_id)
group_id = self.generate_ha_group_id(
context, router,
{'device_owner': DEVICE_OWNER_GLOBAL_ROUTER_GW}, ha_binding_db,
ha_group_uuid)
r_ha_g = ha_db.RouterHAGroup(
id=ha_group_uuid,
tenant_id='',
ha_type=ha_binding_db.ha_type,
group_identity=group_id,
ha_port_id=ha_port['id'],
extra_port_id=None,
subnet_id=ha_port['fixed_ips'][0]['subnet_id'],
user_router_id=router_id,
timers_config='',
tracking_config='',
other_config='')
context.session.add(r_ha_g)
def _get_ha_binding(self, context, router_id):
with context.session.begin(subtransactions=True):
query = context.session.query(ha_db.RouterHASetting)
query = query.filter(
ha_db.RouterHASetting.router_id == router_id)
return query.first()
# ---------------- Remove workflow functions -----------------
def _conditionally_remove_global_router(self, context, tenant_router,
update_operation=False):
filters = {routertype.TYPE_ATTR: [tenant_router[routertype.TYPE_ATTR]],
routerrole.ROUTER_ROLE_ATTR: [ROUTER_ROLE_GLOBAL],
HOSTING_DEVICE_ATTR: [tenant_router[HOSTING_DEVICE_ATTR]]}
global_routers = self._l3_plugin.get_routers(context,
filters=filters)
hd_to_gr_dict = {r[HOSTING_DEVICE_ATTR]: r for r in global_routers}
if global_routers:
global_router_id = global_routers[0]['id']
if not tenant_router or not tenant_router[l3.EXTERNAL_GW_INFO]:
# let l3 plugin's periodic backlog processing take care of the
# clean up of the global router
return
ext_net_id = tenant_router[l3.EXTERNAL_GW_INFO]['network_id']
routertype_id = tenant_router[routertype.TYPE_ATTR]
hd_id = tenant_router[HOSTING_DEVICE_ATTR]
global_router = hd_to_gr_dict.get(hd_id)
port_deleted = self._conditionally_remove_auxiliary_gateway_port(
context, global_router_id, ext_net_id, routertype_id, hd_id,
update_operation)
if port_deleted is False:
            # since no auxiliary gateway port was deleted we can abort
            # now, as the auxiliary gateway port count cannot have
            # reached zero
return
filters = {
'device_id': [global_router_id],
'device_owner': [DEVICE_OWNER_GLOBAL_ROUTER_GW]}
num_aux_gw_ports = self._core_plugin.get_ports_count(
context, filters=filters)
if num_aux_gw_ports == 0:
# global router not needed any more so we delete it
self._delete_global_router(context, global_router_id)
do_notify = False
else:
do_notify = True
# process logical global router to remove its port
self._conditionally_remove_auxiliary_gateway_vip_port(
context, ext_net_id, routertype_id)
self._l3_plugin.add_type_and_hosting_device_info(context,
global_router)
if do_notify is True:
for ni in self._l3_plugin.get_notifiers(context,
[global_router]):
if ni['notifier']:
ni['notifier'].routers_updated(context, ni['routers'])
def _conditionally_remove_auxiliary_gateway_port(
self, context, router_id, ext_net_id, routertype_id,
hosting_device_id, update_operation=False):
num_rtrs = self._get_gateway_routers_count(
context, ext_net_id, routertype_id, None, hosting_device_id)
if ((num_rtrs <= 1 and update_operation is False) or
(num_rtrs == 0 and update_operation is True)):
# there are no tenant routers *on ext_net_id* that are serviced by
            # this global router so its aux gw port can be deleted
self._delete_auxiliary_gateway_ports(context, router_id,
ext_net_id)
return True
return False
def _conditionally_remove_auxiliary_gateway_vip_port(
self, context, ext_net_id, routertype_id):
filters = {routertype.TYPE_ATTR: [routertype_id],
routerrole.ROUTER_ROLE_ATTR: [ROUTER_ROLE_LOGICAL_GLOBAL]}
log_global_routers = self._l3_plugin.get_routers(context,
filters=filters)
if not log_global_routers:
return
self._update_ha_redundancy_level(context, log_global_routers[0], -1)
log_global_router_id = log_global_routers[0]['id']
num_global_rtrs = self._get_gateway_routers_count(
context, ext_net_id, routertype_id, ROUTER_ROLE_GLOBAL)
if num_global_rtrs == 0:
# there are no global routers *on ext_net_id* that are serviced by
            # this logical global router so its aux gw VIP port can be deleted
self._delete_auxiliary_gateway_ports(context, log_global_router_id,
ext_net_id)
filters[routerrole.ROUTER_ROLE_ATTR] = [ROUTER_ROLE_GLOBAL]
total_num_global_rtrs = self._l3_plugin.get_routers_count(
context, filters=filters)
if total_num_global_rtrs == 0:
# there are no global routers left that are serviced by this
# logical global router so it can be deleted
self._delete_global_router(context, log_global_router_id, True)
return False
def _delete_auxiliary_gateway_ports(
self, context, router_id, net_id=None,
port_type=DEVICE_OWNER_GLOBAL_ROUTER_GW):
filters = {
'device_id': [router_id],
'device_owner': [port_type]}
if net_id is not None:
filters['network_id'] = [net_id]
for port in self._core_plugin.get_ports(context, filters=filters):
try:
self._core_plugin.delete_port(context, port['id'],
l3_port_check=False)
except (exc.ObjectDeletedError, n_exc.PortNotFound) as e:
LOG.warning(e)
def _delete_global_router(self, context, global_router_id, logical=False):
# ensure we clean up any stale auxiliary gateway ports
self._delete_auxiliary_gateway_ports(context, global_router_id)
try:
if logical is True:
# We use parent class method as no special operations beyond
                # what the base implementation does are needed for logical
# global router
super(L3RouterApplianceDBMixin, self._l3_plugin).delete_router(
context, global_router_id)
else:
self._l3_plugin.delete_router(
context, global_router_id, unschedule=False)
except (exc.ObjectDeletedError, l3.RouterNotFound) as e:
LOG.warning(e)
def _get_gateway_routers_count(self, context, ext_net_id, routertype_id,
router_role, hosting_device_id=None):
# Determine number of routers (with routertype_id and router_role)
# that act as gateway to ext_net_id and that are hosted on
# hosting_device_id (if specified).
query = context.session.query(bc.Router)
if router_role in [None, ROUTER_ROLE_HA_REDUNDANCY]:
# tenant router roles
query = query.join(models_v2.Port,
models_v2.Port.id == bc.Router.gw_port_id)
role_filter = expr.or_(
l3_models.RouterHostingDeviceBinding.role == expr.null(),
l3_models.RouterHostingDeviceBinding.role ==
ROUTER_ROLE_HA_REDUNDANCY)
else:
# global and logical global routers
            # auxiliary gateway ports of (logical) global routers reference
            # the router via the port's device_id
            query = query.join(models_v2.Port,
                               models_v2.Port.device_id == bc.Router.id)
role_filter = (
l3_models.RouterHostingDeviceBinding.role == router_role)
query = query.join(
l3_models.RouterHostingDeviceBinding,
l3_models.RouterHostingDeviceBinding.router_id == bc.Router.id)
query = query.filter(
role_filter,
models_v2.Port.network_id == ext_net_id,
l3_models.RouterHostingDeviceBinding.router_type_id ==
routertype_id)
if hosting_device_id is not None:
query = query.filter(
l3_models.RouterHostingDeviceBinding.hosting_device_id ==
hosting_device_id)
return query.count()
# ---------------- General support functions -----------------
def _update_ha_redundancy_level(self, context, logical_global_router,
delta):
with context.session.begin(subtransactions=True):
log_g_router_db = self._l3_plugin._get_router(
context, logical_global_router['id'])
log_g_router_db.ha_settings.redundancy_level += delta
context.session.add(log_g_router_db.ha_settings)
def _router_name(self, router_id):
return N_ROUTER_PREFIX + router_id
def _global_router_name(self, hosting_device_id, logical=False):
if logical is True:
return cisco_constants.LOGICAL_ROUTER_ROLE_NAME
else:
return '%s-%s' % (cisco_constants.ROUTER_ROLE_NAME_PREFIX,
hosting_device_id[-cisco_constants.ROLE_ID_LEN:])
@property
def _core_plugin(self):
return bc.get_plugin()
@property
def _l3_plugin(self):
return bc.get_plugin(bc.constants.L3)
| Gitweijie/first_project | networking_cisco/plugins/cisco/l3/drivers/asr1k/asr1k_routertype_driver.py | Python | apache-2.0 | 29,107 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
import pytest
from google.cloud.vision import enums
from google.cloud.vision_v1 import ProductSearchClient
from google.cloud.vision_v1.proto.image_annotator_pb2 import (
AnnotateImageResponse,
EntityAnnotation,
SafeSearchAnnotation,
)
from google.cloud.vision_v1.proto.product_search_service_pb2 import Product, ProductSet, ReferenceImage
from google.protobuf.json_format import MessageToDict
from parameterized import parameterized
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.vision import ERR_DIFF_NAMES, ERR_UNABLE_TO_CREATE, CloudVisionHook
from tests.providers.google.cloud.utils.base_gcp_mock import mock_base_gcp_hook_default_project_id
PROJECT_ID_TEST = 'project-id'
PROJECT_ID_TEST_2 = 'project-id-2'
LOC_ID_TEST = 'loc-id'
LOC_ID_TEST_2 = 'loc-id-2'
PRODUCTSET_ID_TEST = 'ps-id'
PRODUCTSET_ID_TEST_2 = 'ps-id-2'
PRODUCTSET_NAME_TEST = f'projects/{PROJECT_ID_TEST}/locations/{LOC_ID_TEST}/productSets/{PRODUCTSET_ID_TEST}'
PRODUCT_ID_TEST = 'p-id'
PRODUCT_ID_TEST_2 = 'p-id-2'
PRODUCT_NAME_TEST = f"projects/{PROJECT_ID_TEST}/locations/{LOC_ID_TEST}/products/{PRODUCT_ID_TEST}"
PRODUCT_NAME = f"projects/{PROJECT_ID_TEST}/locations/{LOC_ID_TEST}/products/{PRODUCT_ID_TEST}"
REFERENCE_IMAGE_ID_TEST = 'ri-id'
REFERENCE_IMAGE_GEN_ID_TEST = 'ri-id'
ANNOTATE_IMAGE_REQUEST = {
'image': {'source': {'image_uri': "gs://bucket-name/object-name"}},
'features': [{'type': enums.Feature.Type.LOGO_DETECTION}],
}
BATCH_ANNOTATE_IMAGE_REQUEST = [
{
'image': {'source': {'image_uri': "gs://bucket-name/object-name"}},
'features': [{'type': enums.Feature.Type.LOGO_DETECTION}],
},
{
'image': {'source': {'image_uri': "gs://bucket-name/object-name"}},
'features': [{'type': enums.Feature.Type.LOGO_DETECTION}],
},
]
REFERENCE_IMAGE_NAME_TEST = (
f"projects/{PROJECT_ID_TEST}/locations/{LOC_ID_TEST}/products/"
f"{PRODUCTSET_ID_TEST}/referenceImages/{REFERENCE_IMAGE_ID_TEST}"
)
REFERENCE_IMAGE_TEST = ReferenceImage(name=REFERENCE_IMAGE_GEN_ID_TEST)
REFERENCE_IMAGE_WITHOUT_ID_NAME = ReferenceImage()
DETECT_TEST_IMAGE = {"source": {"image_uri": "https://foo.com/image.jpg"}}
DETECT_TEST_ADDITIONAL_PROPERTIES = {"test-property-1": "test-value-1", "test-property-2": "test-value-2"}
class TestGcpVisionHook(unittest.TestCase):
def setUp(self):
with mock.patch(
'airflow.providers.google.cloud.hooks.vision.CloudVisionHook.__init__',
new=mock_base_gcp_hook_default_project_id,
):
self.hook = CloudVisionHook(gcp_conn_id='test')
@mock.patch(
"airflow.providers.google.cloud.hooks.vision.CloudVisionHook.client_info",
new_callable=mock.PropertyMock,
)
@mock.patch("airflow.providers.google.cloud.hooks.vision.CloudVisionHook._get_credentials")
@mock.patch("airflow.providers.google.cloud.hooks.vision.ProductSearchClient")
def test_product_search_client_creation(self, mock_client, mock_get_creds, mock_client_info):
result = self.hook.get_conn()
mock_client.assert_called_once_with(
credentials=mock_get_creds.return_value, client_info=mock_client_info.return_value
)
assert mock_client.return_value == result
assert self.hook._client == result
@mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
def test_create_productset_explicit_id(self, get_conn):
# Given
create_product_set_method = get_conn.return_value.create_product_set
create_product_set_method.return_value = None
parent = ProductSearchClient.location_path(PROJECT_ID_TEST, LOC_ID_TEST)
product_set = ProductSet()
# When
result = self.hook.create_product_set(
location=LOC_ID_TEST,
product_set_id=PRODUCTSET_ID_TEST,
product_set=product_set,
project_id=PROJECT_ID_TEST,
retry=None,
timeout=None,
metadata=None,
)
# Then
# ProductSet ID was provided explicitly in the method call above, should be returned from the method
assert result == PRODUCTSET_ID_TEST
create_product_set_method.assert_called_once_with(
parent=parent,
product_set=product_set,
product_set_id=PRODUCTSET_ID_TEST,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
def test_create_productset_autogenerated_id(self, get_conn):
# Given
autogenerated_id = 'autogen-id'
response_product_set = ProductSet(
name=ProductSearchClient.product_set_path(PROJECT_ID_TEST, LOC_ID_TEST, autogenerated_id)
)
create_product_set_method = get_conn.return_value.create_product_set
create_product_set_method.return_value = response_product_set
parent = ProductSearchClient.location_path(PROJECT_ID_TEST, LOC_ID_TEST)
product_set = ProductSet()
# When
result = self.hook.create_product_set(
location=LOC_ID_TEST, product_set_id=None, product_set=product_set, project_id=PROJECT_ID_TEST
)
# Then
# ProductSet ID was not provided in the method call above. Should be extracted from the API response
# and returned.
assert result == autogenerated_id
create_product_set_method.assert_called_once_with(
parent=parent,
product_set=product_set,
product_set_id=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
def test_create_productset_autogenerated_id_wrong_api_response(self, get_conn):
# Given
response_product_set = None
create_product_set_method = get_conn.return_value.create_product_set
create_product_set_method.return_value = response_product_set
parent = ProductSearchClient.location_path(PROJECT_ID_TEST, LOC_ID_TEST)
product_set = ProductSet()
# When
with pytest.raises(AirflowException) as ctx:
self.hook.create_product_set(
location=LOC_ID_TEST,
product_set_id=None,
product_set=product_set,
project_id=PROJECT_ID_TEST,
retry=None,
timeout=None,
metadata=None,
)
# Then
# API response was wrong (None) and thus ProductSet ID extraction should fail.
err = ctx.value
assert 'Unable to get name from response...' in str(err)
create_product_set_method.assert_called_once_with(
parent=parent,
product_set=product_set,
product_set_id=None,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
def test_get_productset(self, get_conn):
# Given
name = ProductSearchClient.product_set_path(PROJECT_ID_TEST, LOC_ID_TEST, PRODUCTSET_ID_TEST)
response_product_set = ProductSet(name=name)
get_product_set_method = get_conn.return_value.get_product_set
get_product_set_method.return_value = response_product_set
# When
response = self.hook.get_product_set(
location=LOC_ID_TEST, product_set_id=PRODUCTSET_ID_TEST, project_id=PROJECT_ID_TEST
)
# Then
assert response
assert response == MessageToDict(response_product_set)
get_product_set_method.assert_called_once_with(name=name, retry=None, timeout=None, metadata=None)
@mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
def test_update_productset_no_explicit_name(self, get_conn):
# Given
product_set = ProductSet()
update_product_set_method = get_conn.return_value.update_product_set
update_product_set_method.return_value = product_set
productset_name = ProductSearchClient.product_set_path(
PROJECT_ID_TEST, LOC_ID_TEST, PRODUCTSET_ID_TEST
)
# When
result = self.hook.update_product_set(
location=LOC_ID_TEST,
product_set_id=PRODUCTSET_ID_TEST,
product_set=product_set,
update_mask=None,
project_id=PROJECT_ID_TEST,
retry=None,
timeout=None,
metadata=None,
)
# Then
assert result == MessageToDict(product_set)
update_product_set_method.assert_called_once_with(
product_set=ProductSet(name=productset_name),
metadata=None,
retry=None,
timeout=None,
update_mask=None,
)
@parameterized.expand([(None, None), (None, PRODUCTSET_ID_TEST), (LOC_ID_TEST, None)])
@mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
def test_update_productset_no_explicit_name_and_missing_params_for_constructed_name(
self, location, product_set_id, get_conn
):
# Given
update_product_set_method = get_conn.return_value.update_product_set
update_product_set_method.return_value = None
product_set = ProductSet()
# When
with pytest.raises(AirflowException) as ctx:
self.hook.update_product_set(
location=location,
product_set_id=product_set_id,
product_set=product_set,
update_mask=None,
project_id=PROJECT_ID_TEST,
retry=None,
timeout=None,
metadata=None,
)
err = ctx.value
assert err
assert ERR_UNABLE_TO_CREATE.format(label='ProductSet', id_label='productset_id') in str(err)
update_product_set_method.assert_not_called()
@parameterized.expand([(None, None), (None, PRODUCTSET_ID_TEST), (LOC_ID_TEST, None)])
@mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
def test_update_productset_explicit_name_missing_params_for_constructed_name(
self, location, product_set_id, get_conn
):
# Given
explicit_ps_name = ProductSearchClient.product_set_path(
PROJECT_ID_TEST_2, LOC_ID_TEST_2, PRODUCTSET_ID_TEST_2
)
product_set = ProductSet(name=explicit_ps_name)
update_product_set_method = get_conn.return_value.update_product_set
update_product_set_method.return_value = product_set
# When
result = self.hook.update_product_set(
location=location,
product_set_id=product_set_id,
product_set=product_set,
update_mask=None,
project_id=PROJECT_ID_TEST,
retry=None,
timeout=None,
metadata=None,
)
# Then
assert result == MessageToDict(product_set)
update_product_set_method.assert_called_once_with(
product_set=ProductSet(name=explicit_ps_name),
metadata=None,
retry=None,
timeout=None,
update_mask=None,
)
@mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
def test_update_productset_explicit_name_different_from_constructed(self, get_conn):
# Given
update_product_set_method = get_conn.return_value.update_product_set
update_product_set_method.return_value = None
explicit_ps_name = ProductSearchClient.product_set_path(
PROJECT_ID_TEST_2, LOC_ID_TEST_2, PRODUCTSET_ID_TEST_2
)
product_set = ProductSet(name=explicit_ps_name)
template_ps_name = ProductSearchClient.product_set_path(
PROJECT_ID_TEST, LOC_ID_TEST, PRODUCTSET_ID_TEST
)
# When
# Location and product_set_id are passed in addition to a ProductSet with an explicit name,
# but both names differ (constructed != explicit).
# Should throw AirflowException in this case.
with pytest.raises(AirflowException) as ctx:
self.hook.update_product_set(
location=LOC_ID_TEST,
product_set_id=PRODUCTSET_ID_TEST,
product_set=product_set,
update_mask=None,
project_id=PROJECT_ID_TEST,
retry=None,
timeout=None,
metadata=None,
)
err = ctx.value
# self.assertIn("The required parameter 'project_id' is missing", str(err))
assert err
assert (
ERR_DIFF_NAMES.format(
explicit_name=explicit_ps_name,
constructed_name=template_ps_name,
label="ProductSet",
id_label="productset_id",
)
in str(err)
)
update_product_set_method.assert_not_called()
@mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
def test_delete_productset(self, get_conn):
# Given
delete_product_set_method = get_conn.return_value.delete_product_set
delete_product_set_method.return_value = None
name = ProductSearchClient.product_set_path(PROJECT_ID_TEST, LOC_ID_TEST, PRODUCTSET_ID_TEST)
# When
response = self.hook.delete_product_set(
location=LOC_ID_TEST, product_set_id=PRODUCTSET_ID_TEST, project_id=PROJECT_ID_TEST
)
# Then
assert response is None
delete_product_set_method.assert_called_once_with(name=name, retry=None, timeout=None, metadata=None)
@mock.patch(
'airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn',
**{'return_value.create_reference_image.return_value': REFERENCE_IMAGE_TEST},
)
def test_create_reference_image_explicit_id(self, get_conn):
# Given
create_reference_image_method = get_conn.return_value.create_reference_image
# When
result = self.hook.create_reference_image(
project_id=PROJECT_ID_TEST,
location=LOC_ID_TEST,
product_id=PRODUCT_ID_TEST,
reference_image=REFERENCE_IMAGE_WITHOUT_ID_NAME,
reference_image_id=REFERENCE_IMAGE_ID_TEST,
)
# Then
        # Reference image ID was provided explicitly in the method call above, should be returned from the method
assert result == REFERENCE_IMAGE_ID_TEST
create_reference_image_method.assert_called_once_with(
parent=PRODUCT_NAME,
reference_image=REFERENCE_IMAGE_WITHOUT_ID_NAME,
reference_image_id=REFERENCE_IMAGE_ID_TEST,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch(
'airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn',
**{'return_value.create_reference_image.return_value': REFERENCE_IMAGE_TEST},
)
def test_create_reference_image_autogenerated_id(self, get_conn):
# Given
create_reference_image_method = get_conn.return_value.create_reference_image
# When
result = self.hook.create_reference_image(
project_id=PROJECT_ID_TEST,
location=LOC_ID_TEST,
product_id=PRODUCT_ID_TEST,
reference_image=REFERENCE_IMAGE_TEST,
reference_image_id=REFERENCE_IMAGE_ID_TEST,
)
# Then
        # Reference image ID should be returned from the method
assert result == REFERENCE_IMAGE_GEN_ID_TEST
create_reference_image_method.assert_called_once_with(
parent=PRODUCT_NAME,
reference_image=REFERENCE_IMAGE_TEST,
reference_image_id=REFERENCE_IMAGE_ID_TEST,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
def test_add_product_to_product_set(self, get_conn):
# Given
add_product_to_product_set_method = get_conn.return_value.add_product_to_product_set
# When
self.hook.add_product_to_product_set(
product_set_id=PRODUCTSET_ID_TEST,
product_id=PRODUCT_ID_TEST,
location=LOC_ID_TEST,
project_id=PROJECT_ID_TEST,
)
# Then
        # The product should be added to the product set using the constructed resource names
add_product_to_product_set_method.assert_called_once_with(
name=PRODUCTSET_NAME_TEST, product=PRODUCT_NAME_TEST, retry=None, timeout=None, metadata=None
)
# remove_product_from_product_set
@mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
def test_remove_product_from_product_set(self, get_conn):
# Given
remove_product_from_product_set_method = get_conn.return_value.remove_product_from_product_set
# When
self.hook.remove_product_from_product_set(
product_set_id=PRODUCTSET_ID_TEST,
product_id=PRODUCT_ID_TEST,
location=LOC_ID_TEST,
project_id=PROJECT_ID_TEST,
)
# Then
        # The product should be removed from the product set using the constructed resource names
remove_product_from_product_set_method.assert_called_once_with(
name=PRODUCTSET_NAME_TEST, product=PRODUCT_NAME_TEST, retry=None, timeout=None, metadata=None
)
@mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.annotator_client')
def test_annotate_image(self, annotator_client_mock):
# Given
annotate_image_method = annotator_client_mock.annotate_image
# When
self.hook.annotate_image(request=ANNOTATE_IMAGE_REQUEST)
# Then
        # The request should be passed through to the annotator client unchanged
annotate_image_method.assert_called_once_with(
request=ANNOTATE_IMAGE_REQUEST, retry=None, timeout=None
)
@mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.annotator_client')
def test_batch_annotate_images(self, annotator_client_mock):
# Given
batch_annotate_images_method = annotator_client_mock.batch_annotate_images
# When
self.hook.batch_annotate_images(requests=BATCH_ANNOTATE_IMAGE_REQUEST)
# Then
        # The requests should be passed through to the annotator client unchanged
batch_annotate_images_method.assert_called_once_with(
requests=BATCH_ANNOTATE_IMAGE_REQUEST, retry=None, timeout=None
)
@mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
def test_create_product_explicit_id(self, get_conn):
# Given
create_product_method = get_conn.return_value.create_product
create_product_method.return_value = None
parent = ProductSearchClient.location_path(PROJECT_ID_TEST, LOC_ID_TEST)
product = Product()
# When
result = self.hook.create_product(
location=LOC_ID_TEST, product_id=PRODUCT_ID_TEST, product=product, project_id=PROJECT_ID_TEST
)
# Then
# Product ID was provided explicitly in the method call above, should be returned from the method
assert result == PRODUCT_ID_TEST
create_product_method.assert_called_once_with(
parent=parent,
product=product,
product_id=PRODUCT_ID_TEST,
retry=None,
timeout=None,
metadata=None,
)
@mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
def test_create_product_autogenerated_id(self, get_conn):
# Given
autogenerated_id = 'autogen-p-id'
response_product = Product(
name=ProductSearchClient.product_path(PROJECT_ID_TEST, LOC_ID_TEST, autogenerated_id)
)
create_product_method = get_conn.return_value.create_product
create_product_method.return_value = response_product
parent = ProductSearchClient.location_path(PROJECT_ID_TEST, LOC_ID_TEST)
product = Product()
# When
result = self.hook.create_product(
location=LOC_ID_TEST, product_id=None, product=product, project_id=PROJECT_ID_TEST
)
# Then
# Product ID was not provided in the method call above. Should be extracted from the API response
# and returned.
assert result == autogenerated_id
create_product_method.assert_called_once_with(
parent=parent, product=product, product_id=None, retry=None, timeout=None, metadata=None
)
@mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
def test_create_product_autogenerated_id_wrong_name_in_response(self, get_conn):
# Given
wrong_name = 'wrong_name_not_a_correct_path'
response_product = Product(name=wrong_name)
create_product_method = get_conn.return_value.create_product
create_product_method.return_value = response_product
parent = ProductSearchClient.location_path(PROJECT_ID_TEST, LOC_ID_TEST)
product = Product()
# When
with pytest.raises(AirflowException) as ctx:
self.hook.create_product(
location=LOC_ID_TEST, product_id=None, product=product, project_id=PROJECT_ID_TEST
)
# Then
# API response was wrong (wrong name format) and thus ProductSet ID extraction should fail.
err = ctx.value
assert 'Unable to get id from name' in str(err)
create_product_method.assert_called_once_with(
parent=parent, product=product, product_id=None, retry=None, timeout=None, metadata=None
)
@mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
def test_create_product_autogenerated_id_wrong_api_response(self, get_conn):
# Given
response_product = None
create_product_method = get_conn.return_value.create_product
create_product_method.return_value = response_product
parent = ProductSearchClient.location_path(PROJECT_ID_TEST, LOC_ID_TEST)
product = Product()
# When
with pytest.raises(AirflowException) as ctx:
self.hook.create_product(
location=LOC_ID_TEST, product_id=None, product=product, project_id=PROJECT_ID_TEST
)
# Then
# API response was wrong (None) and thus ProductSet ID extraction should fail.
err = ctx.value
assert 'Unable to get name from response...' in str(err)
create_product_method.assert_called_once_with(
parent=parent, product=product, product_id=None, retry=None, timeout=None, metadata=None
)
@mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
def test_update_product_no_explicit_name(self, get_conn):
# Given
product = Product()
update_product_method = get_conn.return_value.update_product
update_product_method.return_value = product
product_name = ProductSearchClient.product_path(PROJECT_ID_TEST, LOC_ID_TEST, PRODUCT_ID_TEST)
# When
result = self.hook.update_product(
location=LOC_ID_TEST,
product_id=PRODUCT_ID_TEST,
product=product,
update_mask=None,
project_id=PROJECT_ID_TEST,
retry=None,
timeout=None,
metadata=None,
)
# Then
assert result == MessageToDict(product)
update_product_method.assert_called_once_with(
product=Product(name=product_name), metadata=None, retry=None, timeout=None, update_mask=None
)
@parameterized.expand([(None, None), (None, PRODUCT_ID_TEST), (LOC_ID_TEST, None)])
@mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
def test_update_product_no_explicit_name_and_missing_params_for_constructed_name(
self, location, product_id, get_conn
):
# Given
update_product_method = get_conn.return_value.update_product
update_product_method.return_value = None
product = Product()
# When
with pytest.raises(AirflowException) as ctx:
self.hook.update_product(
location=location,
product_id=product_id,
product=product,
update_mask=None,
project_id=PROJECT_ID_TEST,
retry=None,
timeout=None,
metadata=None,
)
err = ctx.value
assert err
assert ERR_UNABLE_TO_CREATE.format(label='Product', id_label='product_id') in str(err)
update_product_method.assert_not_called()
@parameterized.expand([(None, None), (None, PRODUCT_ID_TEST), (LOC_ID_TEST, None)])
@mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
def test_update_product_explicit_name_missing_params_for_constructed_name(
self, location, product_id, get_conn
):
# Given
explicit_p_name = ProductSearchClient.product_path(
PROJECT_ID_TEST_2, LOC_ID_TEST_2, PRODUCT_ID_TEST_2
)
product = Product(name=explicit_p_name)
update_product_method = get_conn.return_value.update_product
update_product_method.return_value = product
# When
result = self.hook.update_product(
location=location,
product_id=product_id,
product=product,
update_mask=None,
project_id=PROJECT_ID_TEST,
retry=None,
timeout=None,
metadata=None,
)
# Then
assert result == MessageToDict(product)
update_product_method.assert_called_once_with(
product=Product(name=explicit_p_name), metadata=None, retry=None, timeout=None, update_mask=None
)
@mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
def test_update_product_explicit_name_different_from_constructed(self, get_conn):
# Given
update_product_method = get_conn.return_value.update_product
update_product_method.return_value = None
explicit_p_name = ProductSearchClient.product_path(
PROJECT_ID_TEST_2, LOC_ID_TEST_2, PRODUCT_ID_TEST_2
)
product = Product(name=explicit_p_name)
template_p_name = ProductSearchClient.product_path(PROJECT_ID_TEST, LOC_ID_TEST, PRODUCT_ID_TEST)
# When
# Location and product_id are passed in addition to a Product with an explicit name,
# but both names differ (constructed != explicit).
# Should throw AirflowException in this case.
with pytest.raises(AirflowException) as ctx:
self.hook.update_product(
location=LOC_ID_TEST,
product_id=PRODUCT_ID_TEST,
product=product,
update_mask=None,
project_id=PROJECT_ID_TEST,
retry=None,
timeout=None,
metadata=None,
)
err = ctx.value
assert err
assert (
ERR_DIFF_NAMES.format(
explicit_name=explicit_p_name,
constructed_name=template_p_name,
label="Product",
id_label="product_id",
)
in str(err)
)
update_product_method.assert_not_called()
@mock.patch('airflow.providers.google.cloud.hooks.vision.CloudVisionHook.get_conn')
def test_delete_product(self, get_conn):
# Given
delete_product_method = get_conn.return_value.delete_product
delete_product_method.return_value = None
name = ProductSearchClient.product_path(PROJECT_ID_TEST, LOC_ID_TEST, PRODUCT_ID_TEST)
# When
response = self.hook.delete_product(
location=LOC_ID_TEST, product_id=PRODUCT_ID_TEST, project_id=PROJECT_ID_TEST
)
# Then
assert response is None
delete_product_method.assert_called_once_with(name=name, retry=None, timeout=None, metadata=None)
@mock.patch("airflow.providers.google.cloud.hooks.vision.CloudVisionHook.annotator_client")
def test_detect_text(self, annotator_client_mock):
# Given
detect_text_method = annotator_client_mock.text_detection
detect_text_method.return_value = AnnotateImageResponse(
text_annotations=[EntityAnnotation(description="test", score=0.5)]
)
# When
self.hook.text_detection(image=DETECT_TEST_IMAGE)
# Then
detect_text_method.assert_called_once_with(
image=DETECT_TEST_IMAGE, max_results=None, retry=None, timeout=None
)
@mock.patch("airflow.providers.google.cloud.hooks.vision.CloudVisionHook.annotator_client")
def test_detect_text_with_additional_properties(self, annotator_client_mock):
# Given
detect_text_method = annotator_client_mock.text_detection
detect_text_method.return_value = AnnotateImageResponse(
text_annotations=[EntityAnnotation(description="test", score=0.5)]
)
# When
self.hook.text_detection(
image=DETECT_TEST_IMAGE, additional_properties={"prop1": "test1", "prop2": "test2"}
)
# Then
detect_text_method.assert_called_once_with(
image=DETECT_TEST_IMAGE, max_results=None, retry=None, timeout=None, prop1="test1", prop2="test2"
)
@mock.patch("airflow.providers.google.cloud.hooks.vision.CloudVisionHook.annotator_client")
def test_detect_text_with_error_response(self, annotator_client_mock):
# Given
detect_text_method = annotator_client_mock.text_detection
detect_text_method.return_value = AnnotateImageResponse(
error={"code": 3, "message": "test error message"}
)
# When
with pytest.raises(AirflowException) as ctx:
self.hook.text_detection(image=DETECT_TEST_IMAGE)
err = ctx.value
assert "test error message" in str(err)
@mock.patch("airflow.providers.google.cloud.hooks.vision.CloudVisionHook.annotator_client")
def test_document_text_detection(self, annotator_client_mock):
# Given
document_text_detection_method = annotator_client_mock.document_text_detection
document_text_detection_method.return_value = AnnotateImageResponse(
text_annotations=[EntityAnnotation(description="test", score=0.5)]
)
# When
self.hook.document_text_detection(image=DETECT_TEST_IMAGE)
# Then
document_text_detection_method.assert_called_once_with(
image=DETECT_TEST_IMAGE, max_results=None, retry=None, timeout=None
)
@mock.patch("airflow.providers.google.cloud.hooks.vision.CloudVisionHook.annotator_client")
def test_document_text_detection_with_additional_properties(self, annotator_client_mock):
# Given
document_text_detection_method = annotator_client_mock.document_text_detection
document_text_detection_method.return_value = AnnotateImageResponse(
text_annotations=[EntityAnnotation(description="test", score=0.5)]
)
# When
self.hook.document_text_detection(
image=DETECT_TEST_IMAGE, additional_properties={"prop1": "test1", "prop2": "test2"}
)
# Then
document_text_detection_method.assert_called_once_with(
image=DETECT_TEST_IMAGE, max_results=None, retry=None, timeout=None, prop1="test1", prop2="test2"
)
@mock.patch("airflow.providers.google.cloud.hooks.vision.CloudVisionHook.annotator_client")
def test_detect_document_text_with_error_response(self, annotator_client_mock):
# Given
detect_text_method = annotator_client_mock.document_text_detection
detect_text_method.return_value = AnnotateImageResponse(
error={"code": 3, "message": "test error message"}
)
# When
with pytest.raises(AirflowException) as ctx:
self.hook.document_text_detection(image=DETECT_TEST_IMAGE)
err = ctx.value
assert "test error message" in str(err)
@mock.patch("airflow.providers.google.cloud.hooks.vision.CloudVisionHook.annotator_client")
def test_label_detection(self, annotator_client_mock):
# Given
label_detection_method = annotator_client_mock.label_detection
label_detection_method.return_value = AnnotateImageResponse(
label_annotations=[EntityAnnotation(description="test", score=0.5)]
)
# When
self.hook.label_detection(image=DETECT_TEST_IMAGE)
# Then
label_detection_method.assert_called_once_with(
image=DETECT_TEST_IMAGE, max_results=None, retry=None, timeout=None
)
@mock.patch("airflow.providers.google.cloud.hooks.vision.CloudVisionHook.annotator_client")
def test_label_detection_with_additional_properties(self, annotator_client_mock):
# Given
label_detection_method = annotator_client_mock.label_detection
label_detection_method.return_value = AnnotateImageResponse(
label_annotations=[EntityAnnotation(description="test", score=0.5)]
)
# When
self.hook.label_detection(
image=DETECT_TEST_IMAGE, additional_properties={"prop1": "test1", "prop2": "test2"}
)
# Then
label_detection_method.assert_called_once_with(
image=DETECT_TEST_IMAGE, max_results=None, retry=None, timeout=None, prop1="test1", prop2="test2"
)
@mock.patch("airflow.providers.google.cloud.hooks.vision.CloudVisionHook.annotator_client")
def test_label_detection_with_error_response(self, annotator_client_mock):
# Given
detect_text_method = annotator_client_mock.label_detection
detect_text_method.return_value = AnnotateImageResponse(
error={"code": 3, "message": "test error message"}
)
# When
with pytest.raises(AirflowException) as ctx:
self.hook.label_detection(image=DETECT_TEST_IMAGE)
err = ctx.value
assert "test error message" in str(err)
@mock.patch("airflow.providers.google.cloud.hooks.vision.CloudVisionHook.annotator_client")
def test_safe_search_detection(self, annotator_client_mock):
# Given
safe_search_detection_method = annotator_client_mock.safe_search_detection
safe_search_detection_method.return_value = AnnotateImageResponse(
safe_search_annotation=SafeSearchAnnotation(
adult="VERY_UNLIKELY",
spoof="VERY_UNLIKELY",
medical="VERY_UNLIKELY",
violence="VERY_UNLIKELY",
racy="VERY_UNLIKELY",
)
)
# When
self.hook.safe_search_detection(image=DETECT_TEST_IMAGE)
# Then
safe_search_detection_method.assert_called_once_with(
image=DETECT_TEST_IMAGE, max_results=None, retry=None, timeout=None
)
@mock.patch("airflow.providers.google.cloud.hooks.vision.CloudVisionHook.annotator_client")
def test_safe_search_detection_with_additional_properties(self, annotator_client_mock):
# Given
safe_search_detection_method = annotator_client_mock.safe_search_detection
safe_search_detection_method.return_value = AnnotateImageResponse(
safe_search_annotation=SafeSearchAnnotation(
adult="VERY_UNLIKELY",
spoof="VERY_UNLIKELY",
medical="VERY_UNLIKELY",
violence="VERY_UNLIKELY",
racy="VERY_UNLIKELY",
)
)
# When
self.hook.safe_search_detection(
image=DETECT_TEST_IMAGE, additional_properties={"prop1": "test1", "prop2": "test2"}
)
# Then
safe_search_detection_method.assert_called_once_with(
image=DETECT_TEST_IMAGE, max_results=None, retry=None, timeout=None, prop1="test1", prop2="test2"
)
@mock.patch("airflow.providers.google.cloud.hooks.vision.CloudVisionHook.annotator_client")
def test_safe_search_detection_with_error_response(self, annotator_client_mock):
# Given
detect_text_method = annotator_client_mock.safe_search_detection
detect_text_method.return_value = AnnotateImageResponse(
error={"code": 3, "message": "test error message"}
)
# When
with pytest.raises(AirflowException) as ctx:
self.hook.safe_search_detection(image=DETECT_TEST_IMAGE)
err = ctx.value
assert "test error message" in str(err)
| apache/incubator-airflow | tests/providers/google/cloud/hooks/test_vision.py | Python | apache-2.0 | 38,031 |
from django.db import models
from django.utils.html import format_html
from sorl.thumbnail import get_thumbnail
from sorl.thumbnail.fields import ImageField
from sno.models import Sno
class SnoGalleries(models.Model):
class Meta:
verbose_name = 'Фотография в галереи СНО'
verbose_name_plural = 'Фотографии в галереи СНО'
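    # English gloss of the Russian admin labels: the Meta verbose names read
    # "Photo in the SNO gallery" / "Photos in the SNO gallery"; the field
    # labels below mean "Photo name", "Photo", "Description", "SNO" and "Date".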
name = models.CharField('Название фото', max_length=255, blank=True, null=True)
photo = ImageField(verbose_name='Фото', max_length=255)
description = models.TextField('Описание', blank=True, null=True)
sno = models.ForeignKey(Sno, verbose_name='СНО', on_delete=models.CASCADE)
date_created = models.DateField('Дата', auto_now_add=True)
def photo_preview(self):
img = get_thumbnail(self.photo, '75x75', crop='center')
return format_html('<a href="{}" target="_blank"><img style="width:75px; height:75px;" src="{}"></a>',
self.photo.url, img.url)
photo_preview.short_description = 'Фото'
def __str__(self):
return '%s (%s)' % (self.name, self.sno.short_name)
| glad-web-developer/zab_sno | src/sno_galleries/models.py | Python | apache-2.0 | 1,164 |
# Copyright 2018 Flight Lab authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library for network related helpers."""
import socket
def get_ip():
"""Get primary IP (the one with a default route) of local machine.
  This works on both Linux and Windows platforms, and doesn't require a working
internet connection.
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
return s.getsockname()[0]
except:
return '127.0.0.1'
finally:
s.close()
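# A minimal, hedged usage sketch (not in the original module): printing the
# primary IP when the file is run directly. No real network traffic is sent,
# since get_ip() only opens a UDP socket without transmitting.
if __name__ == '__main__':
  print('primary local IP: %s' % get_ip())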
| google/flight-lab | controller/common/net.py | Python | apache-2.0 | 1,062 |
import functools
import warnings
from collections import Mapping, Sequence
from numbers import Number
import numpy as np
import pandas as pd
from . import ops
from . import utils
from . import common
from . import groupby
from . import indexing
from . import alignment
from . import formatting
from .. import conventions
from .alignment import align, partial_align
from .coordinates import DatasetCoordinates, Indexes
from .common import ImplementsDatasetReduce, BaseDataObject
from .utils import (Frozen, SortedKeysDict, ChainMap, maybe_wrap_array)
from .variable import as_variable, Variable, Coordinate, broadcast_variables
from .pycompat import (iteritems, itervalues, basestring, OrderedDict,
dask_array_type)
from .combine import concat
# list of attributes of pd.DatetimeIndex that are ndarrays of time info
_DATETIMEINDEX_COMPONENTS = ['year', 'month', 'day', 'hour', 'minute',
'second', 'microsecond', 'nanosecond', 'date',
'time', 'dayofyear', 'weekofyear', 'dayofweek',
'quarter']
def _get_virtual_variable(variables, key):
"""Get a virtual variable (e.g., 'time.year') from a dict of xray.Variable
objects (if possible)
"""
if not isinstance(key, basestring):
raise KeyError(key)
split_key = key.split('.', 1)
if len(split_key) != 2:
raise KeyError(key)
ref_name, var_name = split_key
ref_var = variables[ref_name]
if ref_var.ndim == 1:
date = ref_var.to_index()
elif ref_var.ndim == 0:
date = pd.Timestamp(ref_var.values)
else:
raise KeyError(key)
if var_name == 'season':
# TODO: move 'season' into pandas itself
seasons = np.array(['DJF', 'MAM', 'JJA', 'SON'])
month = date.month
data = seasons[(month // 3) % 4]
else:
data = getattr(date, var_name)
return ref_name, var_name, Variable(ref_var.dims, data)
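# Hedged example (illustrative, assumes a Dataset with a datetime index named
# 'time'): virtual variables such as 'time.month' or 'time.season' are resolved
# through this helper when accessed via Dataset.__getitem__.
#
#     ds = Dataset(coords={'time': pd.date_range('2000-01-01', periods=365)})
#     ds['time.month']    # DataArray of month numbers derived from 'time'
#     ds['time.season']   # DataArray of 'DJF'/'MAM'/'JJA'/'SON' labels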
def _as_dataset_variable(name, var):
"""Prepare a variable for adding it to a Dataset
"""
try:
var = as_variable(var, key=name)
except TypeError:
raise TypeError('Dataset variables must be an array or a tuple of '
'the form (dims, data[, attrs, encoding])')
if name in var.dims:
        # convert the variable into an index coordinate
if var.ndim != 1:
raise ValueError('an index variable must be defined with '
'1-dimensional data')
var = var.to_coord()
return var
def _align_variables(variables, join='outer'):
"""Align all DataArrays in the provided dict, leaving other values alone.
"""
alignable = [k for k, v in variables.items() if hasattr(v, 'indexes')]
aligned = align(*[variables[a] for a in alignable],
join=join, copy=False)
new_variables = OrderedDict(variables)
new_variables.update(zip(alignable, aligned))
return new_variables
def _expand_variables(raw_variables, old_variables={}, compat='identical'):
"""Expand a dictionary of variables.
Returns a dictionary of Variable objects suitable for inserting into a
Dataset._variables dictionary.
This includes converting tuples (dims, data) into Variable objects,
converting coordinate variables into Coordinate objects and expanding
DataArray objects into Variables plus coordinates.
Raises ValueError if any conflicting values are found, between any of the
new or old variables.
"""
new_variables = OrderedDict()
new_coord_names = set()
variables = ChainMap(new_variables, old_variables)
def maybe_promote_or_replace(name, var):
existing_var = variables[name]
if name not in existing_var.dims:
if name in var.dims:
variables[name] = var
else:
common_dims = OrderedDict(zip(existing_var.dims,
existing_var.shape))
common_dims.update(zip(var.dims, var.shape))
variables[name] = existing_var.expand_dims(common_dims)
new_coord_names.update(var.dims)
def add_variable(name, var):
var = _as_dataset_variable(name, var)
if name not in variables:
variables[name] = var
new_coord_names.update(variables[name].dims)
else:
if not getattr(variables[name], compat)(var):
raise ValueError('conflicting value for variable %s:\n'
'first value: %r\nsecond value: %r'
% (name, variables[name], var))
if compat == 'broadcast_equals':
maybe_promote_or_replace(name, var)
for name, var in iteritems(raw_variables):
if hasattr(var, 'coords'):
# it's a DataArray
new_coord_names.update(var.coords)
for dim, coord in iteritems(var.coords):
if dim != name:
add_variable(dim, coord.variable)
var = var.variable
add_variable(name, var)
return new_variables, new_coord_names
def _calculate_dims(variables):
"""Calculate the dimensions corresponding to a set of variables.
Returns dictionary mapping from dimension names to sizes. Raises ValueError
if any of the dimension sizes conflict.
"""
dims = {}
last_used = {}
scalar_vars = set(k for k, v in iteritems(variables) if not v.dims)
for k, var in iteritems(variables):
for dim, size in zip(var.dims, var.shape):
if dim in scalar_vars:
raise ValueError('dimension %s already exists as a scalar '
'variable' % dim)
if dim not in dims:
dims[dim] = size
last_used[dim] = k
elif dims[dim] != size:
raise ValueError('conflicting sizes for dimension %r: '
'length %s on %r and length %s on %r' %
(dim, size, k, dims[dim], last_used[dim]))
return dims
def _merge_expand(aligned_self, other, overwrite_vars, compat):
possible_conflicts = dict((k, v) for k, v in aligned_self._variables.items()
if k not in overwrite_vars)
new_vars, new_coord_names = _expand_variables(other, possible_conflicts, compat)
replace_vars = aligned_self._variables.copy()
replace_vars.update(new_vars)
return replace_vars, new_vars, new_coord_names
def _merge_dataset(self, other, overwrite_vars, compat, join):
aligned_self, other = partial_align(self, other, join=join, copy=False)
replace_vars, new_vars, new_coord_names = _merge_expand(
aligned_self, other._variables, overwrite_vars, compat)
new_coord_names.update(other._coord_names)
return replace_vars, new_vars, new_coord_names
def _merge_dict(self, other, overwrite_vars, compat, join):
other = _align_variables(other, join='outer')
alignable = [k for k, v in other.items() if hasattr(v, 'indexes')]
aligned = partial_align(self, *[other[a] for a in alignable],
join=join, copy=False, exclude=overwrite_vars)
aligned_self = aligned[0]
other = OrderedDict(other)
other.update(zip(alignable, aligned[1:]))
return _merge_expand(aligned_self, other, overwrite_vars, compat)
def _assert_empty(args, msg='%s'):
if args:
raise ValueError(msg % args)
def as_dataset(obj):
"""Cast the given object to a Dataset.
Handles DataArrays, Datasets and dictionaries of variables. A new Dataset
object is only created in the last case.
"""
obj = getattr(obj, '_dataset', obj)
if not isinstance(obj, Dataset):
obj = Dataset(obj)
return obj
class Variables(Mapping):
def __init__(self, dataset):
self._dataset = dataset
def __iter__(self):
return (key for key in self._dataset._variables
if key not in self._dataset._coord_names)
def __len__(self):
return len(self._dataset._variables) - len(self._dataset._coord_names)
def __contains__(self, key):
return (key in self._dataset._variables
and key not in self._dataset._coord_names)
def __getitem__(self, key):
if key not in self._dataset._coord_names:
return self._dataset[key]
else:
raise KeyError(key)
def __repr__(self):
return formatting.vars_repr(self)
class _LocIndexer(object):
def __init__(self, dataset):
self.dataset = dataset
def __getitem__(self, key):
if not utils.is_dict_like(key):
raise TypeError('can only lookup dictionaries from Dataset.loc')
return self.dataset.sel(**key)
class Dataset(Mapping, ImplementsDatasetReduce, BaseDataObject):
"""A multi-dimensional, in memory, array database.
A dataset resembles an in-memory representation of a NetCDF file, and
consists of variables, coordinates and attributes which together form a
self describing dataset.
Dataset implements the mapping interface with keys given by variable names
and values given by DataArray objects for each variable name.
One dimensional variables with name equal to their dimension are index
coordinates used for label based indexing.
"""
# class properties defined for the benefit of __setstate__, which otherwise
# runs into trouble because we overrode __getattr__
_attrs = None
_variables = Frozen({})
groupby_cls = groupby.DatasetGroupBy
def __init__(self, variables=None, coords=None, attrs=None,
compat='broadcast_equals'):
"""To load data from a file or file-like object, use the `open_dataset`
function.
Parameters
----------
variables : dict-like, optional
A mapping from variable names to :py:class:`~xray.DataArray`
objects, :py:class:`~xray.Variable` objects or tuples of the
form ``(dims, data[, attrs])`` which can be used as arguments to
create a new ``Variable``. Each dimension must have the same length
in all variables in which it appears.
coords : dict-like, optional
Another mapping in the same form as the `variables` argument,
            except that each item is saved on the dataset as a "coordinate".
These variables have an associated meaning: they describe
constant/fixed/independent quantities, unlike the
varying/measured/dependent quantities that belong in `variables`.
            Coordinate values may be given by 1-dimensional arrays or scalars,
in which case `dims` do not need to be supplied: 1D arrays will be
assumed to give index values along the dimension with the same
name.
attrs : dict-like, optional
Global attributes to save on this dataset.
compat : {'broadcast_equals', 'equals', 'identical'}, optional
String indicating how to compare variables of the same name for
potential conflicts:
- 'broadcast_equals': all values must be equal when variables are
broadcast against each other to ensure common dimensions.
- 'equals': all values and dimensions must be the same.
- 'identical': all values, dimensions and attributes must be the
same.
"""
self._variables = OrderedDict()
self._coord_names = set()
self._dims = {}
self._attrs = None
self._file_obj = None
if variables is None:
variables = {}
if coords is None:
coords = set()
if variables or coords:
self._set_init_vars_and_dims(variables, coords, compat)
if attrs is not None:
self.attrs = attrs
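    # Hedged construction sketch (illustrative, not from the original docs):
    # a small dataset with one data variable, one index coordinate and global
    # attributes. All names and values below are made up.
    #
    #     ds = Dataset({'t2m': (('time',), np.arange(4.0))},
    #                  coords={'time': pd.date_range('2000-01-01', periods=4)},
    #                  attrs={'title': 'toy example'})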
def _add_missing_coords_inplace(self):
"""Add missing coordinates to self._variables
"""
for dim, size in iteritems(self.dims):
if dim not in self._variables:
# This is equivalent to np.arange(size), but
                # waits to create the array until it's actually accessed.
data = indexing.LazyIntegerRange(size)
coord = Coordinate(dim, data)
self._variables[dim] = coord
def _update_vars_and_coords(self, new_variables, new_coord_names={},
needs_copy=True, check_coord_names=True):
"""Add a dictionary of new variables to this dataset.
Raises a ValueError if any dimensions have conflicting lengths in the
new dataset. Otherwise will update this dataset's _variables and
_dims attributes in-place.
Set `needs_copy=False` only if this dataset is brand-new and hence
can be thrown away if this method fails.
"""
# default to creating another copy of variables so can unroll if we end
# up with inconsistent dimensions
variables = self._variables.copy() if needs_copy else self._variables
if check_coord_names:
_assert_empty([k for k in self.data_vars if k in new_coord_names],
'coordinates with these names already exist as '
'variables: %s')
variables.update(new_variables)
dims = _calculate_dims(variables)
# all checks are complete: it's safe to update
self._variables = variables
self._dims = dims
self._add_missing_coords_inplace()
self._coord_names.update(new_coord_names)
def _set_init_vars_and_dims(self, vars, coords, compat):
"""Set the initial value of Dataset variables and dimensions
"""
_assert_empty([k for k in vars if k in coords],
'redundant variables and coordinates: %s')
variables = ChainMap(vars, coords)
aligned = _align_variables(variables)
new_variables, new_coord_names = _expand_variables(aligned,
compat=compat)
new_coord_names.update(coords)
self._update_vars_and_coords(new_variables, new_coord_names,
needs_copy=False, check_coord_names=False)
@classmethod
def load_store(cls, store, decoder=None):
"""Create a new dataset from the contents of a backends.*DataStore
object
"""
variables, attributes = store.load()
if decoder:
variables, attributes = decoder(variables, attributes)
obj = cls(variables, attrs=attributes)
obj._file_obj = store
return obj
def close(self):
"""Close any files linked to this dataset
"""
if self._file_obj is not None:
self._file_obj.close()
self._file_obj = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def __getstate__(self):
"""Always load data in-memory before pickling"""
self.load()
        # self.__dict__ is the default pickle object, so we don't need to
# implement our own __setstate__ method to make pickle work
state = self.__dict__.copy()
# throw away any references to datastores in the pickle
state['_file_obj'] = None
return state
@property
def variables(self):
"""Frozen dictionary of xray.Variable objects constituting this
dataset's data
"""
return Frozen(self._variables)
def _attrs_copy(self):
return None if self._attrs is None else OrderedDict(self._attrs)
@property
def attrs(self):
"""Dictionary of global attributes on this dataset
"""
if self._attrs is None:
self._attrs = OrderedDict()
return self._attrs
@attrs.setter
def attrs(self, value):
self._attrs = OrderedDict(value)
@property
def dims(self):
"""Mapping from dimension names to lengths.
This dictionary cannot be modified directly, but is updated when adding
new variables.
"""
return Frozen(SortedKeysDict(self._dims))
def load(self):
"""Manually trigger loading of this dataset's data from disk or a
remote source into memory and return this dataset.
Normally, it should not be necessary to call this method in user code,
because all xray functions should either work on deferred data or
load data automatically. However, this method can be necessary when
working with many file objects on disk.
"""
# access .data to coerce everything to numpy or dask arrays
all_data = dict((k, v.data) for k, v in self.variables.items())
lazy_data = dict((k, v) for k, v in all_data.items()
if isinstance(v, dask_array_type))
if lazy_data:
import dask.array as da
# evaluate all the dask arrays simultaneously
evaluated_data = da.compute(*lazy_data.values())
for k, data in zip(lazy_data, evaluated_data):
self.variables[k].data = data
return self
def load_data(self): # pragma: no cover
warnings.warn('the Dataset method `load_data` has been deprecated; '
'use `load` instead',
FutureWarning, stacklevel=2)
return self.load()
@classmethod
def _construct_direct(cls, variables, coord_names, dims, attrs,
file_obj=None):
"""Shortcut around __init__ for internal use when we want to skip
costly validation
"""
obj = object.__new__(cls)
obj._variables = variables
obj._coord_names = coord_names
obj._dims = dims
obj._attrs = attrs
obj._file_obj = file_obj
return obj
__default_attrs = object()
def _replace_vars_and_dims(self, variables, coord_names=None,
attrs=__default_attrs, inplace=False):
"""Fastpath constructor for internal use.
Preserves coord names and attributes; dimensions are recalculated from
the supplied variables.
The arguments are *not* copied when placed on the new dataset. It is up
to the caller to ensure that they have the right type and are not used
elsewhere.
Parameters
----------
variables : OrderedDict
coord_names : set or None, optional
attrs : OrderedDict or None, optional
Returns
-------
new : Dataset
"""
dims = _calculate_dims(variables)
if inplace:
self._dims = dims
self._variables = variables
if coord_names is not None:
self._coord_names = coord_names
if attrs is not self.__default_attrs:
self._attrs = attrs
obj = self
else:
if coord_names is None:
coord_names = self._coord_names.copy()
if attrs is self.__default_attrs:
attrs = self._attrs_copy()
obj = self._construct_direct(variables, coord_names, dims, attrs)
return obj
def copy(self, deep=False):
"""Returns a copy of this dataset.
If `deep=True`, a deep copy is made of each of the component variables.
Otherwise, a shallow copy is made, so each variable in the new dataset
is also a variable in the original dataset.
"""
if deep:
variables = OrderedDict((k, v.copy(deep=True))
for k, v in iteritems(self._variables))
else:
variables = self._variables.copy()
# skip __init__ to avoid costly validation
return self._construct_direct(variables, self._coord_names.copy(),
self._dims.copy(), self._attrs_copy())
def _copy_listed(self, names, keep_attrs=True):
"""Create a new Dataset with the listed variables from this dataset and
        all relevant coordinates. Skips all validation.
"""
variables = OrderedDict()
coord_names = set()
for name in names:
try:
variables[name] = self._variables[name]
except KeyError:
ref_name, var_name, var = _get_virtual_variable(
self._variables, name)
variables[var_name] = var
if ref_name in self._coord_names:
coord_names.add(var_name)
needed_dims = set()
for v in variables.values():
needed_dims.update(v._dims)
for k in self._coord_names:
if set(self._variables[k]._dims) <= needed_dims:
variables[k] = self._variables[k]
coord_names.add(k)
dims = dict((k, self._dims[k]) for k in needed_dims)
attrs = self.attrs.copy() if keep_attrs else None
return self._construct_direct(variables, coord_names, dims, attrs)
def __copy__(self):
return self.copy(deep=False)
def __deepcopy__(self, memo=None):
# memo does nothing but is required for compatibility with
# copy.deepcopy
return self.copy(deep=True)
def __contains__(self, key):
"""The 'in' operator will return true or false depending on whether
'key' is an array in the dataset or not.
"""
return key in self._variables
def __len__(self):
return len(self._variables)
def __iter__(self):
return iter(self._variables)
@property
def nbytes(self):
return sum(v.nbytes for v in self.variables.values())
@property
def loc(self):
"""Attribute for location based indexing. Only supports __getitem__,
and only when the key is a dict of the form {dim: labels}.
"""
return _LocIndexer(self)
def __getitem__(self, key):
"""Access variables or coordinates this dataset as a
:py:class:`~xray.DataArray`.
Indexing with a list of names will return a new ``Dataset`` object.
"""
from .dataarray import DataArray
if utils.is_dict_like(key):
return self.isel(**key)
key = np.asarray(key)
if key.ndim == 0:
return DataArray._new_from_dataset(self, key.item())
else:
return self._copy_listed(key)
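    # Hedged indexing sketch (illustrative): the three forms __getitem__
    # accepts, assuming a toy dataset with variables 't2m' and 'precip' and a
    # 'time' index coordinate.
    #
    #     ds['t2m']               # single name -> DataArray
    #     ds[['t2m', 'precip']]   # list of names -> new Dataset
    #     ds[{'time': 0}]         # dict -> positional indexing via isel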
def __setitem__(self, key, value):
"""Add an array to this dataset.
If value is a `DataArray`, call its `select_vars()` method, rename it
to `key` and merge the contents of the resulting dataset into this
dataset.
        If value is a `Variable` object (or tuple of form
``(dims, data[, attrs])``), add it to this dataset as a new
variable.
"""
if utils.is_dict_like(key):
raise NotImplementedError('cannot yet use a dictionary as a key '
'to set Dataset values')
self.update({key: value})
def __delitem__(self, key):
"""Remove a variable from this dataset.
If this variable is a dimension, all variables containing this
dimension are also removed.
"""
def remove(k):
del self._variables[k]
self._coord_names.discard(k)
remove(key)
if key in self._dims:
del self._dims[key]
also_delete = [k for k, v in iteritems(self._variables)
if key in v.dims]
for key in also_delete:
remove(key)
# mutable objects should not be hashable
__hash__ = None
def _all_compat(self, other, compat_str):
"""Helper function for equals and identical"""
# some stores (e.g., scipy) do not seem to preserve order, so don't
# require matching order for equality
compat = lambda x, y: getattr(x, compat_str)(y)
return (self._coord_names == other._coord_names
and utils.dict_equiv(self._variables, other._variables,
compat=compat))
def broadcast_equals(self, other):
"""Two Datasets are broadcast equal if they are equal after
broadcasting all variables against each other.
For example, variables that are scalar in one dataset but non-scalar in
        the other dataset can still be broadcast equal if the non-scalar
variable is a constant.
See Also
--------
Dataset.equals
Dataset.identical
"""
try:
return self._all_compat(other, 'broadcast_equals')
except (TypeError, AttributeError):
return False
def equals(self, other):
"""Two Datasets are equal if they have matching variables and
coordinates, all of which are equal.
Datasets can still be equal (like pandas objects) if they have NaN
values in the same locations.
This method is necessary because `v1 == v2` for ``Dataset``
        does element-wise comparisons (like numpy.ndarrays).
See Also
--------
Dataset.broadcast_equals
Dataset.identical
"""
try:
return self._all_compat(other, 'equals')
except (TypeError, AttributeError):
return False
def identical(self, other):
"""Like equals, but also checks all dataset attributes and the
attributes on all variables and coordinates.
See Also
--------
Dataset.broadcast_equals
Dataset.equals
"""
try:
return (utils.dict_equiv(self.attrs, other.attrs)
and self._all_compat(other, 'identical'))
except (TypeError, AttributeError):
return False
@property
def indexes(self):
"""OrderedDict of pandas.Index objects used for label based indexing
"""
return Indexes(self)
@property
def coords(self):
"""Dictionary of xray.DataArray objects corresponding to coordinate
variables
"""
return DatasetCoordinates(self)
@property
def data_vars(self):
"""Dictionary of xray.DataArray objects corresponding to data variables
"""
return Variables(self)
@property
def vars(self): # pragma: no cover
warnings.warn('the Dataset property `vars` has been deprecated; '
'use `data_vars` instead',
FutureWarning, stacklevel=2)
return self.data_vars
def set_coords(self, names, inplace=False):
"""Given names of one or more variables, set them as coordinates
Parameters
----------
names : str or list of str
Name(s) of variables in this dataset to convert into coordinates.
inplace : bool, optional
If True, modify this dataset inplace. Otherwise, create a new
object.
Returns
-------
Dataset
"""
# TODO: allow inserting new coordinates with this method, like
# DataFrame.set_index?
        # nb. check in self._variables, not self.data_vars to ensure that the
# operation is idempotent
if isinstance(names, basestring):
names = [names]
self._assert_all_in_dataset(names)
obj = self if inplace else self.copy()
obj._coord_names.update(names)
return obj
def reset_coords(self, names=None, drop=False, inplace=False):
"""Given names of coordinates, reset them to become variables
Parameters
----------
names : str or list of str, optional
Name(s) of non-index coordinates in this dataset to reset into
variables. By default, all non-index coordinates are reset.
drop : bool, optional
If True, remove coordinates instead of converting them into
variables.
inplace : bool, optional
If True, modify this dataset inplace. Otherwise, create a new
object.
Returns
-------
Dataset
"""
if names is None:
names = self._coord_names - set(self.dims)
else:
if isinstance(names, basestring):
names = [names]
self._assert_all_in_dataset(names)
_assert_empty(
set(names) & set(self.dims),
'cannot remove index coordinates with reset_coords: %s')
obj = self if inplace else self.copy()
obj._coord_names.difference_update(names)
if drop:
for name in names:
del obj._variables[name]
return obj
def dump_to_store(self, store, encoder=None, sync=True):
"""Store dataset contents to a backends.*DataStore object."""
variables, attrs = conventions.encode_dataset_coordinates(self)
if encoder:
variables, attrs = encoder(variables, attrs)
store.store(variables, attrs)
if sync:
store.sync()
def to_netcdf(self, path=None, mode='w', format=None, group=None,
engine=None):
"""Write dataset contents to a netCDF file.
Parameters
----------
path : str, optional
Path to which to save this dataset. If no path is provided, this
function returns the resulting netCDF file as a bytes object; in
this case, we need to use scipy.io.netcdf, which does not support
netCDF version 4 (the default format becomes NETCDF3_64BIT).
mode : {'w', 'a'}, optional
Write ('w') or append ('a') mode. If mode='w', any existing file at
this location will be overwritten.
format : {'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_64BIT', 'NETCDF3_CLASSIC'}, optional
File format for the resulting netCDF file:
* NETCDF4: Data is stored in an HDF5 file, using netCDF4 API
features.
* NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only
              netCDF 3 compatible API features.
* NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format,
which fully supports 2+ GB files, but is only compatible with
clients linked against netCDF version 3.6.0 or later.
* NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not
handle 2+ GB files very well.
All formats are supported by the netCDF4-python library.
scipy.io.netcdf only supports the last two formats.
The default format is NETCDF4 if you are saving a file to disk and
have the netCDF4-python library available. Otherwise, xray falls
back to using scipy to write netCDF files and defaults to the
NETCDF3_64BIT format (scipy does not support netCDF4).
group : str, optional
Path to the netCDF4 group in the given file to open (only works for
format='NETCDF4'). The group(s) will be created if necessary.
engine : {'netcdf4', 'scipy', 'h5netcdf'}, optional
Engine to use when writing netCDF files. If not provided, the
default engine is chosen based on available dependencies, with a
preference for 'netcdf4' if writing to a file on disk.
"""
from ..backends.api import to_netcdf
return to_netcdf(self, path, mode, format, group, engine)
    dump = utils.function_alias(to_netcdf, 'dump')
dumps = utils.function_alias(to_netcdf, 'dumps')
def __repr__(self):
return formatting.dataset_repr(self)
@property
def chunks(self):
"""Block dimensions for this dataset's data or None if it's not a dask
array.
"""
chunks = {}
for v in self.variables.values():
if v.chunks is not None:
new_chunks = list(zip(v.dims, v.chunks))
if any(chunk != chunks[d] for d, chunk in new_chunks
if d in chunks):
raise ValueError('inconsistent chunks')
chunks.update(new_chunks)
return Frozen(SortedKeysDict(chunks))
def chunk(self, chunks=None, lock=False):
"""Coerce all arrays in this dataset into dask arrays with the given
chunks.
Non-dask arrays in this dataset will be converted to dask arrays. Dask
arrays will be rechunked to the given chunk sizes.
        If chunks are not provided for one or more dimensions, chunk sizes
        along those dimensions will not be updated; non-dask arrays will be
converted into dask arrays with a single block.
Parameters
----------
chunks : int or dict, optional
Chunk sizes along each dimension, e.g., ``5`` or
``{'x': 5, 'y': 5}``.
lock : optional
Passed on to :py:func:`dask.array.from_array`, if the array is not
            already a dask array.
Returns
-------
chunked : xray.Dataset
"""
if isinstance(chunks, Number):
chunks = dict.fromkeys(self.dims, chunks)
if chunks is not None:
bad_dims = [d for d in chunks if d not in self.dims]
if bad_dims:
raise ValueError('some chunks keys are not dimensions on this '
'object: %s' % bad_dims)
def selkeys(dict_, keys):
if dict_ is None:
return None
return dict((d, dict_[d]) for d in keys if d in dict_)
def maybe_chunk(name, var, chunks):
chunks = selkeys(chunks, var.dims)
if not chunks:
chunks = None
if var.ndim > 0:
return var.chunk(chunks, name=name, lock=lock)
else:
return var
variables = OrderedDict([(k, maybe_chunk(k, v, chunks))
for k, v in self.variables.items()])
return self._replace_vars_and_dims(variables)
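    # Hedged chunking sketch (illustrative, requires dask to be installed):
    #
    #     ds.chunk(2)              # every dimension in blocks of 2
    #     ds.chunk({'time': 2})    # only the 'time' dimension
    #     ds.chunks                # mapping of dimension name -> block sizes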
def isel(self, **indexers):
"""Returns a new dataset with each array indexed along the specified
dimension(s).
This method selects values from each array using its `__getitem__`
method, except this method does not require knowing the order of
each array's dimensions.
Parameters
----------
**indexers : {dim: indexer, ...}
Keyword arguments with names matching dimensions and values given
by integers, slice objects or arrays.
Returns
-------
obj : Dataset
A new Dataset with the same contents as this dataset, except each
array and dimension is indexed by the appropriate indexers. In
general, each array's data will be a view of the array's data
in this dataset, unless numpy fancy indexing was triggered by using
an array indexer, in which case the data will be a copy.
See Also
--------
Dataset.sel
DataArray.isel
DataArray.sel
"""
        invalid = [k for k in indexers if k not in self.dims]
if invalid:
raise ValueError("dimensions %r do not exist" % invalid)
# all indexers should be int, slice or np.ndarrays
indexers = [(k, (np.asarray(v)
if not isinstance(v, (int, np.integer, slice))
else v))
for k, v in iteritems(indexers)]
variables = OrderedDict()
for name, var in iteritems(self._variables):
var_indexers = dict((k, v) for k, v in indexers if k in var.dims)
variables[name] = var.isel(**var_indexers)
return self._replace_vars_and_dims(variables)
def sel(self, method=None, **indexers):
"""Returns a new dataset with each array indexed by tick labels
along the specified dimension(s).
In contrast to `Dataset.isel`, indexers for this method should use
labels instead of integers.
        Under the hood, this method is powered by pandas' powerful Index
objects. This makes label based indexing essentially just as fast as
using integer indexing.
It also means this method uses pandas's (well documented) logic for
indexing. This means you can use string shortcuts for datetime indexes
(e.g., '2000-01' to select all values in January 2000). It also means
that slices are treated as inclusive of both the start and stop values,
unlike normal Python indexing.
Parameters
----------
method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional
Method to use for inexact matches (requires pandas>=0.16):
* default: only exact matches
            * pad / ffill: propagate last valid index value forward
* backfill / bfill: propagate next valid index value backward
* nearest: use nearest valid index value
**indexers : {dim: indexer, ...}
Keyword arguments with names matching dimensions and values given
by scalars, slices or arrays of tick labels.
Returns
-------
obj : Dataset
A new Dataset with the same contents as this dataset, except each
variable and dimension is indexed by the appropriate indexers. In
general, each variable's data will be a view of the variable's data
in this dataset, unless numpy fancy indexing was triggered by using
an array indexer, in which case the data will be a copy.
See Also
--------
Dataset.isel
DataArray.isel
DataArray.sel
"""
return self.isel(**indexing.remap_label_indexers(self, indexers,
method=method))
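    # Hedged selection sketch (illustrative, assumes a 'time' index coordinate;
    # method='nearest' needs pandas>=0.16):
    #
    #     ds.isel(time=0)                              # by integer position
    #     ds.isel(time=slice(None, 2))                 # positional slice
    #     ds.sel(time='2000-01-02')                    # by exact label
    #     ds.sel(time='2000-01-02T03', method='nearest')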
def isel_points(self, dim='points', **indexers):
"""Returns a new dataset with each array indexed pointwise along the
specified dimension(s).
This method selects pointwise values from each array and is akin to
the NumPy indexing behavior of `arr[[0, 1], [0, 1]]`, except this
method does not require knowing the order of each array's dimensions.
Parameters
----------
dim : str or DataArray or pandas.Index or other list-like object, optional
Name of the dimension to concatenate along. If dim is provided as a
string, it must be a new dimension name, in which case it is added
along axis=0. If dim is provided as a DataArray or Index or
list-like object, its name, which must not be present in the
dataset, is used as the dimension to concatenate along and the
values are added as a coordinate.
**indexers : {dim: indexer, ...}
Keyword arguments with names matching dimensions and values given
by array-like objects. All indexers must be the same length and
1 dimensional.
Returns
-------
obj : Dataset
A new Dataset with the same contents as this dataset, except each
array and dimension is indexed by the appropriate indexers. With
pointwise indexing, the new Dataset will always be a copy of the
original.
See Also
--------
Dataset.sel
DataArray.isel
DataArray.sel
DataArray.isel_points
"""
indexer_dims = set(indexers)
def relevant_keys(mapping):
return [k for k, v in mapping.items()
if any(d in indexer_dims for d in v.dims)]
data_vars = relevant_keys(self.data_vars)
coords = relevant_keys(self.coords)
# all the indexers should be iterables
keys = indexers.keys()
indexers = [(k, np.asarray(v)) for k, v in iteritems(indexers)]
# Check that indexers are valid dims, integers, and 1D
for k, v in indexers:
if k not in self.dims:
raise ValueError("dimension %s does not exist" % k)
if v.dtype.kind != 'i':
raise TypeError('Indexers must be integers')
if v.ndim != 1:
raise ValueError('Indexers must be 1 dimensional')
# all the indexers should have the same length
lengths = set(len(v) for k, v in indexers)
if len(lengths) > 1:
raise ValueError('All indexers must be the same length')
# Existing dimensions are not valid choices for the dim argument
if isinstance(dim, basestring):
if dim in self.dims:
# dim is an invalid string
raise ValueError('Existing dimension names are not valid '
                                 'choices for the dim argument in isel_points')
elif hasattr(dim, 'dims'):
# dim is a DataArray or Coordinate
if dim.name in self.dims:
# dim already exists
raise ValueError('Existing dimensions are not valid choices '
                                 'for the dim argument in isel_points')
else:
# try to cast dim to DataArray with name = points
from .dataarray import DataArray
dim = DataArray(dim, dims='points', name='points')
# TODO: This would be sped up with vectorized indexing. This will
# require dask to support pointwise indexing as well.
return concat([self.isel(**d) for d in
[dict(zip(keys, inds)) for inds in
zip(*[v for k, v in indexers])]],
dim=dim, coords=coords, data_vars=data_vars)
def reindex_like(self, other, method=None, copy=True):
"""Conform this object onto the indexes of another object, filling
in missing values with NaN.
Parameters
----------
other : Dataset or DataArray
Object with an 'indexes' attribute giving a mapping from dimension
names to pandas.Index objects, which provides coordinates upon
which to index the variables in this dataset. The indexes on this
other object need not be the same as the indexes on this
dataset. Any mis-matched index values will be filled in with
NaN, and any mis-matched dimension names will simply be ignored.
method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional
Method to use for filling index values from other not found in this
dataset:
* default: don't fill gaps
            * pad / ffill: propagate last valid index value forward
* backfill / bfill: propagate next valid index value backward
* nearest: use nearest valid index value (requires pandas>=0.16)
copy : bool, optional
If `copy=True`, the returned dataset contains only copied
variables. If `copy=False` and no reindexing is required then
original variables from this dataset are returned.
Returns
-------
reindexed : Dataset
Another dataset, with this dataset's data but coordinates from the
other object.
See Also
--------
Dataset.reindex
align
"""
return self.reindex(method=method, copy=copy, **other.indexes)
def reindex(self, indexers=None, method=None, copy=True, **kw_indexers):
"""Conform this object onto a new set of indexes, filling in
missing values with NaN.
Parameters
----------
        indexers : dict, optional
            Dictionary with keys given by dimension names and values given by
            arrays of coordinate tick labels. Any mis-matched coordinate values
will be filled in with NaN, and any mis-matched dimension names will
simply be ignored.
method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional
Method to use for filling index values in ``indexers`` not found in
this dataset:
* default: don't fill gaps
            * pad / ffill: propagate last valid index value forward
* backfill / bfill: propagate next valid index value backward
* nearest: use nearest valid index value (requires pandas>=0.16)
copy : bool, optional
If `copy=True`, the returned dataset contains only copied
variables. If `copy=False` and no reindexing is required then
original variables from this dataset are returned.
**kw_indexers : optional
Keyword arguments in the same form as ``indexers``.
Returns
-------
reindexed : Dataset
Another dataset, with this dataset's data but replaced coordinates.
See Also
--------
Dataset.reindex_like
align
pandas.Index.get_indexer
"""
indexers = utils.combine_pos_and_kw_args(indexers, kw_indexers,
'reindex')
if not indexers:
# shortcut
return self.copy(deep=True) if copy else self
variables = alignment.reindex_variables(
self.variables, self.indexes, indexers, method, copy=copy)
return self._replace_vars_and_dims(variables)
def rename(self, name_dict, inplace=False):
"""Returns a new object with renamed variables and dimensions.
Parameters
----------
name_dict : dict-like
Dictionary whose keys are current variable or dimension names and
whose values are new names.
inplace : bool, optional
If True, rename variables and dimensions in-place. Otherwise,
return a new dataset object.
Returns
-------
renamed : Dataset
Dataset with renamed variables and dimensions.
See Also
--------
Dataset.swap_dims
DataArray.rename
"""
for k in name_dict:
if k not in self:
raise ValueError("cannot rename %r because it is not a "
"variable in this dataset" % k)
variables = OrderedDict()
coord_names = set()
for k, v in iteritems(self._variables):
name = name_dict.get(k, k)
dims = tuple(name_dict.get(dim, dim) for dim in v.dims)
var = v.copy(deep=False)
var.dims = dims
variables[name] = var
if k in self._coord_names:
coord_names.add(name)
return self._replace_vars_and_dims(variables, coord_names,
inplace=inplace)
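    # Hedged renaming sketch (illustrative): renaming both a data variable and
    # a dimension/coordinate in one call.
    #
    #     ds.rename({'t2m': 'air_temperature', 'time': 't'})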
def swap_dims(self, dims_dict, inplace=False):
"""Returns a new object with swapped dimensions.
Parameters
----------
dims_dict : dict-like
Dictionary whose keys are current dimension names and whose values
are new names. Each value must already be a variable in the
dataset.
inplace : bool, optional
If True, swap dimensions in-place. Otherwise, return a new dataset
object.
Returns
-------
renamed : Dataset
Dataset with swapped dimensions.
See Also
--------
Dataset.rename
DataArray.swap_dims
"""
for k, v in dims_dict.items():
if k not in self.dims:
raise ValueError('cannot swap from dimension %r because it is '
'not an existing dimension' % k)
if self.variables[v].dims != (k,):
raise ValueError('replacement dimension %r is not a 1D '
'variable along the old dimension %r'
% (v, k))
result_dims = set(dims_dict.get(dim, dim) for dim in self.dims)
variables = OrderedDict()
coord_names = self._coord_names.copy()
coord_names.update(dims_dict.values())
for k, v in iteritems(self.variables):
dims = tuple(dims_dict.get(dim, dim) for dim in v.dims)
var = v.to_coord() if k in result_dims else v.to_variable()
var.dims = dims
variables[k] = var
return self._replace_vars_and_dims(variables, coord_names,
inplace=inplace)
def update(self, other, inplace=True):
"""Update this dataset's variables with those from another dataset.
Parameters
----------
other : Dataset or castable to Dataset
Dataset or variables with which to update this dataset.
inplace : bool, optional
If True, merge the other dataset into this dataset in-place.
Otherwise, return a new dataset object.
Returns
-------
updated : Dataset
Updated dataset.
Raises
------
ValueError
If any dimensions would have inconsistent sizes in the updated
dataset.
"""
return self.merge(
other, inplace=inplace, overwrite_vars=list(other), join='left')
def merge(self, other, inplace=False, overwrite_vars=set(),
compat='broadcast_equals', join='outer'):
"""Merge the arrays of two datasets into a single dataset.
        This method generally does not allow overriding data, with the exception
of attributes, which are ignored on the second dataset. Variables with
the same name are checked for conflicts via the equals or identical
methods.
Parameters
----------
other : Dataset or castable to Dataset
Dataset or variables to merge with this dataset.
inplace : bool, optional
If True, merge the other dataset into this dataset in-place.
Otherwise, return a new dataset object.
overwrite_vars : str or sequence, optional
If provided, update variables of these name(s) without checking for
conflicts in this dataset.
compat : {'broadcast_equals', 'equals', 'identical'}, optional
String indicating how to compare variables of the same name for
potential conflicts:
- 'broadcast_equals': all values must be equal when variables are
broadcast against each other to ensure common dimensions.
- 'equals': all values and dimensions must be the same.
- 'identical': all values, dimensions and attributes must be the
same.
join : {'outer', 'inner', 'left', 'right'}, optional
Method for joining ``self`` and ``other`` along shared dimensions:
- 'outer': use the union of the indexes
- 'inner': use the intersection of the indexes
- 'left': use indexes from ``self``
- 'right': use indexes from ``other``
Returns
-------
merged : Dataset
Merged dataset.
Raises
------
ValueError
If any variables conflict (see ``compat``).
"""
if compat not in ['broadcast_equals', 'equals', 'identical']:
raise ValueError("compat=%r invalid: must be 'broadcast_equals', "
"'equals' or 'identical'" % compat)
if isinstance(overwrite_vars, basestring):
overwrite_vars = [overwrite_vars]
overwrite_vars = set(overwrite_vars)
merge = _merge_dataset if isinstance(other, Dataset) else _merge_dict
replace_vars, new_vars, new_coord_names = merge(
self, other, overwrite_vars, compat=compat, join=join)
newly_coords = new_coord_names & (set(self) - set(self.coords))
no_longer_coords = set(self.coords) & (set(new_vars) - new_coord_names)
ambiguous_coords = (newly_coords | no_longer_coords) - overwrite_vars
if ambiguous_coords:
raise ValueError('cannot merge: the following variables are '
'coordinates on one dataset but not the other: %s'
% list(ambiguous_coords))
obj = self if inplace else self.copy()
obj._update_vars_and_coords(replace_vars, new_coord_names)
return obj
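    # Hedged merge sketch (illustrative): merging another Dataset or a plain
    # dict of variables; conflicting values raise ValueError unless listed in
    # overwrite_vars.
    #
    #     merged = ds.merge(other_ds)                        # another Dataset
    #     merged = ds.merge({'precip': ('time', np.zeros(4))})
    #     merged = ds.merge(other_ds, overwrite_vars='t2m')  # skip conflict check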
def _assert_all_in_dataset(self, names, virtual_okay=False):
bad_names = set(names) - set(self._variables)
if virtual_okay:
bad_names -= self.virtual_variables
if bad_names:
raise ValueError('One or more of the specified variables '
'cannot be found in this dataset')
def drop(self, labels, dim=None):
"""Drop variables or index labels from this dataset.
If a variable corresponding to a dimension is dropped, all variables
that use that dimension are also dropped.
Parameters
----------
labels : str
Names of variables or index labels to drop.
dim : None or str, optional
Dimension along which to drop index labels. By default (if
``dim is None``), drops variables rather than index labels.
Returns
-------
dropped : Dataset
"""
if utils.is_scalar(labels):
labels = [labels]
if dim is None:
return self._drop_vars(labels)
else:
new_index = self.indexes[dim].drop(labels)
return self.loc[{dim: new_index}]
def _drop_vars(self, names):
self._assert_all_in_dataset(names)
drop = set(names)
drop |= set(k for k, v in iteritems(self._variables)
if any(name in v.dims for name in names))
variables = OrderedDict((k, v) for k, v in iteritems(self._variables)
if k not in drop)
coord_names = set(k for k in self._coord_names if k in variables)
return self._replace_vars_and_dims(variables, coord_names)
def drop_vars(self, *names): # pragma: no cover
warnings.warn('the Dataset method `drop_vars` has been deprecated; '
'use `drop` instead',
FutureWarning, stacklevel=2)
return self.drop(names)
def transpose(self, *dims):
"""Return a new Dataset object with all array dimensions transposed.
Although the order of dimensions on each array will change, the dataset
dimensions themselves will remain in fixed (sorted) order.
Parameters
----------
*dims : str, optional
By default, reverse the dimensions on each array. Otherwise,
reorder the dimensions to this order.
Returns
-------
transposed : Dataset
            Each array in the dataset (including coordinates) will be
transposed to the given order.
Notes
-----
Although this operation returns a view of each array's data, it
is not lazy -- the data will be fully loaded into memory.
See Also
--------
numpy.transpose
DataArray.transpose
"""
if dims:
if set(dims) ^ set(self.dims):
raise ValueError('arguments to transpose (%s) must be '
'permuted dataset dimensions (%s)'
% (dims, tuple(self.dims)))
ds = self.copy()
for name, var in iteritems(self._variables):
var_dims = tuple(dim for dim in dims if dim in var.dims)
ds._variables[name] = var.transpose(*var_dims)
return ds
@property
def T(self):
return self.transpose()
def squeeze(self, dim=None):
"""Returns a new dataset with squeezed data.
Parameters
----------
dim : None or str or tuple of str, optional
Selects a subset of the length one dimensions. If a dimension is
selected with length greater than one, an error is raised. If
None, all length one dimensions are squeezed.
Returns
-------
squeezed : Dataset
            This dataset, but with all or a subset of the dimensions of
length 1 removed.
Notes
-----
Although this operation returns a view of each variable's data, it is
not lazy -- all variable data will be fully loaded.
See Also
--------
numpy.squeeze
"""
return common.squeeze(self, self.dims, dim)
def dropna(self, dim, how='any', thresh=None, subset=None):
"""Returns a new dataset with dropped labels for missing values along
the provided dimension.
Parameters
----------
dim : str
Dimension along which to drop missing values. Dropping along
multiple dimensions simultaneously is not yet supported.
how : {'any', 'all'}, optional
* any : if any NA values are present, drop that label
* all : if all values are NA, drop that label
thresh : int, default None
If supplied, require this many non-NA values.
subset : sequence, optional
Subset of variables to check for missing values. By default, all
variables in the dataset are checked.
Returns
-------
Dataset
"""
# TODO: consider supporting multiple dimensions? Or not, given that
# there are some ugly edge cases, e.g., pandas's dropna differs
# depending on the order of the supplied axes.
if dim not in self.dims:
raise ValueError('%s must be a single dataset dimension' % dim)
if subset is None:
subset = list(self.data_vars)
count = np.zeros(self.dims[dim], dtype=np.int64)
size = 0
for k in subset:
array = self._variables[k]
if dim in array.dims:
dims = [d for d in array.dims if d != dim]
count += array.count(dims)
size += np.prod([self.dims[d] for d in dims])
if thresh is not None:
mask = count >= thresh
elif how == 'any':
mask = count == size
elif how == 'all':
mask = count > 0
elif how is not None:
raise ValueError('invalid how option: %s' % how)
else:
raise TypeError('must specify how or thresh')
return self.isel(**{dim: mask})
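    # Hedged dropna sketch (illustrative, assumes NaNs somewhere in the data):
    #
    #     ds.dropna('time')                 # drop labels with any missing value
    #     ds.dropna('time', how='all')      # drop labels where everything is NaN
    #     ds.dropna('time', thresh=3)       # keep labels with >= 3 non-NaN values
    #     ds.dropna('time', subset=['t2m']) # only consider one variable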
def fillna(self, value):
"""Fill missing values in this object.
This operation follows the normal broadcasting and alignment rules that
xray uses for binary arithmetic, except the result is aligned to this
object (``join='left'``) instead of aligned to the intersection of
index coordinates (``join='inner'``).
Parameters
----------
value : scalar, ndarray, DataArray, dict or Dataset
Used to fill all matching missing values in this dataset's data
variables. Scalars, ndarrays or DataArrays arguments are used to
fill all data with aligned coordinates (for DataArrays).
Dictionaries or datasets match data variables and then align
coordinates if necessary.
Returns
-------
Dataset
"""
return self._fillna(value)
def reduce(self, func, dim=None, keep_attrs=False, numeric_only=False,
allow_lazy=False, **kwargs):
"""Reduce this dataset by applying `func` along some dimension(s).
Parameters
----------
func : function
Function which can be called in the form
`f(x, axis=axis, **kwargs)` to return the result of reducing an
np.ndarray over an integer valued axis.
dim : str or sequence of str, optional
Dimension(s) over which to apply `func`. By default `func` is
applied over all dimensions.
keep_attrs : bool, optional
            If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
numeric_only : bool, optional
If True, only apply ``func`` to variables with a numeric dtype.
**kwargs : dict
Additional keyword arguments passed on to ``func``.
Returns
-------
reduced : Dataset
Dataset with this object's DataArrays replaced with new DataArrays
of summarized data and the indicated dimension(s) removed.
"""
if isinstance(dim, basestring):
dims = set([dim])
elif dim is None:
dims = set(self.dims)
else:
dims = set(dim)
_assert_empty([dim for dim in dims if dim not in self.dims],
'Dataset does not contain the dimensions: %s')
variables = OrderedDict()
for name, var in iteritems(self._variables):
reduce_dims = [dim for dim in var.dims if dim in dims]
if reduce_dims or not var.dims:
if name not in self.coords:
if (not numeric_only
or np.issubdtype(var.dtype, np.number)
or var.dtype == np.bool_):
if len(reduce_dims) == 1:
# unpack dimensions for the benefit of functions
# like np.argmin which can't handle tuple arguments
reduce_dims, = reduce_dims
elif len(reduce_dims) == var.ndim:
# prefer to aggregate over axis=None rather than
# axis=(0, 1) if they will be equivalent, because
# the former is often more efficient
reduce_dims = None
variables[name] = var.reduce(func, dim=reduce_dims,
keep_attrs=keep_attrs,
allow_lazy=allow_lazy,
**kwargs)
else:
variables[name] = var
coord_names = set(k for k in self.coords if k in variables)
attrs = self.attrs if keep_attrs else None
return self._replace_vars_and_dims(variables, coord_names, attrs)
def apply(self, func, keep_attrs=False, args=(), **kwargs):
"""Apply a function over the data variables in this dataset.
Parameters
----------
func : function
Function which can be called in the form `f(x, **kwargs)` to
transform each DataArray `x` in this dataset into another
DataArray.
keep_attrs : bool, optional
If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False, the new object will
be returned without attributes.
args : tuple, optional
Positional arguments passed on to `func`.
**kwargs : dict
Keyword arguments passed on to `func`.
Returns
-------
applied : Dataset
Resulting dataset from applying ``func`` over each data variable.
"""
variables = OrderedDict(
(k, maybe_wrap_array(v, func(v, *args, **kwargs)))
for k, v in iteritems(self.data_vars))
attrs = self.attrs if keep_attrs else None
return type(self)(variables, attrs=attrs)
def assign(self, **kwargs):
"""Assign new data variables to a Dataset, returning a new object
with all the original variables in addition to the new ones.
Parameters
----------
kwargs : keyword, value pairs
            keywords are the variable names. If the values are callable, they
            are computed on the Dataset and assigned to new data variables. If
            the values are not callable (e.g. a DataArray, scalar, or array),
            they are simply assigned.
Returns
-------
ds : Dataset
A new Dataset with the new variables in addition to all the
existing variables.
Notes
-----
Since ``kwargs`` is a dictionary, the order of your arguments may not
be preserved, and so the order of the new variables is not well
defined. Assigning multiple variables within the same ``assign`` is
possible, but you cannot reference other variables created within the
same ``assign`` call.
See Also
--------
pandas.DataFrame.assign
"""
data = self.copy()
# do all calculations first...
results = data._calc_assign_results(kwargs)
# ... and then assign
data.update(results)
return data
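    # Hedged assign sketch (illustrative): adding a derived data variable
    # without mutating the original dataset; the Fahrenheit conversion is just
    # an example formula.
    #
    #     ds2 = ds.assign(t2m_f=lambda d: d['t2m'] * 9 / 5 + 32)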
def to_array(self, dim='variable', name=None):
"""Convert this dataset into an xray.DataArray
The data variables of this dataset will be broadcast against each other
and stacked along the first axis of the new array. All coordinates of
this dataset will remain coordinates.
Parameters
----------
dim : str, optional
Name of the new dimension.
name : str, optional
Name of the new data array.
Returns
-------
array : xray.DataArray
"""
from .dataarray import DataArray
data_vars = [self.variables[k] for k in self.data_vars]
broadcast_vars = broadcast_variables(*data_vars)
data = ops.stack([b.data for b in broadcast_vars], axis=0)
coords = dict(self.coords)
coords[dim] = list(self.data_vars)
dims = (dim,) + broadcast_vars[0].dims
return DataArray(data, coords, dims, attrs=self.attrs, name=name)
def _to_dataframe(self, ordered_dims):
columns = [k for k in self if k not in self.dims]
data = [self._variables[k].expand_dims(ordered_dims).values.reshape(-1)
for k in columns]
index = self.coords.to_index(ordered_dims)
return pd.DataFrame(OrderedDict(zip(columns, data)), index=index)
def to_dataframe(self):
"""Convert this dataset into a pandas.DataFrame.
Non-index variables in this dataset form the columns of the
        DataFrame. The DataFrame is indexed by the Cartesian product of
this dataset's indices.
"""
return self._to_dataframe(self.dims)
@classmethod
def from_dataframe(cls, dataframe):
"""Convert a pandas.DataFrame into an xray.Dataset
Each column will be converted into an independent variable in the
Dataset. If the dataframe's index is a MultiIndex, it will be expanded
into a tensor product of one-dimensional indices (filling in missing
values with NaN). This method will produce a Dataset very similar to
that on which the 'to_dataframe' method was called, except with
possibly redundant dimensions (since all dataset variables will have
the same dimensionality).
"""
# TODO: Add an option to remove dimensions along which the variables
# are constant, to enable consistent serialization to/from a dataframe,
# even if some variables have different dimensionality.
idx = dataframe.index
obj = cls()
if hasattr(idx, 'levels'):
# it's a multi-index
# expand the DataFrame to include the product of all levels
full_idx = pd.MultiIndex.from_product(idx.levels, names=idx.names)
dataframe = dataframe.reindex(full_idx)
dims = [name if name is not None else 'level_%i' % n
for n, name in enumerate(idx.names)]
for dim, lev in zip(dims, idx.levels):
obj[dim] = (dim, lev)
shape = [lev.size for lev in idx.levels]
else:
if idx.size:
dims = (idx.name if idx.name is not None else 'index',)
obj[dims[0]] = (dims, idx)
else:
dims = []
shape = -1
for name, series in iteritems(dataframe):
data = series.values.reshape(shape)
obj[name] = (dims, data)
return obj
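    # Hedged round-trip sketch (illustrative): converting to a DataFrame and
    # back; with a MultiIndex the result is the tensor product of the levels.
    #
    #     df = ds.to_dataframe()
    #     ds_again = Dataset.from_dataframe(df)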
@staticmethod
def _unary_op(f):
@functools.wraps(f)
def func(self, *args, **kwargs):
ds = self.coords.to_dataset()
for k in self.data_vars:
ds._variables[k] = f(self._variables[k], *args, **kwargs)
return ds
return func
@staticmethod
def _binary_op(f, reflexive=False, join='inner', drop_na_vars=True):
@functools.wraps(f)
def func(self, other):
if isinstance(other, groupby.GroupBy):
return NotImplemented
if hasattr(other, 'indexes'):
self, other = align(self, other, join=join, copy=False)
empty_indexes = [d for d, s in self.dims.items() if s == 0]
if empty_indexes:
raise ValueError('no overlapping labels for some '
'dimensions: %s' % empty_indexes)
g = f if not reflexive else lambda x, y: f(y, x)
ds = self._calculate_binary_op(g, other, drop_na_vars=drop_na_vars)
return ds
return func
@staticmethod
def _inplace_binary_op(f):
@functools.wraps(f)
def func(self, other):
if isinstance(other, groupby.GroupBy):
raise TypeError('in-place operations between a Dataset and '
'a grouped object are not permitted')
if hasattr(other, 'indexes'):
other = other.reindex_like(self, copy=False)
# we don't want to actually modify arrays in-place
g = ops.inplace_to_noninplace_op(f)
ds = self._calculate_binary_op(g, other, inplace=True)
self._replace_vars_and_dims(ds._variables, ds._coord_names,
ds._attrs, inplace=True)
return self
return func
def _calculate_binary_op(self, f, other, inplace=False, drop_na_vars=True):
def apply_over_both(lhs_data_vars, rhs_data_vars, lhs_vars, rhs_vars):
dest_vars = OrderedDict()
performed_op = False
for k in lhs_data_vars:
if k in rhs_data_vars:
dest_vars[k] = f(lhs_vars[k], rhs_vars[k])
performed_op = True
elif inplace:
raise ValueError(
'datasets must have the same data variables '
'for in-place arithmetic operations: %s, %s'
% (list(lhs_data_vars), list(rhs_data_vars)))
elif not drop_na_vars:
# this shortcuts left alignment of variables for fillna
dest_vars[k] = lhs_vars[k]
if not performed_op:
raise ValueError(
'datasets have no overlapping data variables: %s, %s'
% (list(lhs_data_vars), list(rhs_data_vars)))
return dest_vars
if utils.is_dict_like(other) and not isinstance(other, Dataset):
# can't use our shortcut of doing the binary operation with
# Variable objects, so apply over our data vars instead.
new_data_vars = apply_over_both(self.data_vars, other,
self.data_vars, other)
return Dataset(new_data_vars)
other_coords = getattr(other, 'coords', None)
ds = self.coords.merge(other_coords)
if isinstance(other, Dataset):
new_vars = apply_over_both(self.data_vars, other.data_vars,
self.variables, other.variables)
else:
other_variable = getattr(other, 'variable', other)
new_vars = OrderedDict((k, f(self.variables[k], other_variable))
for k in self.data_vars)
ds._variables.update(new_vars)
return ds
ops.inject_all_ops_and_reduce_methods(Dataset, array_only=False)
| kjordahl/xray | xray/core/dataset.py | Python | apache-2.0 | 72,806 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: common.py
# Author: Yuxin Wu <[email protected]>
import random
import time
import threading
import multiprocessing
import numpy as np
from tqdm import tqdm
from six.moves import queue
from tensorpack import *
from tensorpack.utils.concurrency import *
from tensorpack.utils.stats import *
def play_one_episode(player, func, verbose=False):
def f(s):
spc = player.get_action_space()
act = func([[s]])[0][0].argmax()
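        # with a small probability (0.001), take a random action instead of the
        # greedy one (a light epsilon-greedy exploration during evaluation)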
if random.random() < 0.001:
act = spc.sample()
if verbose:
print(act)
return act
return np.mean(player.play_one_episode(f))
def play_model(cfg, player):
predfunc = OfflinePredictor(cfg)
while True:
score = play_one_episode(player, predfunc)
print("Total:", score)
def eval_with_funcs(predictors, nr_eval, get_player_fn):
class Worker(StoppableThread, ShareSessionThread):
def __init__(self, func, queue):
super(Worker, self).__init__()
self._func = func
self.q = queue
def func(self, *args, **kwargs):
if self.stopped():
raise RuntimeError("stopped!")
return self._func(*args, **kwargs)
def run(self):
with self.default_sess():
player = get_player_fn(train=False)
while not self.stopped():
try:
score = play_one_episode(player, self.func)
# print("Score, ", score)
except RuntimeError:
return
self.queue_put_stoppable(self.q, score)
q = queue.Queue()
threads = [Worker(f, q) for f in predictors]
for k in threads:
k.start()
time.sleep(0.1) # avoid simulator bugs
stat = StatCounter()
try:
for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
r = q.get()
stat.feed(r)
logger.info("Waiting for all the workers to finish the last run...")
for k in threads:
k.stop()
for k in threads:
k.join()
while q.qsize():
r = q.get()
stat.feed(r)
except:
logger.exception("Eval")
finally:
if stat.count > 0:
return (stat.average, stat.max)
return (0, 0)
def eval_model_multithread(cfg, nr_eval, get_player_fn):
func = OfflinePredictor(cfg)
NR_PROC = min(multiprocessing.cpu_count() // 2, 8)
mean, max = eval_with_funcs([func] * NR_PROC, nr_eval, get_player_fn)
logger.info("Average Score: {}; Max Score: {}".format(mean, max))
class Evaluator(Triggerable):
def __init__(self, nr_eval, input_names, output_names, get_player_fn):
self.eval_episode = nr_eval
self.input_names = input_names
self.output_names = output_names
self.get_player_fn = get_player_fn
def _setup_graph(self):
NR_PROC = min(multiprocessing.cpu_count() // 2, 20)
self.pred_funcs = [self.trainer.get_predictor(
self.input_names, self.output_names)] * NR_PROC
def _trigger(self):
t = time.time()
mean, max = eval_with_funcs(
self.pred_funcs, self.eval_episode, self.get_player_fn)
t = time.time() - t
if t > 10 * 60: # eval takes too long
self.eval_episode = int(self.eval_episode * 0.94)
self.trainer.monitors.put_scalar('mean_score', mean)
self.trainer.monitors.put_scalar('max_score', max)
def play_n_episodes(player, predfunc, nr):
logger.info("Start evaluation: ")
for k in range(nr):
if k != 0:
player.restart_episode()
score = play_one_episode(player, predfunc)
print("{}/{}, score={}".format(k, nr, score))
| haamoon/tensorpack | examples/DeepQNetwork/common.py | Python | apache-2.0 | 3,829 |
import optparse
import pickle
#converts urls to wiki_id
parser = optparse.OptionParser()
parser.add_option('-i','--input', dest = 'input_file', help = 'input_file')
parser.add_option('-o','--output', dest = 'output_file', help = 'output_file')
(options, args) = parser.parse_args()
if options.input_file is None:
options.input_file = raw_input('Enter input file:')
if options.output_file is None:
options.output_file = raw_input('Enter output file:')
input_file = options.input_file
output_file = options.output_file
#define the dictionary url:wiki_id
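#The parser below assumes N-Triples-style lines roughly of the form (illustrative, not taken from the source data):
#  <http://dbpedia.org/resource/Foo> <http://dbpedia.org/ontology/wikiPageID> "12345"^^<http://www.w3.org/2001/XMLSchema#integer> .
#i.e. the URL is the first whitespace-separated token and the wiki id is the quoted value inside the third token.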
wiki_from_url_dict = {}
with open('../../datasets/dbpedia/page_ids_en_2016.ttl','r') as f:
for line in f:
line = line.split(' ')
if line[0] == '#':
continue
url = line[0]
wiki_id_list = line[2].split('\"')
wiki_id = wiki_id_list[1]
print(url, wiki_id)
wiki_from_url_dict[url] = int(wiki_id)
output_file_write = open(output_file,'w')
#iterate through the page links and turn urls into wiki_ids
max_wiki_id = max(wiki_from_url_dict.values()) + 1
local_id = {}
count = 0
with open(input_file) as page_links:
for line in page_links:
line = line.split(' ')
if line[0] == '#':
continue
url_1 = line[0]
url_2 = line[2]
#if wiki_id not found, assign an id = max_wiki_id and increment max_wiki_id
try:
wiki_id1 = wiki_from_url_dict[url_1] #first entity has wiki_id
try:
wiki_id2 = wiki_from_url_dict[url_2] #first and second entities have wiki_ids
except (KeyError, IndexError): #first entity has wiki_id, second entity doesn't
try: #check if a local id has already been assigned
wiki_id2 = local_id[url_2]
except (KeyError, IndexError):
wiki_id2 = max_wiki_id
local_id[url_2] = wiki_id2
max_wiki_id +=1
except (KeyError, IndexError): #first entity doesn't have wiki_id
try:
wiki_id1 = local_id[url_1]
except (KeyError, IndexError):
wiki_id1 = max_wiki_id
local_id[url_1] = wiki_id1
max_wiki_id += 1
try: #first entity doesn't have wiki_id, second entity has it
wiki_id2 = wiki_from_url_dict[url_2]
except (KeyError, IndexError): #neither first nor second entity have wiki_ids
try: #check if a local id has already been assigned
wiki_id2 = local_id[url_2]
except (KeyError, IndexError):
wiki_id2 = max_wiki_id
local_id[url_2] = wiki_id2
max_wiki_id +=1
output_file_write.write('%d %d\n' %(wiki_id1,wiki_id2))
        print(count)
count += 1
output_file_write.close()
pickle.dump(local_id,open('../../datasets/dbpedia/local_id_to_url_full_mapping_based.p','wb'))
| MultimediaSemantics/entity2vec | scripts/old/page_links_to_edge_list_wiki.py | Python | apache-2.0 | 3,039 |
import errno
import os
import pwd
import shutil
import sys
from jinja2 import Environment, FileSystemLoader
class TutorialEnv:
LOCAL_MACHINE = ("Local Machine Condor Pool", "submit-host")
USC_HPCC_CLUSTER = ("USC HPCC Cluster", "usc-hpcc")
OSG_FROM_ISI = ("OSG from ISI submit node", "osg")
XSEDE_BOSCO = ("XSEDE, with Bosco", "xsede-bosco")
BLUEWATERS_GLITE = ("Bluewaters, with Glite", "bw-glite")
TACC_WRANGLER = ("TACC Wrangler with Glite", "wrangler-glite")
OLCF_TITAN = ("OLCF TITAN with Glite", "titan-glite")
OLCF_SUMMIT_KUBERNETES_BOSCO = (
"OLCF Summit from Kubernetes using BOSCO",
"summit-kub-bosco",
)
class TutorialExample:
PROCESS = ("Process", "process")
PIPELINE = ("Pipeline", "pipeline")
SPLIT = ("Split", "split")
MERGE = ("Merge", "merge")
EPA = ("EPA (requires R)", "r-epa")
DIAMOND = ("Diamond", "diamond")
CONTAINER = ("Population Modeling using Containers", "population")
MPI = ("MPI Hello World", "mpi-hw")
def choice(question, options, default):
"Ask the user to choose from a short list of named options"
while True:
sys.stdout.write("{} ({}) [{}]: ".format(question, "/".join(options), default))
answer = sys.stdin.readline().strip()
if len(answer) == 0:
return default
for opt in options:
if answer == opt:
return answer
def yesno(question, default="y"):
"Ask the user a yes/no question"
while True:
sys.stdout.write("{} (y/n) [{}]: ".format(question, default))
answer = sys.stdin.readline().strip().lower()
if len(answer) == 0:
answer = default
if answer == "y":
return True
elif answer == "n":
return False
def query(question, default=None):
"Ask the user a question and return the response"
while True:
if default:
sys.stdout.write("{} [{}]: ".format(question, default))
else:
sys.stdout.write("%s: " % question)
answer = sys.stdin.readline().strip().replace(" ", "_")
if answer == "":
if default:
return default
else:
return answer
def optionlist(question, options, default=0):
"Ask the user to choose from a list of options"
for i, option in enumerate(options):
print("%d: %s" % (i + 1, option[0]))
while True:
sys.stdout.write("%s (1-%d) [%d]: " % (question, len(options), default + 1))
answer = sys.stdin.readline().strip()
if len(answer) == 0:
return options[default][1]
try:
optno = int(answer)
if optno > 0 and optno <= len(options):
return options[optno - 1][1]
except Exception:
pass
class Workflow:
def __init__(self, workflowdir, sharedir):
self.jinja = Environment(loader=FileSystemLoader(sharedir), trim_blocks=True)
self.name = os.path.basename(workflowdir)
self.workflowdir = workflowdir
self.sharedir = sharedir
self.properties = {}
self.home = os.environ["HOME"]
self.user = pwd.getpwuid(os.getuid())[0]
self.tutorial = None
self.generate_tutorial = False
self.tutorial_setup = None
self.compute_queue = "default"
self.project = "MYPROJ123"
sysname, _, _, _, machine = os.uname()
if sysname == "Darwin":
self.os = "MACOSX"
else:
# Probably Linux
self.os = sysname.upper()
self.arch = machine
def copy_template(self, template, dest, mode=0o644):
"Copy template to dest in workflowdir with mode"
path = os.path.join(self.workflowdir, dest)
t = self.jinja.get_template(template)
t.stream(**self.__dict__).dump(path)
os.chmod(path, mode)
def copy_dir(self, src, dest):
# self.mkdir(dest)
if not src.startswith("/"):
src = os.path.join(self.sharedir, src)
try:
dest = os.path.join(self.workflowdir, dest)
shutil.copytree(src, dest)
except OSError as exc: # python >2.5
if exc.errno == errno.ENOTDIR:
shutil.copy(src, dest)
else:
raise
def mkdir(self, path):
"Make relative directory in workflowdir"
path = os.path.join(self.workflowdir, path)
if not os.path.exists(path):
os.makedirs(path)
def configure(self):
# The tutorial is a special case
if yesno("Do you want to generate a tutorial workflow?", "n"):
self.config = "tutorial"
self.daxgen = "tutorial"
self.generate_tutorial = True
# determine the environment to setup tutorial for
self.tutorial_setup = optionlist(
"What environment is tutorial to be setup for?",
[
TutorialEnv.LOCAL_MACHINE,
TutorialEnv.USC_HPCC_CLUSTER,
TutorialEnv.OSG_FROM_ISI,
TutorialEnv.XSEDE_BOSCO,
TutorialEnv.BLUEWATERS_GLITE,
TutorialEnv.TACC_WRANGLER,
TutorialEnv.OLCF_TITAN,
TutorialEnv.OLCF_SUMMIT_KUBERNETES_BOSCO,
],
)
# figure out what example options to provide
examples = [
TutorialExample.PROCESS,
TutorialExample.PIPELINE,
TutorialExample.SPLIT,
TutorialExample.MERGE,
TutorialExample.EPA,
TutorialExample.CONTAINER,
]
if self.tutorial_setup != "osg":
examples.append(TutorialExample.DIAMOND)
if self.tutorial_setup in [
"bw-glite",
"wrangler-glite",
"titan-glite",
"summit-kub-bosco",
]:
examples.append(TutorialExample.MPI)
self.project = query(
"What project your jobs should run under. For example on TACC there are like : TG-DDM160003 ?"
)
self.tutorial = optionlist("What tutorial workflow do you want?", examples)
self.setup_tutorial()
return
# Determine which DAX generator API to use
self.daxgen = choice(
"What DAX generator API do you want to use?",
["python", "perl", "java", "r"],
"python",
)
# Determine what kind of site catalog we need to generate
self.config = optionlist(
"What does your computing infrastructure look like?",
[
("Local Machine Condor Pool", "condorpool"),
("Remote Cluster using Globus GRAM", "globus"),
("Remote Cluster using CREAMCE", "creamce"),
("Local PBS Cluster with Glite", "glite"),
("Remote PBS Cluster with BOSCO and SSH", "bosco"),
],
)
# Find out some information about the site
self.sitename = query("What do you want to call your compute site?", "compute")
self.os = choice(
"What OS does your compute site have?", ["LINUX", "MACOSX"], self.os
)
self.arch = choice(
"What architecture does your compute site have?",
["x86_64", "x86"],
self.arch,
)
def setup_tutorial(self):
"""
Set up tutorial for pre-defined computing environments
:return:
"""
if self.tutorial_setup is None:
self.tutorial_setup = "submit-host"
if self.tutorial_setup == "submit-host":
self.sitename = "condorpool"
elif self.tutorial_setup == "usc-hpcc":
self.sitename = "usc-hpcc"
self.config = "glite"
self.compute_queue = "quick"
            # for running the whole workflow as an MPI job
self.properties["pegasus.job.aggregator"] = "mpiexec"
elif self.tutorial_setup == "osg":
self.sitename = "osg"
self.os = "linux"
if not yesno("Do you want to use Condor file transfers", "y"):
self.staging_site = "isi_workflow"
elif self.tutorial_setup == "xsede-bosco":
self.sitename = "condorpool"
elif self.tutorial_setup == "bw-glite":
self.sitename = "bluewaters"
self.config = "glite"
self.compute_queue = "normal"
elif self.tutorial_setup == "wrangler-glite":
self.sitename = "wrangler"
self.config = "glite"
self.compute_queue = "normal"
elif self.tutorial_setup == "titan-glite":
self.sitename = "titan"
self.config = "glite"
self.compute_queue = "titan"
elif self.tutorial_setup == "summit-kub-bosco":
self.sitename = "summit"
self.config = "bosco"
self.compute_queue = "batch"
return
def generate(self):
os.makedirs(self.workflowdir)
if self.tutorial != "population":
self.mkdir("input")
self.mkdir("output")
if self.generate_tutorial:
self.copy_template("%s/tc.txt" % self.tutorial, "tc.txt")
if self.tutorial == "r-epa":
self.copy_template("%s/daxgen.R" % self.tutorial, "daxgen.R")
elif self.tutorial != "mpi-hw":
self.copy_template("%s/daxgen.py" % self.tutorial, "daxgen.py")
if self.tutorial == "diamond":
# Executables used by the diamond workflow
self.mkdir("bin")
self.copy_template(
"diamond/transformation.py", "bin/preprocess", mode=0o755
)
self.copy_template(
"diamond/transformation.py", "bin/findrange", mode=0o755
)
self.copy_template(
"diamond/transformation.py", "bin/analyze", mode=0o755
)
# Diamond input file
self.copy_template("diamond/f.a", "input/f.a")
elif self.tutorial == "split":
# Split workflow input file
self.mkdir("bin")
self.copy_template("split/pegasus.html", "input/pegasus.html")
elif self.tutorial == "r-epa":
# Executables used by the R-EPA workflow
self.mkdir("bin")
self.copy_template(
"r-epa/epa-wrapper.sh", "bin/epa-wrapper.sh", mode=0o755
)
self.copy_template("r-epa/setupvar.R", "bin/setupvar.R", mode=0o755)
self.copy_template(
"r-epa/weighted.average.R", "bin/weighted.average.R", mode=0o755
)
self.copy_template(
"r-epa/cumulative.percentiles.R",
"bin/cumulative.percentiles.R",
mode=0o755,
)
elif self.tutorial == "population":
self.copy_template("%s/Dockerfile" % self.tutorial, "Dockerfile")
self.copy_template("%s/Singularity" % self.tutorial, "Singularity")
self.copy_template(
"%s/tc.txt.containers" % self.tutorial, "tc.txt.containers"
)
self.copy_dir("%s/scripts" % self.tutorial, "scripts")
self.copy_dir("%s/data" % self.tutorial, "input")
elif self.tutorial == "mpi-hw":
# copy the mpi wrapper, c code and mpi example
# Executables used by the mpi-hw workflow
self.mkdir("bin")
self.copy_template(
"%s/pegasus-mpi-hw.c" % self.tutorial, "pegasus-mpi-hw.c"
)
self.copy_template("%s/Makefile" % self.tutorial, "Makefile")
self.copy_template("%s/daxgen.py.template" % self.tutorial, "daxgen.py")
self.copy_template(
"%s/mpi-hello-world-wrapper" % self.tutorial,
"bin/mpi-hello-world-wrapper",
mode=0o755,
)
self.copy_template("split/pegasus.html", "input/f.in")
else:
self.copy_template("tc.txt", "tc.txt")
if self.daxgen == "python":
self.copy_template("daxgen/daxgen.py", "daxgen.py")
elif self.daxgen == "perl":
self.copy_template("daxgen/daxgen.pl", "daxgen.pl")
elif self.daxgen == "java":
self.copy_template("daxgen/DAXGen.java", "DAXGen.java")
elif self.daxgen == "r":
self.copy_template("daxgen/daxgen.R", "daxgen.R")
else:
assert False
self.copy_template("sites.xml", "sites.xml")
self.copy_template("plan_dax.sh", "plan_dax.sh", mode=0o755)
self.copy_template("plan_cluster_dax.sh", "plan_cluster_dax.sh", mode=0o755)
self.copy_template("generate_dax.sh", "generate_dax.sh", mode=0o755)
self.copy_template("README.md", "README.md")
self.copy_template("rc.txt", "rc.txt")
self.copy_template("pegasus.properties", "pegasus.properties")
if self.tutorial == "diamond":
if self.tutorial_setup == "wrangler-glite":
self.copy_template(
"pmc-wrapper.wrangler", "bin/pmc-wrapper", mode=0o755
)
elif self.tutorial_setup == "titan-glite":
self.copy_template("pmc-wrapper.titan", "bin/pmc-wrapper", mode=0o755)
elif self.tutorial_setup == "wrangler-glite":
self.copy_template(
"pmc-wrapper.wrangler", "bin/pmc-wrapper", mode=0o755
)
elif self.tutorial_setup == "summit-kub-bosco":
self.copy_template("pmc-wrapper.summit", "bin/pmc-wrapper", mode=0o755)
if self.generate_tutorial:
sys.stdout.write(
"Pegasus Tutorial setup for example workflow - %s for execution on %s in directory %s\n"
% (self.tutorial, self.tutorial_setup, self.workflowdir)
)
def usage():
print("Usage: %s WORKFLOW_DIR" % sys.argv[0])
def main(pegasus_share_dir):
if len(sys.argv) != 2:
usage()
exit(1)
if "-h" in sys.argv:
usage()
exit(1)
workflowdir = sys.argv[1]
if os.path.exists(workflowdir):
print("ERROR: WORKFLOW_DIR '%s' already exists" % workflowdir)
exit(1)
workflowdir = os.path.abspath(workflowdir)
sharedir = os.path.join(pegasus_share_dir, "init")
w = Workflow(workflowdir, sharedir)
w.configure()
w.generate()
| pegasus-isi/pegasus | packages/pegasus-python/src/Pegasus/init-old.py | Python | apache-2.0 | 14,973 |
"""
Django settings for sparta project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')mg$xo^v*2mmwidr0ak6%9&!@e18v8t#7@+vd+wqg8kydb48k7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'sparta.urls'
WSGI_APPLICATION = 'sparta.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'USER':'root',
'NAME':'fordjango',
'PASSWORD':'123456',
'HOST':'localhost',
'PORT':''
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
STATIC_ROOT = os.path.join('/home/dexter/weaponx/Django/sparta/sparta/static')
STATIC_URL = '/assets/'
STATICFILES_DIRS = (
'/home/dexter/weaponx/Django/sparta/sparta/assets',
)
TEMPLATE_DIRS=('/home/dexter/weaponx/Django/sparta/sparta/template',)
 | sureshprasanna70/the-spartan-blog | sparta/settings.py | Python | apache-2.0 | 2,223
# coding=utf8
from django.views.generic import ListView, DetailView, CreateView
from django.db.models import Q
from django.http import JsonResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.shortcuts import render
from pure_pagination.mixins import PaginationMixin
from django.contrib.auth.mixins import LoginRequiredMixin
from django.conf import settings
from books.models import Publish, Author, Book
from books.forms import PublishForm
import json
import logging
logger = logging.getLogger('opsweb')
class PublishListView(LoginRequiredMixin, PaginationMixin, ListView):
    '''
    Actions: getlist, create
    '''
model = Publish
template_name = "books/publish_list.html"
context_object_name = "publish_list"
paginate_by = 5
keyword = ''
def get_queryset(self):
queryset = super(PublishListView, self).get_queryset()
self.keyword = self.request.GET.get('keyword', '').strip()
if self.keyword:
queryset = queryset.filter(Q(name__icontains=self.keyword) |
Q(address__icontains=self.keyword) |
Q(city__icontains=self.keyword))
return queryset
def get_context_data(self, **kwargs):
context = super(PublishListView, self).get_context_data(**kwargs)
context['keyword'] = self.keyword
return context
def post(self, request):
form = PublishForm(request.POST)
if form.is_valid():
form.save()
            res = {'code': 0, 'result': 'Publisher added successfully'}
else:
            # form.errors passes the field validation errors to the frontend as an object that the template can render directly
            res = {'code': 1, 'errmsg': form.errors}
            print(form.errors)
return JsonResponse(res, safe=True)
class PublishDetailView(LoginRequiredMixin, DetailView):
    '''
    Actions: getone, update, delete
    '''
model = Publish
template_name = "books/publish_detail.html"
context_object_name = 'publish'
next_url = '/books/publishlist/'
def post(self, request, *args, **kwargs):
pk = kwargs.get('pk')
p = self.model.objects.get(pk=pk)
form = PublishForm(request.POST, instance=p)
if form.is_valid():
form.save()
res = {"code": 0, "result": "更新出版商成功", 'next_url': self.next_url}
else:
res = {"code": 1, "errmsg": form.errors, 'next_url': self.next_url}
return render(request, settings.JUMP_PAGE, res)
# return HttpResponseRedirect(reverse('books:publish_detail',args=[pk]))
def delete(self, request, *args, **kwargs):
pk = kwargs.get('pk')
        # Look up the books associated with this publisher: it can only be deleted when no related books exist
try:
obj = self.model.objects.get(pk=pk)
if not obj.book_set.all():
self.model.objects.filter(pk=pk).delete()
res = {"code": 0, "result": "删除出版商成功"}
else:
res = {"code": 1, "errmsg": "该出版社有关联书籍,请联系管理员"}
except:
res = {"code": 1, "errmsg": "删除错误请联系管理员"}
return JsonResponse(res, safe=True)
| 1032231418/python | lesson10/apps/books/publish/__init__.py | Python | apache-2.0 | 3,345 |
#!/bin/env python
import itertools
import collections
def read_table(filename):
with open(filename) as fp:
header = next(fp).split()
rows = [line.split()[1:] for line in fp if line.strip()]
columns = zip(*rows)
data = dict(zip(header, columns))
return data
table = read_table("../../data/colldata.txt")
pots = sorted(table)
alphabet = "+-?"
for num in range(2, len(table) + 1):
for group in itertools.combinations(pots, num):
patterns = zip(*[table[p] for p in group])
counts = collections.Counter(patterns)
for poss in itertools.product(alphabet, repeat=num):
print ', '.join(group) + ':',
print ''.join(poss), counts[poss]
| ketancmaheshwari/hello-goog | src/python/collectionsexample.py | Python | apache-2.0 | 718 |
# This file is part of the MapProxy project.
# Copyright (C) 2010 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import operator
import threading
from mapproxy.grid import bbox_intersects, bbox_contains
from mapproxy.util.py import cached_property
from mapproxy.util.geom import (
require_geom_support,
load_polygon_lines,
transform_geometry,
bbox_polygon,
)
from mapproxy.srs import SRS
import logging
log_config = logging.getLogger('mapproxy.config.coverage')
try:
import shapely.geometry
import shapely.prepared
except ImportError:
# missing Shapely is handled by require_geom_support
pass
def coverage(geom, srs):
if isinstance(geom, (list, tuple)):
return BBOXCoverage(geom, srs)
else:
return GeomCoverage(geom, srs)
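# Hedged usage sketch (not part of the original module); coordinate values are illustrative:
#   coverage((5.0, 50.0, 10.0, 55.0), SRS(4326))                        # -> BBOXCoverage
#   coverage(shapely.geometry.Point(8.0, 52.0).buffer(1.0), SRS(4326))  # -> GeomCoverage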
def load_limited_to(limited_to):
require_geom_support()
srs = SRS(limited_to['srs'])
geom = limited_to['geometry']
if not hasattr(geom, 'type'): # not a Shapely geometry
if isinstance(geom, (list, tuple)):
geom = bbox_polygon(geom)
else:
polygons = load_polygon_lines(geom.split('\n'))
if len(polygons) == 1:
geom = polygons[0]
else:
geom = shapely.geometry.MultiPolygon(polygons)
return GeomCoverage(geom, srs, clip=True)
class MultiCoverage(object):
clip = False
"""Aggregates multiple coverages"""
def __init__(self, coverages):
self.coverages = coverages
self.bbox = self.extent.bbox
@cached_property
def extent(self):
return reduce(operator.add, [c.extent for c in self.coverages])
def intersects(self, bbox, srs):
return any(c.intersects(bbox, srs) for c in self.coverages)
def contains(self, bbox, srs):
return any(c.contains(bbox, srs) for c in self.coverages)
def transform_to(self, srs):
return MultiCoverage([c.transform_to(srs) for c in self.coverages])
def __eq__(self, other):
if not isinstance(other, MultiCoverage):
return NotImplemented
if self.bbox != other.bbox:
return False
if len(self.coverages) != len(other.coverages):
return False
for a, b in zip(self.coverages, other.coverages):
if a != b:
return False
return True
def __ne__(self, other):
if not isinstance(other, MultiCoverage):
return NotImplemented
return not self.__eq__(other)
def __repr__(self):
return '<MultiCoverage %r: %r>' % (self.extent.llbbox, self.coverages)
class BBOXCoverage(object):
clip = False
def __init__(self, bbox, srs):
self.bbox = bbox
self.srs = srs
self.geom = None
@property
def extent(self):
from mapproxy.layer import MapExtent
return MapExtent(self.bbox, self.srs)
def _bbox_in_coverage_srs(self, bbox, srs):
if srs != self.srs:
bbox = srs.transform_bbox_to(self.srs, bbox)
return bbox
def intersects(self, bbox, srs):
bbox = self._bbox_in_coverage_srs(bbox, srs)
return bbox_intersects(self.bbox, bbox)
def intersection(self, bbox, srs):
bbox = self._bbox_in_coverage_srs(bbox, srs)
intersection = (
max(self.bbox[0], bbox[0]),
max(self.bbox[1], bbox[1]),
min(self.bbox[2], bbox[2]),
min(self.bbox[3], bbox[3]),
)
if intersection[0] >= intersection[2] or intersection[1] >= intersection[3]:
return None
return BBOXCoverage(intersection, self.srs)
def contains(self, bbox, srs):
bbox = self._bbox_in_coverage_srs(bbox, srs)
return bbox_contains(self.bbox, bbox)
def transform_to(self, srs):
if srs == self.srs:
return self
bbox = self.srs.transform_bbox_to(srs, self.bbox)
return BBOXCoverage(bbox, srs)
def __eq__(self, other):
if not isinstance(other, BBOXCoverage):
return NotImplemented
if self.srs != other.srs:
return False
if self.bbox != other.bbox:
return False
return True
def __ne__(self, other):
if not isinstance(other, BBOXCoverage):
return NotImplemented
return not self.__eq__(other)
def __repr__(self):
return '<BBOXCoverage %r/%r>' % (self.extent.llbbox, self.bbox)
class GeomCoverage(object):
def __init__(self, geom, srs, clip=False):
self.geom = geom
self.bbox = geom.bounds
self.srs = srs
self.clip = clip
self._prep_lock = threading.Lock()
self._prepared_geom = None
self._prepared_counter = 0
self._prepared_max = 10000
@property
def extent(self):
from mapproxy.layer import MapExtent
return MapExtent(self.bbox, self.srs)
@property
def prepared_geom(self):
# GEOS internal data structure for prepared geometries grows over time,
# recreate to limit memory consumption
if not self._prepared_geom or self._prepared_counter > self._prepared_max:
self._prepared_geom = shapely.prepared.prep(self.geom)
self._prepared_counter = 0
self._prepared_counter += 1
return self._prepared_geom
def _geom_in_coverage_srs(self, geom, srs):
if isinstance(geom, shapely.geometry.base.BaseGeometry):
if srs != self.srs:
geom = transform_geometry(srs, self.srs, geom)
elif len(geom) == 2:
if srs != self.srs:
geom = srs.transform_to(self.srs, geom)
geom = shapely.geometry.Point(geom)
else:
if srs != self.srs:
geom = srs.transform_bbox_to(self.srs, geom)
geom = bbox_polygon(geom)
return geom
def transform_to(self, srs):
if srs == self.srs:
return self
geom = transform_geometry(self.srs, srs, self.geom)
return GeomCoverage(geom, srs)
def intersects(self, bbox, srs):
bbox = self._geom_in_coverage_srs(bbox, srs)
with self._prep_lock:
return self.prepared_geom.intersects(bbox)
def intersection(self, bbox, srs):
bbox = self._geom_in_coverage_srs(bbox, srs)
return GeomCoverage(self.geom.intersection(bbox), self.srs)
def contains(self, bbox, srs):
bbox = self._geom_in_coverage_srs(bbox, srs)
with self._prep_lock:
return self.prepared_geom.contains(bbox)
def __eq__(self, other):
if not isinstance(other, GeomCoverage):
return NotImplemented
if self.srs != other.srs:
return False
if self.bbox != other.bbox:
return False
if not self.geom.equals(other.geom):
return False
return True
def __ne__(self, other):
if not isinstance(other, GeomCoverage):
return NotImplemented
return not self.__eq__(other)
def __repr__(self):
        return '<GeomCoverage %r: %r>' % (self.extent.llbbox, self.geom)
 | Anderson0026/mapproxy | mapproxy/util/coverage.py | Python | apache-2.0 | 7,709
# Copyright (c) 2019 Infortrend Technology, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class InfortrendNASTestData(object):
fake_share_id = ['5a0aa06e-1c57-4996-be46-b81e360e8866', # NFS
'aac4fe64-7a9c-472a-b156-9adbb50b4d29'] # CIFS
fake_share_name = [fake_share_id[0].replace('-', ''),
fake_share_id[1].replace('-', '')]
fake_channel_ip = ['172.27.112.223', '172.27.113.209']
fake_service_status_data = ('(64175, 1234, 272, 0)\n\n'
'{"cliCode": '
'[{"Return": "0x0000", "CLI": "Successful"}], '
'"returnCode": [], '
'"data": '
'[{"A": '
'{"NFS": '
'{"displayName": "NFS", '
'"state_time": "2017-05-04 14:19:53", '
'"enabled": true, '
'"cpu_rate": "0.0", '
'"mem_rate": "0.0", '
'"state": "exited", '
'"type": "share"}}}]}\n\n')
fake_folder_status_data = ('(64175, 1234, 1017, 0)\n\n'
'{"cliCode": '
'[{"Return": "0x0000", "CLI": "Successful"}], '
'"returnCode": [], '
'"data": '
'[{"utility": "1.00", '
'"used": "33886208", '
'"subshare": true, '
'"share": false, '
'"worm": "", '
'"free": "321931374592", '
'"fsType": "xfs", '
'"owner": "A", '
'"readOnly": false, '
'"modifyTime": "2017-04-27 16:16", '
'"directory": "/share-pool-01/LV-1", '
'"volumeId": "6541BAFB2E6C57B6", '
'"mounted": true, '
'"size": "321965260800"}, '
'{"utility": "1.00", '
'"used": "33779712", '
'"subshare": false, '
'"share": false, '
'"worm": "", '
'"free": "107287973888", '
'"fsType": "xfs", '
'"owner": "A", '
'"readOnly": false, '
'"modifyTime": "2017-04-27 15:45", '
'"directory": "/share-pool-02/LV-1", '
'"volumeId": "147A8FB67DA39914", '
'"mounted": true, '
'"size": "107321753600"}]}\n\n')
fake_nfs_status_off = [{
'A': {
'NFS': {
'displayName': 'NFS',
'state_time': '2017-05-04 14:19:53',
'enabled': False,
'cpu_rate': '0.0',
'mem_rate': '0.0',
'state': 'exited',
'type': 'share',
}
}
}]
fake_folder_status = [{
'utility': '1.00',
'used': '33886208',
'subshare': True,
'share': False,
'worm': '',
'free': '321931374592',
'fsType': 'xfs',
'owner': 'A',
'readOnly': False,
'modifyTime': '2017-04-27 16:16',
'directory': '/share-pool-01/LV-1',
'volumeId': '6541BAFB2E6C57B6',
'mounted': True,
'size': '321965260800'}, {
'utility': '1.00',
'used': '33779712',
'subshare': False,
'share': False,
'worm': '',
'free': '107287973888',
'fsType': 'xfs',
'owner': 'A',
'readOnly': False,
'modifyTime': '2017-04-27 15:45',
'directory': '/share-pool-02/LV-1',
'volumeId': '147A8FB67DA39914',
'mounted': True,
'size': '107321753600',
}]
def fake_get_channel_status(self, ch1_status='UP'):
return [{
'datalink': 'mgmt0',
'status': 'UP',
'typeConfig': 'DHCP',
'IP': '172.27.112.125',
'MAC': '00:d0:23:00:15:a6',
'netmask': '255.255.240.0',
'type': 'dhcp',
'gateway': '172.27.127.254'}, {
'datalink': 'CH0',
'status': 'UP',
'typeConfig': 'DHCP',
'IP': self.fake_channel_ip[0],
'MAC': '00:d0:23:80:15:a6',
'netmask': '255.255.240.0',
'type': 'dhcp',
'gateway': '172.27.127.254'}, {
'datalink': 'CH1',
'status': ch1_status,
'typeConfig': 'DHCP',
'IP': self.fake_channel_ip[1],
'MAC': '00:d0:23:40:15:a6',
'netmask': '255.255.240.0',
'type': 'dhcp',
'gateway': '172.27.127.254'}, {
'datalink': 'CH2',
'status': 'DOWN',
'typeConfig': 'DHCP',
'IP': '',
'MAC': '00:d0:23:c0:15:a6',
'netmask': '',
'type': '',
'gateway': ''}, {
'datalink': 'CH3',
'status': 'DOWN',
'typeConfig': 'DHCP',
'IP': '',
'MAC': '00:d0:23:20:15:a6',
'netmask': '',
'type': '',
'gateway': '',
}]
fake_fquota_status = [{
'quota': '21474836480',
'used': '0',
'name': 'test-folder',
'type': 'subfolder',
'id': '537178178'}, {
'quota': '32212254720',
'used': '0',
'name': fake_share_name[0],
'type': 'subfolder',
'id': '805306752'}, {
'quota': '53687091200',
'used': '21474836480',
'name': fake_share_name[1],
'type': 'subfolder',
'id': '69'}, {
'quota': '94091997184',
'used': '0',
'type': 'subfolder',
'id': '70',
"name": 'test-folder-02'
}]
fake_fquota_status_with_no_settings = []
def fake_get_share_status_nfs(self, status=False):
fake_share_status_nfs = [{
'ftp': False,
'cifs': False,
'oss': False,
'sftp': False,
'nfs': status,
            'directory': '/share-pool-01/LV-1/' + self.fake_share_name[0],
'exist': True,
'afp': False,
'webdav': False
}]
if status:
fake_share_status_nfs[0]['nfs_detail'] = {
'hostList': [{
'uid': '65534',
'insecure': 'insecure',
'squash': 'all',
'access': 'ro',
'host': '*',
'gid': '65534',
'mode': 'async',
'no_subtree_check': 'no_subtree_check',
}]
}
return fake_share_status_nfs
def fake_get_share_status_cifs(self, status=False):
fake_share_status_cifs = [{
'ftp': False,
'cifs': status,
'oss': False,
'sftp': False,
'nfs': False,
'directory': '/share-pool-01/LV-1/' + self.fake_share_name[1],
'exist': True,
'afp': False,
'webdav': False
}]
if status:
fake_share_status_cifs[0]['cifs_detail'] = {
'available': True,
'encrypt': False,
'description': '',
'sharename': 'cifs-01',
'failover': '',
'AIO': True,
'priv': 'None',
'recycle_bin': False,
'ABE': True,
}
return fake_share_status_cifs
fake_subfolder_data = [{
'size': '6',
'index': '34',
'description': '',
'encryption': '',
'isEnd': False,
'share': False,
'volumeId': '6541BAFB2E6C57B6',
'quota': '',
'modifyTime': '2017-04-06 11:35',
'owner': 'A',
'path': '/share-pool-01/LV-1/UserHome',
'subshare': True,
'type': 'subfolder',
'empty': False,
'name': 'UserHome'}, {
'size': '6',
'index': '39',
'description': '',
'encryption': '',
'isEnd': False,
'share': False,
'volumeId': '6541BAFB2E6C57B6',
'quota': '21474836480',
'modifyTime': '2017-04-27 15:44',
'owner': 'A',
'path': '/share-pool-01/LV-1/test-folder',
'subshare': False,
'type': 'subfolder',
'empty': True,
'name': 'test-folder'}, {
'size': '6',
'index': '45',
'description': '',
'encryption': '',
'isEnd': False,
'share': True,
'volumeId': '6541BAFB2E6C57B6',
'quota': '32212254720',
'modifyTime': '2017-04-27 16:15',
'owner': 'A',
'path': '/share-pool-01/LV-1/' + fake_share_name[0],
'subshare': False,
'type': 'subfolder',
'empty': True,
'name': fake_share_name[0]}, {
'size': '6',
'index': '512',
'description': '',
'encryption': '',
'isEnd': True,
'share': True,
'volumeId': '6541BAFB2E6C57B6',
'quota': '53687091200',
'modifyTime': '2017-04-27 16:16',
'owner': 'A',
'path': '/share-pool-01/LV-1/' + fake_share_name[1],
'subshare': False,
'type': 'subfolder',
'empty': True,
'name': fake_share_name[1]}, {
'size': '6',
'index': '777',
'description': '',
'encryption': '',
'isEnd': False,
'share': False,
'volumeId': '6541BAFB2E6C57B6',
'quota': '94091997184',
'modifyTime': '2017-04-28 15:44',
'owner': 'A',
'path': '/share-pool-01/LV-1/test-folder-02',
'subshare': False,
'type': 'subfolder',
'empty': True,
'name': 'test-folder-02'
}]
fake_cifs_user_list = [{
'Superuser': 'No',
'Group': 'users',
'Description': '',
'Quota': 'none',
'PWD Expiry Date': '2291-01-19',
'Home Directory': '/share-pool-01/LV-1/UserHome/user01',
'UID': '100001',
'Type': 'Local',
'Name': 'user01'}, {
'Superuser': 'No',
'Group': 'users',
'Description': '',
'Quota': 'none',
'PWD Expiry Date': '2017-08-07',
'Home Directory': '/share-pool-01/LV-1/UserHome/user02',
'UID': '100002',
'Type': 'Local',
'Name': 'user02'
}]
fake_share_status_nfs_with_rules = [{
'ftp': False,
'cifs': False,
'oss': False,
'sftp': False,
'nfs': True,
'directory': '/share-pool-01/LV-1/' + fake_share_name[0],
'exist': True,
'nfs_detail': {
'hostList': [{
'uid': '65534',
'insecure': 'insecure',
'squash': 'all',
'access': 'ro',
'host': '*',
'gid': '65534',
'mode': 'async',
'no_subtree_check':
'no_subtree_check'}, {
'uid': '65534',
'insecure': 'insecure',
'squash': 'all',
'access': 'rw',
'host': '172.27.1.1',
'gid': '65534',
'mode': 'async',
'no_subtree_check': 'no_subtree_check'}, {
'uid': '65534',
'insecure': 'insecure',
'squash': 'all',
'access': 'rw',
'host': '172.27.1.2',
'gid': '65534',
'mode': 'async',
'no_subtree_check': 'no_subtree_check'}]
},
'afp': False,
'webdav': False,
}]
fake_share_status_cifs_with_rules = [
{
'permission': {
'Read': True,
'Write': True,
'Execute': True},
'type': 'user',
'id': '100001',
'name': 'user01'
}, {
'permission': {
'Read': True,
'Write': False,
'Execute': True},
'type': 'user',
'id': '100002',
'name': 'user02'
}, {
'permission': {
'Read': True,
'Write': False,
'Execute': True},
'type': 'group@',
'id': '100',
'name': 'users'
}, {
'permission': {
'Read': True,
'Write': False,
'Execute': True},
'type': 'other@',
'id': '',
'name': ''
}
]
| openstack/manila | manila/tests/share/drivers/infortrend/fake_infortrend_nas_data.py | Python | apache-2.0 | 13,722 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetTagKey
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-resourcemanager
# [START cloudresourcemanager_v3_generated_TagKeys_GetTagKey_async]
from google.cloud import resourcemanager_v3
async def sample_get_tag_key():
# Create a client
client = resourcemanager_v3.TagKeysAsyncClient()
# Initialize request argument(s)
request = resourcemanager_v3.GetTagKeyRequest(
name="name_value",
)
# Make the request
response = await client.get_tag_key(request=request)
# Handle the response
print(response)
# [END cloudresourcemanager_v3_generated_TagKeys_GetTagKey_async]
| googleapis/python-resource-manager | samples/generated_samples/cloudresourcemanager_v3_generated_tag_keys_get_tag_key_async.py | Python | apache-2.0 | 1,477 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python utilities required by Keras."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import binascii
import codecs
import marshal
import os
import re
import sys
import time
import types as python_types
import numpy as np
import six
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import keras_export
_GLOBAL_CUSTOM_OBJECTS = {}
@keras_export('keras.utils.CustomObjectScope')
class CustomObjectScope(object):
"""Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape.
Code within a `with` statement will be able to access custom objects
by name. Changes to global custom objects persist
within the enclosing `with` statement. At end of the `with` statement,
global custom objects are reverted to state
at beginning of the `with` statement.
Example:
Consider a custom object `MyObject` (e.g. a class):
```python
with CustomObjectScope({'MyObject':MyObject}):
layer = Dense(..., kernel_regularizer='MyObject')
# save, load, etc. will recognize custom object by name
```
"""
def __init__(self, *args):
self.custom_objects = args
self.backup = None
def __enter__(self):
self.backup = _GLOBAL_CUSTOM_OBJECTS.copy()
for objects in self.custom_objects:
_GLOBAL_CUSTOM_OBJECTS.update(objects)
return self
def __exit__(self, *args, **kwargs):
_GLOBAL_CUSTOM_OBJECTS.clear()
_GLOBAL_CUSTOM_OBJECTS.update(self.backup)
@keras_export('keras.utils.custom_object_scope')
def custom_object_scope(*args):
"""Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape.
Convenience wrapper for `CustomObjectScope`.
Code within a `with` statement will be able to access custom objects
by name. Changes to global custom objects persist
within the enclosing `with` statement. At end of the `with` statement,
global custom objects are reverted to state
at beginning of the `with` statement.
Example:
Consider a custom object `MyObject`
```python
with custom_object_scope({'MyObject':MyObject}):
layer = Dense(..., kernel_regularizer='MyObject')
# save, load, etc. will recognize custom object by name
```
Arguments:
*args: Variable length list of dictionaries of name,
class pairs to add to custom objects.
Returns:
Object of type `CustomObjectScope`.
"""
return CustomObjectScope(*args)
@keras_export('keras.utils.get_custom_objects')
def get_custom_objects():
"""Retrieves a live reference to the global dictionary of custom objects.
Updating and clearing custom objects using `custom_object_scope`
is preferred, but `get_custom_objects` can
be used to directly access `_GLOBAL_CUSTOM_OBJECTS`.
Example:
```python
get_custom_objects().clear()
get_custom_objects()['MyObject'] = MyObject
```
Returns:
Global dictionary of names to classes (`_GLOBAL_CUSTOM_OBJECTS`).
"""
return _GLOBAL_CUSTOM_OBJECTS
def serialize_keras_class_and_config(cls_name, cls_config):
"""Returns the serialization of the class with the given config."""
return {'class_name': cls_name, 'config': cls_config}
@keras_export('keras.utils.serialize_keras_object')
def serialize_keras_object(instance):
_, instance = tf_decorator.unwrap(instance)
if instance is None:
return None
if hasattr(instance, 'get_config'):
return serialize_keras_class_and_config(instance.__class__.__name__,
instance.get_config())
if hasattr(instance, '__name__'):
return instance.__name__
raise ValueError('Cannot serialize', instance)
def class_and_config_for_serialized_keras_object(
config,
module_objects=None,
custom_objects=None,
printable_module_name='object'):
"""Returns the class name and config for a serialized keras object."""
if (not isinstance(config, dict) or 'class_name' not in config or
'config' not in config):
raise ValueError('Improper config format: ' + str(config))
class_name = config['class_name']
if custom_objects and class_name in custom_objects:
cls = custom_objects[class_name]
elif class_name in _GLOBAL_CUSTOM_OBJECTS:
cls = _GLOBAL_CUSTOM_OBJECTS[class_name]
else:
module_objects = module_objects or {}
cls = module_objects.get(class_name)
if cls is None:
raise ValueError('Unknown ' + printable_module_name + ': ' + class_name)
return (cls, config['config'])
@keras_export('keras.utils.deserialize_keras_object')
def deserialize_keras_object(identifier,
module_objects=None,
custom_objects=None,
printable_module_name='object'):
if identifier is None:
return None
if isinstance(identifier, dict):
# In this case we are dealing with a Keras config dictionary.
config = identifier
(cls, cls_config) = class_and_config_for_serialized_keras_object(
config, module_objects, custom_objects, printable_module_name)
if hasattr(cls, 'from_config'):
arg_spec = tf_inspect.getfullargspec(cls.from_config)
custom_objects = custom_objects or {}
if 'custom_objects' in arg_spec.args:
return cls.from_config(
cls_config,
custom_objects=dict(
list(_GLOBAL_CUSTOM_OBJECTS.items()) +
list(custom_objects.items())))
with CustomObjectScope(custom_objects):
return cls.from_config(cls_config)
else:
# Then `cls` may be a function returning a class.
# in this case by convention `config` holds
# the kwargs of the function.
custom_objects = custom_objects or {}
with CustomObjectScope(custom_objects):
return cls(**cls_config)
elif isinstance(identifier, six.string_types):
object_name = identifier
if custom_objects and object_name in custom_objects:
obj = custom_objects.get(object_name)
elif object_name in _GLOBAL_CUSTOM_OBJECTS:
obj = _GLOBAL_CUSTOM_OBJECTS[object_name]
else:
obj = module_objects.get(object_name)
if obj is None:
raise ValueError('Unknown ' + printable_module_name + ':' + object_name)
# Classes passed by name are instantiated with no args, functions are
# returned as-is.
if tf_inspect.isclass(obj):
return obj()
return obj
else:
raise ValueError('Could not interpret serialized ' + printable_module_name +
': ' + identifier)
def func_dump(func):
"""Serializes a user defined function.
Arguments:
func: the function to serialize.
Returns:
A tuple `(code, defaults, closure)`.
"""
if os.name == 'nt':
raw_code = marshal.dumps(func.__code__).replace(b'\\', b'/')
code = codecs.encode(raw_code, 'base64').decode('ascii')
else:
raw_code = marshal.dumps(func.__code__)
code = codecs.encode(raw_code, 'base64').decode('ascii')
defaults = func.__defaults__
if func.__closure__:
closure = tuple(c.cell_contents for c in func.__closure__)
else:
closure = None
return code, defaults, closure
def func_load(code, defaults=None, closure=None, globs=None):
"""Deserializes a user defined function.
Arguments:
code: bytecode of the function.
defaults: defaults of the function.
closure: closure of the function.
globs: dictionary of global objects.
Returns:
A function object.
"""
if isinstance(code, (tuple, list)): # unpack previous dump
code, defaults, closure = code
if isinstance(defaults, list):
defaults = tuple(defaults)
def ensure_value_to_cell(value):
"""Ensures that a value is converted to a python cell object.
Arguments:
value: Any value that needs to be casted to the cell type
Returns:
A value wrapped as a cell object (see function "func_load")
"""
def dummy_fn():
# pylint: disable=pointless-statement
value # just access it so it gets captured in .__closure__
cell_value = dummy_fn.__closure__[0]
if not isinstance(value, type(cell_value)):
return cell_value
return value
if closure is not None:
closure = tuple(ensure_value_to_cell(_) for _ in closure)
try:
raw_code = codecs.decode(code.encode('ascii'), 'base64')
except (UnicodeEncodeError, binascii.Error):
raw_code = code.encode('raw_unicode_escape')
code = marshal.loads(raw_code)
if globs is None:
globs = globals()
return python_types.FunctionType(
code, globs, name=code.co_name, argdefs=defaults, closure=closure)
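# Hedged round-trip sketch (illustrative, not from the original source):
#   payload = func_dump(lambda x: x + 1)   # -> (code, defaults, closure)
#   restored = func_load(payload)
#   assert restored(1) == 2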
def has_arg(fn, name, accept_all=False):
"""Checks if a callable accepts a given keyword argument.
Arguments:
fn: Callable to inspect.
name: Check if `fn` can be called with `name` as a keyword argument.
accept_all: What to return if there is no parameter called `name`
but the function accepts a `**kwargs` argument.
Returns:
bool, whether `fn` accepts a `name` keyword argument.
"""
arg_spec = tf_inspect.getfullargspec(fn)
if accept_all and arg_spec.varkw is not None:
return True
return name in arg_spec.args
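# Illustrative example (assumed, not part of the original file):
#   def f(x, y=1, **kwargs): pass
#   has_arg(f, 'y')                   # -> True
#   has_arg(f, 'z')                   # -> False
#   has_arg(f, 'z', accept_all=True)  # -> True, because f accepts **kwargs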
@keras_export('keras.utils.Progbar')
class Progbar(object):
"""Displays a progress bar.
Arguments:
target: Total number of steps expected, None if unknown.
width: Progress bar width on screen.
verbose: Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose)
stateful_metrics: Iterable of string names of metrics that
should *not* be averaged over time. Metrics in this list
will be displayed as-is. All others will be averaged
by the progbar before display.
interval: Minimum visual progress update interval (in seconds).
unit_name: Display name for step counts (usually "step" or "sample").
"""
def __init__(self, target, width=30, verbose=1, interval=0.05,
stateful_metrics=None, unit_name='step'):
self.target = target
self.width = width
self.verbose = verbose
self.interval = interval
self.unit_name = unit_name
if stateful_metrics:
self.stateful_metrics = set(stateful_metrics)
else:
self.stateful_metrics = set()
self._dynamic_display = ((hasattr(sys.stdout, 'isatty') and
sys.stdout.isatty()) or
'ipykernel' in sys.modules or
'posix' in sys.modules)
self._total_width = 0
self._seen_so_far = 0
# We use a dict + list to avoid garbage collection
# issues found in OrderedDict
self._values = {}
self._values_order = []
self._start = time.time()
self._last_update = 0
def update(self, current, values=None):
"""Updates the progress bar.
Arguments:
current: Index of current step.
values: List of tuples:
`(name, value_for_last_step)`.
If `name` is in `stateful_metrics`,
`value_for_last_step` will be displayed as-is.
Else, an average of the metric over time will be displayed.
"""
values = values or []
for k, v in values:
if k not in self._values_order:
self._values_order.append(k)
if k not in self.stateful_metrics:
if k not in self._values:
self._values[k] = [v * (current - self._seen_so_far),
current - self._seen_so_far]
else:
self._values[k][0] += v * (current - self._seen_so_far)
self._values[k][1] += (current - self._seen_so_far)
else:
# Stateful metrics output a numeric value. This representation
# means "take an average from a single value" but keeps the
# numeric formatting.
self._values[k] = [v, 1]
self._seen_so_far = current
now = time.time()
info = ' - %.0fs' % (now - self._start)
if self.verbose == 1:
if (now - self._last_update < self.interval and
self.target is not None and current < self.target):
return
prev_total_width = self._total_width
if self._dynamic_display:
sys.stdout.write('\b' * prev_total_width)
sys.stdout.write('\r')
else:
sys.stdout.write('\n')
if self.target is not None:
numdigits = int(np.log10(self.target)) + 1
bar = ('%' + str(numdigits) + 'd/%d [') % (current, self.target)
prog = float(current) / self.target
prog_width = int(self.width * prog)
if prog_width > 0:
bar += ('=' * (prog_width - 1))
if current < self.target:
bar += '>'
else:
bar += '='
bar += ('.' * (self.width - prog_width))
bar += ']'
else:
bar = '%7d/Unknown' % current
self._total_width = len(bar)
sys.stdout.write(bar)
if current:
time_per_unit = (now - self._start) / current
else:
time_per_unit = 0
if self.target is not None and current < self.target:
eta = time_per_unit * (self.target - current)
if eta > 3600:
eta_format = '%d:%02d:%02d' % (eta // 3600,
(eta % 3600) // 60,
eta % 60)
elif eta > 60:
eta_format = '%d:%02d' % (eta // 60, eta % 60)
else:
eta_format = '%ds' % eta
info = ' - ETA: %s' % eta_format
else:
if time_per_unit >= 1 or time_per_unit == 0:
info += ' %.0fs/%s' % (time_per_unit, self.unit_name)
elif time_per_unit >= 1e-3:
info += ' %.0fms/%s' % (time_per_unit * 1e3, self.unit_name)
else:
info += ' %.0fus/%s' % (time_per_unit * 1e6, self.unit_name)
for k in self._values_order:
info += ' - %s:' % k
if isinstance(self._values[k], list):
avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))
if abs(avg) > 1e-3:
info += ' %.4f' % avg
else:
info += ' %.4e' % avg
else:
info += ' %s' % self._values[k]
self._total_width += len(info)
if prev_total_width > self._total_width:
info += (' ' * (prev_total_width - self._total_width))
if self.target is not None and current >= self.target:
info += '\n'
sys.stdout.write(info)
sys.stdout.flush()
elif self.verbose == 2:
if self.target is not None and current >= self.target:
numdigits = int(np.log10(self.target)) + 1
count = ('%' + str(numdigits) + 'd/%d') % (current, self.target)
info = count + info
for k in self._values_order:
info += ' - %s:' % k
avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))
if avg > 1e-3:
info += ' %.4f' % avg
else:
info += ' %.4e' % avg
info += '\n'
sys.stdout.write(info)
sys.stdout.flush()
self._last_update = now
def add(self, n, values=None):
self.update(self._seen_so_far + n, values)
def make_batches(size, batch_size):
"""Returns a list of batch indices (tuples of indices).
Arguments:
size: Integer, total size of the data to slice into batches.
batch_size: Integer, batch size.
Returns:
A list of tuples of array indices.
"""
num_batches = int(np.ceil(size / float(batch_size)))
return [(i * batch_size, min(size, (i + 1) * batch_size))
for i in range(0, num_batches)]
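# Illustrative example (assumed): make_batches(size=10, batch_size=4)
# returns [(0, 4), (4, 8), (8, 10)].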
def slice_arrays(arrays, start=None, stop=None):
"""Slice an array or list of arrays.
This takes an array-like, or a list of
array-likes, and outputs:
- arrays[start:stop] if `arrays` is an array-like
- [x[start:stop] for x in arrays] if `arrays` is a list
Can also work on list/array of indices: `slice_arrays(x, indices)`
Arguments:
arrays: Single array or list of arrays.
start: can be an integer index (start index)
or a list/array of indices
stop: integer (stop index); should be None if
`start` was a list.
Returns:
A slice of the array(s).
Raises:
ValueError: If the value of start is a list and stop is not None.
"""
if arrays is None:
return [None]
if isinstance(start, list) and stop is not None:
raise ValueError('The stop argument has to be None if the value of start '
'is a list.')
elif isinstance(arrays, list):
if hasattr(start, '__len__'):
# hdf5 datasets only support list objects as indices
if hasattr(start, 'shape'):
start = start.tolist()
return [None if x is None else x[start] for x in arrays]
return [
None if x is None else
None if not hasattr(x, '__getitem__') else x[start:stop] for x in arrays
]
else:
if hasattr(start, '__len__'):
if hasattr(start, 'shape'):
start = start.tolist()
return arrays[start]
if hasattr(start, '__getitem__'):
return arrays[start:stop]
return [None]
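# Illustrative examples (assumed, not from the original file):
#   slice_arrays([x_train, y_train], 0, 32)  # -> [x_train[0:32], y_train[0:32]]
#   slice_arrays(x_train, index_list)        # -> x_train[index_list] when given a list of indices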
def to_list(x):
"""Normalizes a list/tensor into a list.
If a tensor is passed, we return
a list of size 1 containing the tensor.
Arguments:
x: target object to be normalized.
Returns:
A list.
"""
if isinstance(x, list):
return x
return [x]
def object_list_uid(object_list):
"""Creates a single string from object ids."""
object_list = nest.flatten(object_list)
return ', '.join([str(abs(id(x))) for x in object_list])
def to_snake_case(name):
intermediate = re.sub('(.)([A-Z][a-z0-9]+)', r'\1_\2', name)
insecure = re.sub('([a-z])([A-Z])', r'\1_\2', intermediate).lower()
# If the class is private the name starts with "_" which is not secure
# for creating scopes. We prefix the name with "private" in this case.
if insecure[0] != '_':
return insecure
return 'private' + insecure
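# Illustrative example (assumed): to_snake_case('Conv2DTranspose') returns 'conv2d_transpose';
# names starting with '_' additionally get a 'private' prefix so the result is a safe scope name.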
def is_all_none(structure):
iterable = nest.flatten(structure)
# We cannot use Python's `any` because the iterable may return Tensors.
for element in iterable:
if element is not None:
return False
return True
def check_for_unexpected_keys(name, input_dict, expected_values):
unknown = set(input_dict.keys()).difference(expected_values)
if unknown:
raise ValueError('Unknown entries in {} dictionary: {}. Only expected '
'following keys: {}'.format(name, list(unknown),
expected_values))
def validate_kwargs(kwargs, allowed_kwargs,
error_message='Keyword argument not understood:'):
"""Checks that all keyword arguments are in the set of allowed keys."""
for kwarg in kwargs:
if kwarg not in allowed_kwargs:
raise TypeError(error_message, kwarg)
| alsrgv/tensorflow | tensorflow/python/keras/utils/generic_utils.py | Python | apache-2.0 | 19,553 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
import allure_commons
from allure_pytest.utils import ALLURE_LABEL_PREFIX, ALLURE_LINK_PREFIX
class AllureTestHelper(object):
def __init__(self, config):
self.config = config
@allure_commons.hookimpl
def decorate_as_label(self, label_type, labels):
allure_label_marker = '{prefix}.{label_type}'.format(prefix=ALLURE_LABEL_PREFIX, label_type=label_type)
allure_label = getattr(pytest.mark, allure_label_marker)
return allure_label(*labels, label_type=label_type)
@allure_commons.hookimpl
def decorate_as_link(self, url, link_type, name):
allure_link_marker = '{prefix}.{link_type}'.format(prefix=ALLURE_LINK_PREFIX, link_type=link_type)
pattern = dict(self.config.option.allure_link_pattern).get(link_type, u'{}')
url = pattern.format(url)
allure_link = getattr(pytest.mark, allure_link_marker)
return allure_link(url, name=name, link_type=link_type)
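# Illustrative note, not part of the original module: decorate_as_label('severity',
# ['critical']) looks up a dynamic pytest marker named
# '{ALLURE_LABEL_PREFIX}.severity' and returns it pre-bound as
# marker('critical', label_type='severity'), ready to be applied to a test.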
| igogorek/allure-python | allure-pytest/src/helper.py | Python | apache-2.0 | 1,026 |
#
# farmwork/forms.py
#
from django import forms
from django.utils.text import slugify
from .models import Farmwork
# ========================================================
# FARMWORK FORM
# ========================================================
class FarmworkForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(FarmworkForm, self).__init__(*args, **kwargs)
class Meta:
model = Farmwork
fields = [
'job_role',
'job_fruit',
'job_pay',
'job_pay_type',
'job_start_date',
'job_duration',
'job_duration_type',
'job_description',
'con_first_name',
'con_surname',
'con_number',
'con_email',
'con_description',
'acc_variety',
'acc_price',
'acc_price_type',
'acc_description',
'loc_street_address',
'loc_city',
'loc_state',
'loc_post_code',
]
# --
# AUTO GENERATE SLUG ON SAVE
# Credit: https://keyerror.com/blog/automatically-generating-unique-slugs-in-django
# --
    def save(self, commit=True):
        if self.instance.pk:
            return super(FarmworkForm, self).save(commit)
        instance = super(FarmworkForm, self).save(commit=False)
        instance.slug = slugify(instance.get_job_fruit_display() + '-' + instance.get_job_role_display() + '-in-' + instance.loc_city)
        if commit:
            instance.save()
        return instance
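# Illustrative sketch, not part of the original app: shows the slug format the
# save() override produces (fruit-role-in-city, lowercased by slugify).
def _example_slug():
    # e.g. fruit 'Apples', role 'Picker', city 'Cairns' -> 'apples-picker-in-cairns'
    return slugify('Apples' + '-' + 'Picker' + '-in-' + 'Cairns')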
| ianmilliken/rwf | backend/apps/farmwork/forms.py | Python | apache-2.0 | 1,542 |
from django.contrib.auth.models import AnonymousUser
from core.models import Identity
from api.v2.serializers.post import AccountSerializer
from api.v2.views.base import AdminAuthViewSet
class AccountViewSet(AdminAuthViewSet):
"""
    API endpoint that allows accounts (identities) to be created.
"""
lookup_fields = ("id", "uuid")
queryset = Identity.objects.all()
serializer_class = AccountSerializer
http_method_names = ['post', 'head', 'options', 'trace']
def get_queryset(self):
"""
        Filter identities by the current user.
"""
user = self.request.user
        if isinstance(user, AnonymousUser):
return Identity.objects.none()
identities = user.current_identities()
return identities
| CCI-MOC/GUI-Backend | api/v2/views/account.py | Python | apache-2.0 | 771 |
# Copyright 2017 Priscilla Boyd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
The DT_Utils module provides helper functions for Decision Tree algorithms implementation, model creation and
analysis.
"""
import pickle
from matplotlib import pyplot as plt
from sklearn.metrics import mean_squared_error
from tools.Utils import create_folder_if_not_exists
# noinspection PyTypeChecker
def score_dt(model_name, model, X, y, y_actual, output_folder):
"""
Score a decision tree model.
:param string model_name: title for the model used on the output filename
:param dataframe model: model reference
:param dataframe X: examples
:param dataframe y: targets
:param dataframe y_actual: target results
:param string output_folder: location of the output / results
"""
print("Scoring model...")
model_score = model.score(X, y)
mse = mean_squared_error(y, y_actual)
mse_score = model_name, "- Mean Squared Error:", mse
accuracy = model_name, "- Accuracy score (%):", "{:.2%}".format(model_score)
# write to file
path = output_folder + '/models'
create_folder_if_not_exists(path)
filename = path + '/score_' + model_name + '.txt'
    with open(filename, 'w') as scores:
        print(mse_score, file=scores)
        print(accuracy, file=scores)
print("Scores saved location:", filename)
def plot_dt(model_name, y_actual, y_test, output_folder):
"""
Plot decision tree, y (training) vs y (test/actual).
:param string model_name: title for the model used on the output filename
:param dataframe y_actual: target results
:param dataframe y_test: test targets
:param string output_folder: location of the output / results
"""
# initialise plot path
path = output_folder + '/models'
print("Plotting results...")
plt.scatter(y_actual, y_test, label='Duration')
plt.title('Decision Tree')
plt.plot([0, 1], [0, 1], '--k', transform=plt.gca().transAxes)
plt.xlabel('y (actual)')
plt.ylabel('y (test)')
plt.legend()
plot_path = path + '/plot_' + model_name + '.png'
plt.savefig(plot_path)
print("Plot saved location:", plot_path)
def save_dt_model(model_name, model, folder):
"""
Save model using Pickle binary format.
:param dataframe model: model reference
:param string model_name: title for the model used on the output filename
:param string folder: location of model output
"""
print("Saving model...")
model_file = folder + '/models/' + model_name + '.pkl'
path = open(model_file, 'wb')
pickle.dump(model, path)
print("Model saved location:", model_file)
def load_dt_model(pickle_model):
"""
    Retrieve a model from its Pickle binary representation.
    :param bytes pickle_model: pickled model bytes (e.g. as produced by pickle.dumps)
:return: Pickle model for re-use
:rtype: object
"""
return pickle.loads(pickle_model)
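# Illustrative usage sketch, not part of the original module. The model, toy data
# and output folder below are assumptions for demonstration only.
def _example_workflow(output_folder='/tmp/dt_demo'):
    from sklearn.tree import DecisionTreeRegressor
    X = [[0], [1], [2], [3]]
    y = [0.0, 1.0, 2.0, 3.0]
    model = DecisionTreeRegressor().fit(X, y)
    y_predicted = model.predict(X)
    score_dt('demo_dt', model, X, y, y_predicted, output_folder)
    plot_dt('demo_dt', y_predicted, y, output_folder)
    save_dt_model('demo_dt', model, output_folder)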
| priscillaboyd/SPaT_Prediction | src/decision_tree/DT_Utils.py | Python | apache-2.0 | 3,533 |
from artnet import *
import SocketServer
import time, os, random, datetime, sys
import argparse
import socket
import struct
from subprocess import Popen, PIPE, STDOUT
import glob
DEBUG = False
UDP_IP = "2.0.0.61"
UDP_PORT = 6454
| ScienceWorldCA/domelights | backend/artnet-bridge/artnet-server.py | Python | apache-2.0 | 234 |
import sys
from drone.actions.emr_launcher import launch_emr_task
from drone.actions.ssh_launcher import launch_ssh_task
from drone.job_runner.dependency_manager import dependencies_are_met
from drone.job_runner.job_progress_checker import check_running_job_progress
from drone.metadata.metadata import get_job_info, job_status, set_ready, set_running, set_failed
task_launcher = {'ssh': launch_ssh_task,
'emr': launch_emr_task}
def process(job_config, settings):
for job_id, schedule_time, execution_time, status, runs, uid in get_job_info(job_config.get('id'),
db_name=settings.metadata):
if status == job_status.get('failed'):
if (int(job_config.get('retry')) if job_config.get('retry') else 0) > int(runs):
settings.logger.debug(
'%s runs %s. set retries %s.' % (job_config.get('id'), runs, job_config.get('retry')))
if dependencies_are_met(job_config, schedule_time, settings):
set_ready(job_config.get('id'), schedule_time, db_name=settings.metadata)
settings.logger.info('Job "%s" "%s" set as ready' % (job_config.get('id'), schedule_time))
run(job_config, schedule_time, settings)
continue
else:
continue
else:
continue
elif status == job_status.get('running'):
check_running_job_progress(job_config, schedule_time, uid, settings)
continue
elif status == job_status.get('ready'):
run(job_config, schedule_time, settings)
elif status == job_status.get('succeeded'):
continue
elif status == job_status.get('not_ready'):
if dependencies_are_met(job_config, schedule_time, settings):
set_ready(job_config.get('id'), schedule_time, db_name=settings.metadata)
settings.logger.info('Job "%s" "%s" set as ready' % (job_config.get('id'), schedule_time))
run(job_config, schedule_time, settings)
else:
continue
else:
settings.logger.error('Unknown job status "%s"' % status)
sys.exit(1)
def run(job_config, schedule_time, settings):
settings.logger.info('Starting job "%s" "%s"' % (job_config.get('id'), schedule_time))
job_type = job_config.get('type')
    if job_type not in settings.supported_job_types:
        settings.logger.warning(
            'Unsupported job type %s. Valid types are %s' % (job_type, str(settings.supported_job_types)))
    task_launched_successfully, uid = task_launcher.get(job_type)(job_config, schedule_time, settings)
    if task_launched_successfully:
set_running(job_config.get('id'), schedule_time, uid, db_name=settings.metadata)
settings.logger.info('Started job "%s" "%s"' % (job_config.get('id'), schedule_time))
else:
set_failed(job_config.get('id'), schedule_time, db_name=settings.metadata)
settings.logger.warning('Failed to start job "%s" "%s"' % (job_config.get('id'), schedule_time))
| grafke/Drone-workflow-controller | drone/job_runner/job_runner.py | Python | apache-2.0 | 3,220 |
import scrapy
from scrapy import log
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from rcbi.items import Part
import copy
import json
import re
VARIANT_JSON_REGEX = re.compile("product: ({.*}),")
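# Illustrative sketch, not part of the original spider: the regex pulls the
# embedded Shopify product JSON out of an inline script, e.g.
#   m = VARIANT_JSON_REGEX.search('cfg = { product: {"id": 1, "title": "Frame"}, };')
#   json.loads(m.group(1))  # -> {'id': 1, 'title': 'Frame'}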
class ShendronesSpider(CrawlSpider):
name = "shendrones"
allowed_domains = ["shendrones.myshopify.com"]
start_urls = ["http://shendrones.myshopify.com/collections/all"]
rules = (
Rule(LinkExtractor(restrict_css=[".grid-item"]), callback='parse_item'),
)
def parse_item(self, response):
item = Part()
item["site"] = self.name
variant = {}
item["variants"] = [variant]
base_url = response.url
item["manufacturer"] = "Shendrones"
# Find the json info for variants.
body = response.body_as_unicode()
m = VARIANT_JSON_REGEX.search(body)
if m:
shopify_info = json.loads(m.group(1))
global_title = shopify_info["title"]
preorder = False
if global_title.endswith("Pre Order"):
global_title = global_title[:-len("Pre Order")].strip()
variant["stock_state"] = "backordered"
preorder = True
for v in shopify_info["variants"]:
if v["title"] != "Default Title":
item["name"] = global_title + " " + v["title"]
variant["url"] = base_url + "?variant=" + str(v["id"])
else:
item["name"] = global_title
variant["url"] = base_url
variant["price"] = "${:.2f}".format(v["price"] / 100)
if not preorder:
if v["inventory_quantity"] <= 0:
if v["inventory_policy"] == "deny":
variant["stock_state"] = "out_of_stock"
else:
variant["stock_state"] = "backordered"
elif v["inventory_quantity"] < 3:
variant["stock_state"] = "low_stock"
variant["stock_text"] = "Only " + str(v["inventory_quantity"]) + " left!"
else:
variant["stock_state"] = "in_stock"
yield item
item = copy.deepcopy(item)
variant = item["variants"][0]
| rcbuild-info/scrape | rcbi/rcbi/spiders/Shendrones.py | Python | apache-2.0 | 2,069 |
from karld.loadump import dump_dicts_to_json_file
from karld.loadump import ensure_dir
from karld.loadump import ensure_file_path_dir
from karld.loadump import i_get_csv_data
from karld.loadump import is_file_csv
from karld.loadump import i_get_json_data
from karld.loadump import is_file_json
from karld.loadump import raw_line_reader
from karld.loadump import split_csv_file
from karld.loadump import split_file
from karld.loadump import split_file_output
from karld.loadump import split_file_output_csv
from karld.loadump import split_file_output_json
from karld.loadump import write_as_csv
from karld.loadump import write_as_json
| johnwlockwood/karl_data | karld/io.py | Python | apache-2.0 | 641 |
# Copyright 2014 Rackspace Hosting
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from mock import Mock, patch
from trove.backup import models as backup_models
from trove.common import cfg
from trove.common import exception
from trove.common.instance import ServiceStatuses
from trove.datastore import models as datastore_models
from trove.instance import models
from trove.instance.models import DBInstance
from trove.instance.models import filter_ips
from trove.instance.models import Instance
from trove.instance.models import InstanceServiceStatus
from trove.instance.models import SimpleInstance
from trove.instance.tasks import InstanceTasks
from trove.taskmanager import api as task_api
from trove.tests.fakes import nova
from trove.tests.unittests import trove_testtools
from trove.tests.unittests.util import util
CONF = cfg.CONF
class SimpleInstanceTest(trove_testtools.TestCase):
def setUp(self):
super(SimpleInstanceTest, self).setUp()
db_info = DBInstance(
InstanceTasks.BUILDING, name="TestInstance")
self.instance = SimpleInstance(
None, db_info, InstanceServiceStatus(
ServiceStatuses.BUILDING), ds_version=Mock(), ds=Mock())
db_info.addresses = {"private": [{"addr": "123.123.123.123"}],
"internal": [{"addr": "10.123.123.123"}],
"public": [{"addr": "15.123.123.123"}]}
self.orig_conf = CONF.network_label_regex
self.orig_ip_regex = CONF.ip_regex
self.orig_black_list_regex = CONF.black_list_regex
def tearDown(self):
super(SimpleInstanceTest, self).tearDown()
CONF.network_label_regex = self.orig_conf
CONF.ip_start = None
def test_get_root_on_create(self):
root_on_create_val = Instance.get_root_on_create(
'redis')
self.assertFalse(root_on_create_val)
def test_filter_ips_white_list(self):
CONF.network_label_regex = '.*'
CONF.ip_regex = '^(15.|123.)'
CONF.black_list_regex = '^10.123.123.*'
ip = self.instance.get_visible_ip_addresses()
ip = filter_ips(
ip, CONF.ip_regex, CONF.black_list_regex)
self.assertEqual(2, len(ip))
self.assertTrue('123.123.123.123' in ip)
self.assertTrue('15.123.123.123' in ip)
def test_filter_ips_black_list(self):
CONF.network_label_regex = '.*'
CONF.ip_regex = '.*'
CONF.black_list_regex = '^10.123.123.*'
ip = self.instance.get_visible_ip_addresses()
ip = filter_ips(
ip, CONF.ip_regex, CONF.black_list_regex)
self.assertEqual(2, len(ip))
self.assertTrue('10.123.123.123' not in ip)
def test_one_network_label(self):
CONF.network_label_regex = 'public'
ip = self.instance.get_visible_ip_addresses()
self.assertEqual(['15.123.123.123'], ip)
def test_two_network_labels(self):
CONF.network_label_regex = '^(private|public)$'
ip = self.instance.get_visible_ip_addresses()
self.assertEqual(2, len(ip))
self.assertTrue('123.123.123.123' in ip)
self.assertTrue('15.123.123.123' in ip)
def test_all_network_labels(self):
CONF.network_label_regex = '.*'
ip = self.instance.get_visible_ip_addresses()
self.assertEqual(3, len(ip))
self.assertTrue('10.123.123.123' in ip)
self.assertTrue('123.123.123.123' in ip)
self.assertTrue('15.123.123.123' in ip)
class CreateInstanceTest(trove_testtools.TestCase):
@patch.object(task_api.API, 'get_client', Mock(return_value=Mock()))
def setUp(self):
util.init_db()
self.context = trove_testtools.TroveTestContext(self, is_admin=True)
self.name = "name"
self.flavor_id = 5
self.image_id = "UUID"
self.databases = []
self.users = []
self.datastore = datastore_models.DBDatastore.create(
id=str(uuid.uuid4()),
name='mysql' + str(uuid.uuid4()),
)
self.datastore_version = (
datastore_models.DBDatastoreVersion.create(
id=str(uuid.uuid4()),
datastore_id=self.datastore.id,
name="5.5" + str(uuid.uuid4()),
manager="mysql",
image_id="image_id",
packages="",
active=True))
self.volume_size = 1
self.az = "az"
self.nics = None
self.configuration = None
self.tenant_id = "UUID"
self.datastore_version_id = str(uuid.uuid4())
self.db_info = DBInstance.create(
name=self.name, flavor_id=self.flavor_id,
tenant_id=self.tenant_id,
volume_size=self.volume_size,
datastore_version_id=self.datastore_version.id,
task_status=InstanceTasks.BUILDING,
configuration_id=self.configuration
)
self.backup_name = "name"
self.descr = None
self.backup_state = backup_models.BackupState.COMPLETED
self.instance_id = self.db_info.id
self.parent_id = None
self.deleted = False
self.backup = backup_models.DBBackup.create(
name=self.backup_name,
description=self.descr,
tenant_id=self.tenant_id,
state=self.backup_state,
instance_id=self.instance_id,
parent_id=self.parent_id,
datastore_version_id=self.datastore_version.id,
deleted=False
)
self.backup.size = 1.1
self.backup.save()
self.backup_id = self.backup.id
self.orig_client = models.create_nova_client
models.create_nova_client = nova.fake_create_nova_client
self.orig_api = task_api.API(self.context).create_instance
task_api.API(self.context).create_instance = Mock()
self.run_with_quotas = models.run_with_quotas
models.run_with_quotas = Mock()
self.check = backup_models.DBBackup.check_swift_object_exist
backup_models.DBBackup.check_swift_object_exist = Mock(
return_value=True)
super(CreateInstanceTest, self).setUp()
@patch.object(task_api.API, 'get_client', Mock(return_value=Mock()))
def tearDown(self):
self.db_info.delete()
self.backup.delete()
self.datastore.delete()
self.datastore_version.delete()
models.create_nova_client = self.orig_client
task_api.API(self.context).create_instance = self.orig_api
models.run_with_quotas = self.run_with_quotas
backup_models.DBBackup.check_swift_object_exist = self.check
super(CreateInstanceTest, self).tearDown()
def test_exception_on_invalid_backup_size(self):
self.assertEqual(self.backup.id, self.backup_id)
exc = self.assertRaises(
exception.BackupTooLarge, models.Instance.create,
self.context, self.name, self.flavor_id,
self.image_id, self.databases, self.users,
self.datastore, self.datastore_version,
self.volume_size, self.backup_id,
self.az, self.nics, self.configuration
)
self.assertIn("Backup is too large for "
"given flavor or volume.", str(exc))
def test_can_restore_from_backup_with_almost_equal_size(self):
        # backup size is just under the 1 GB volume size
self.backup.size = 0.99
self.backup.save()
instance = models.Instance.create(
self.context, self.name, self.flavor_id,
self.image_id, self.databases, self.users,
self.datastore, self.datastore_version,
self.volume_size, self.backup_id,
self.az, self.nics, self.configuration)
self.assertIsNotNone(instance)
class TestReplication(trove_testtools.TestCase):
def setUp(self):
util.init_db()
self.datastore = datastore_models.DBDatastore.create(
id=str(uuid.uuid4()),
name='name' + str(uuid.uuid4()),
default_version_id=str(uuid.uuid4()))
self.datastore_version = datastore_models.DBDatastoreVersion.create(
id=self.datastore.default_version_id,
name='name' + str(uuid.uuid4()),
image_id=str(uuid.uuid4()),
packages=str(uuid.uuid4()),
datastore_id=self.datastore.id,
manager='mysql',
active=1)
self.master = DBInstance(
InstanceTasks.NONE,
id=str(uuid.uuid4()),
name="TestMasterInstance",
datastore_version_id=self.datastore_version.id)
self.master.set_task_status(InstanceTasks.NONE)
self.master.save()
self.master_status = InstanceServiceStatus(
ServiceStatuses.RUNNING,
id=str(uuid.uuid4()),
instance_id=self.master.id)
self.master_status.save()
self.safe_nova_client = models.create_nova_client
models.create_nova_client = nova.fake_create_nova_client
super(TestReplication, self).setUp()
def tearDown(self):
self.master.delete()
self.master_status.delete()
self.datastore.delete()
self.datastore_version.delete()
models.create_nova_client = self.safe_nova_client
super(TestReplication, self).tearDown()
@patch('trove.instance.models.LOG')
def test_replica_of_not_active_master(self, mock_logging):
self.master.set_task_status(InstanceTasks.BUILDING)
self.master.save()
self.master_status.set_status(ServiceStatuses.BUILDING)
self.master_status.save()
self.assertRaises(exception.UnprocessableEntity,
Instance.create,
None, 'name', 1, "UUID", [], [], None,
self.datastore_version, 1,
None, slave_of_id=self.master.id)
@patch('trove.instance.models.LOG')
def test_replica_with_invalid_slave_of_id(self, mock_logging):
self.assertRaises(exception.NotFound,
Instance.create,
None, 'name', 1, "UUID", [], [], None,
self.datastore_version, 1,
None, slave_of_id=str(uuid.uuid4()))
def test_create_replica_from_replica(self):
self.replica_datastore_version = Mock(
spec=datastore_models.DBDatastoreVersion)
self.replica_datastore_version.id = "UUID"
self.replica_datastore_version.manager = 'mysql'
self.replica_info = DBInstance(
InstanceTasks.NONE,
id="UUID",
name="TestInstance",
datastore_version_id=self.replica_datastore_version.id,
slave_of_id=self.master.id)
self.replica_info.save()
self.assertRaises(exception.Forbidden, Instance.create,
None, 'name', 2, "UUID", [], [], None,
self.datastore_version, 1,
None, slave_of_id=self.replica_info.id)
| redhat-openstack/trove | trove/tests/unittests/instance/test_instance_models.py | Python | apache-2.0 | 11,729 |
# Copyright 2016 Nuage Netowrks USA Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import copy
import os
import oslo_messaging
import six
from neutron.agent.linux import ip_lib
from neutron.common import rpc as n_rpc
from neutron import context
from neutron_lib import constants
from neutron_lib.plugins import directory
from neutron_vpnaas.services.vpn import device_drivers
from neutron_vpnaas.services.vpn.device_drivers import fedora_strongswan_ipsec
from neutron_vpnaas.services.vpn.device_drivers import ipsec
from neutron_vpnaas.services.vpn.device_drivers import strongswan_ipsec
from nuage_neutron.vpnaas.common import topics
from nuage_neutron.vpnaas.nuage_interface import NuageInterfaceDriver
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
LOG = logging.getLogger(__name__)
TEMPLATE_PATH = os.path.dirname(os.path.abspath(__file__))
IPSEC_CONNS = 'ipsec_site_connections'
class NuageIPsecVpnDriverApi(object):
"""IPSecVpnDriver RPC api."""
def __init__(self, topic):
target = oslo_messaging.Target(topic=topic, version='1.0')
self.client = n_rpc.get_client(target)
def get_vpn_services_on_host(self, context, host):
"""Get list of vpnservices.
        The returned vpnservices include the related ipsec_site_connection,
        ikepolicy and ipsecpolicy on this host.
"""
cctxt = self.client.prepare()
return cctxt.call(context, 'get_vpn_services_on_host', host=host)
def update_status(self, context, status):
"""Update local status.
        This method updates the status attribute of
VPNServices.
"""
cctxt = self.client.prepare()
return cctxt.call(context, 'update_status', status=status)
@six.add_metaclass(abc.ABCMeta)
class NuageIPsecDriver(device_drivers.DeviceDriver):
def __init__(self, vpn_service, host):
self.conf = vpn_service.conf
self.host = host
self.conn = n_rpc.create_connection(new=True)
self.context = context.get_admin_context_without_session()
self.topic = topics.NUAGE_IPSEC_AGENT_TOPIC
self.processes = {}
self.routers = {}
self.process_status_cache = {}
self.endpoints = [self]
self.conn.create_consumer(self.topic, self.endpoints)
self.conn.consume_in_threads()
self.agent_rpc = NuageIPsecVpnDriverApi(
topics.NUAGE_IPSEC_DRIVER_TOPIC)
self.process_status_cache_check = loopingcall.FixedIntervalLoopingCall(
self.report_status, self.context)
self.process_status_cache_check.start(
interval=20)
self.nuage_if_driver = NuageInterfaceDriver(cfg.CONF)
def _get_l3_plugin(self):
return directory.get_plugin(constants.L3)
def get_namespace(self, router_id):
"""Get namespace of router.
        :param router_id: router_id
:returns: namespace string.
"""
return 'vpn-' + router_id
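    # Illustrative note, not in the original source: for a router id such as
    # 'd3adbeef-1234', the namespace managed by this driver is 'vpn-d3adbeef-1234'.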
def vpnservice_updated(self, context, **kwargs):
"""Vpnservice updated rpc handler
VPN Service Driver will call this method
        when vpnservices are updated.
        Then this method starts a sync with the server.
"""
router = kwargs.get('router', None)
self.sync(context, [router] if router else [])
def tracking(self, context, **kwargs):
"""Handling create router event.
Agent calls this method, when the process namespace is ready.
Note: process_id == router_id == vpnservice_id
"""
router = kwargs.get('router', None)
process_id = router['id']
self.routers[process_id] = process_id
if process_id in self.processes:
            # In case the vpnservice was created
            # before the vpn service namespace was ready
process = self.processes[process_id]
process.enable()
def non_tracking(self, context, **kwargs):
router = kwargs.get('router', None)
process_id = router['id']
self.destroy_process(process_id)
if process_id in self.routers:
del self.routers[process_id]
def ensure_process(self, process_id, vpnservice=None):
"""Ensuring process.
        If the process doesn't exist, it will create the process
        and store it in self.processes.
"""
process = self.processes.get(process_id)
if not process or not process.namespace:
namespace = self.get_namespace(process_id)
process = self.create_process(
process_id,
vpnservice,
namespace)
self.processes[process_id] = process
elif vpnservice:
process.update_vpnservice(vpnservice)
return process
@lockutils.synchronized('vpn-agent', 'neutron-')
def sync(self, context, routers):
"""Sync status with server side.
:param context: context object for RPC call
:param routers: Router objects which is created in this sync event
        There are many failure cases that should be
        considered, including the following:
        1) Agent class restarted
        2) Failure on process creation
        3) VpnService is deleted while the agent is down
        4) RPC failure
        In order to handle these failure cases,
        the driver needs to take sync strategies.
"""
vpnservices = self.agent_rpc.get_vpn_services_on_host(
context, self.host)
router_ids = [vpnservice['router_id'] for vpnservice in vpnservices]
sync_router_ids = [router['id'] for router in routers]
self._sync_vpn_processes(vpnservices, sync_router_ids)
self._delete_vpn_processes(sync_router_ids, router_ids)
self._cleanup_stale_vpn_processes(router_ids)
self.report_status(context)
def get_process_status_cache(self, process):
if not self.process_status_cache.get(process.id):
self.process_status_cache[process.id] = {
'status': None,
'id': process.vpnservice['id'],
'updated_pending_status': False,
'ipsec_site_connections': {}}
return self.process_status_cache[process.id]
def report_status(self, context):
status_changed_vpn_services = []
for process in self.processes.values():
previous_status = self.get_process_status_cache(process)
if self.is_status_updated(process, previous_status):
new_status = self.copy_process_status(process)
self.update_downed_connections(process.id, new_status)
status_changed_vpn_services.append(new_status)
self.process_status_cache[process.id] = (
self.copy_process_status(process))
# We need unset updated_pending status after it
# is reported to the server side
self.unset_updated_pending_status(process)
if status_changed_vpn_services:
self.agent_rpc.update_status(context,
status_changed_vpn_services)
def _sync_vpn_processes(self, vpnservices, sync_router_ids):
for vpnservice in vpnservices:
if vpnservice['router_id'] not in self.processes or (
vpnservice['router_id'] in sync_router_ids):
process = self.ensure_process(vpnservice['router_id'],
vpnservice=vpnservice)
router = self.routers.get(vpnservice['router_id'])
if not router:
continue
process.update()
def _delete_vpn_processes(self, sync_router_ids, vpn_router_ids):
for process_id in sync_router_ids:
if process_id not in vpn_router_ids:
self.destroy_process(process_id)
def _cleanup_stale_vpn_processes(self, vpn_router_ids):
process_ids = [pid for pid in self.processes
if pid not in vpn_router_ids]
for process_id in process_ids:
self.destroy_process(process_id)
def is_status_updated(self, process, previous_status):
if process.updated_pending_status:
return True
if process.status != previous_status['status']:
return True
if (process.connection_status !=
previous_status['ipsec_site_connections']):
return True
def unset_updated_pending_status(self, process):
process.updated_pending_status = False
for connection_status in process.connection_status.values():
connection_status['updated_pending_status'] = False
def copy_process_status(self, process):
return {
'id': process.vpnservice['id'],
'status': process.status,
'updated_pending_status': process.updated_pending_status,
'ipsec_site_connections': copy.deepcopy(process.connection_status)
}
def update_downed_connections(self, process_id, new_status):
"""Update info to be reported, if connections just went down.
If there is no longer any information for a connection, because it
has been removed (e.g. due to an admin down of VPN service or IPSec
connection), but there was previous status information for the
connection, mark the connection as down for reporting purposes.
"""
if process_id in self.process_status_cache:
for conn in self.process_status_cache[process_id][IPSEC_CONNS]:
if conn not in new_status[IPSEC_CONNS]:
new_status[IPSEC_CONNS][conn] = {
'status': constants.DOWN,
'updated_pending_status': True
}
def create_router(self, router):
"""Handling create router event."""
pass
def destroy_router(self, process_id):
pass
def destroy_process(self, process_id):
"""Destroy process.
        Disable the process and remove the process
        manager for processes that are no longer running a vpn service.
"""
if process_id in self.processes:
process = self.processes[process_id]
process.disable()
if process_id in self.processes:
del self.processes[process_id]
def plug_to_ovs(self, context, **kwargs):
self.nuage_if_driver.plug(kwargs['network_id'], kwargs['port_id'],
kwargs['device_name'], kwargs['mac'],
'alubr0', kwargs['ns_name'])
self.nuage_if_driver.init_l3(kwargs['device_name'], kwargs['cidr'],
kwargs['ns_name'])
device = ip_lib.IPDevice(kwargs['device_name'],
namespace=kwargs['ns_name'])
for gateway_ip in kwargs['gw_ip']:
device.route.add_gateway(gateway_ip)
def unplug_from_ovs(self, context, **kwargs):
self.nuage_if_driver.unplug(kwargs['device_name'], 'alubr0',
kwargs['ns_name'])
ip = ip_lib.IPWrapper(kwargs['ns_name'])
ip.garbage_collect_namespace()
# On Redhat deployments an additional directory is created named
# 'ip_vti0' in the namespace which prevents the cleanup
# of namespace by the neutron agent in 'ip_lib.py' which we clean.
if kwargs['ns_name'] in ip.get_namespaces():
ip.netns.delete(kwargs['ns_name'])
class NuageOpenSwanDriver(NuageIPsecDriver):
def create_process(self, process_id, vpnservice, namespace):
return ipsec.OpenSwanProcess(
self.conf,
process_id,
vpnservice,
namespace)
class NuageStrongSwanDriver(NuageIPsecDriver):
def create_process(self, process_id, vpnservice, namespace):
return strongswan_ipsec.StrongSwanProcess(
self.conf,
process_id,
vpnservice,
namespace)
class NuageStrongSwanDriverFedora(NuageIPsecDriver):
def create_process(self, process_id, vpnservice, namespace):
return fedora_strongswan_ipsec.FedoraStrongSwanProcess(
self.conf,
process_id,
vpnservice,
namespace)
| naveensan1/nuage-openstack-neutron | nuage_neutron/vpnaas/device_drivers/driver.py | Python | apache-2.0 | 12,933 |
from django.conf.urls import patterns, include, url
from django.contrib import admin
from api import views
admin.autodiscover()
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register(r'headings', views.HeadingViewSet)
router.register(r'users', views.UserViewSet)
urlpatterns = patterns('',
url(r'^', include(router.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
)
| timokoola/okrest | okrest/okrest/urls.py | Python | apache-2.0 | 449 |
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logbook
import math
import numpy as np
import numpy.linalg as la
from six import iteritems
from zipline.finance import trading
import pandas as pd
from . import risk
from . risk import (
alpha,
check_entry,
information_ratio,
sharpe_ratio,
sortino_ratio,
)
log = logbook.Logger('Risk Period')
choose_treasury = functools.partial(risk.choose_treasury,
risk.select_treasury_duration)
class RiskMetricsPeriod(object):
def __init__(self, start_date, end_date, returns,
benchmark_returns=None):
treasury_curves = trading.environment.treasury_curves
if treasury_curves.index[-1] >= start_date:
mask = ((treasury_curves.index >= start_date) &
(treasury_curves.index <= end_date))
self.treasury_curves = treasury_curves[mask]
else:
# our test is beyond the treasury curve history
# so we'll use the last available treasury curve
self.treasury_curves = treasury_curves[-1:]
self.start_date = start_date
self.end_date = end_date
if benchmark_returns is None:
br = trading.environment.benchmark_returns
benchmark_returns = br[(br.index >= returns.index[0]) &
(br.index <= returns.index[-1])]
self.algorithm_returns = self.mask_returns_to_period(returns)
self.benchmark_returns = self.mask_returns_to_period(benchmark_returns)
self.calculate_metrics()
def calculate_metrics(self):
self.benchmark_period_returns = \
self.calculate_period_returns(self.benchmark_returns)
self.algorithm_period_returns = \
self.calculate_period_returns(self.algorithm_returns)
if not self.algorithm_returns.index.equals(
self.benchmark_returns.index
):
message = "Mismatch between benchmark_returns ({bm_count}) and \
algorithm_returns ({algo_count}) in range {start} : {end}"
message = message.format(
bm_count=len(self.benchmark_returns),
algo_count=len(self.algorithm_returns),
start=self.start_date,
end=self.end_date
)
raise Exception(message)
self.num_trading_days = len(self.benchmark_returns)
self.benchmark_volatility = self.calculate_volatility(
self.benchmark_returns)
self.algorithm_volatility = self.calculate_volatility(
self.algorithm_returns)
self.treasury_period_return = choose_treasury(
self.treasury_curves,
self.start_date,
self.end_date
)
self.sharpe = self.calculate_sharpe()
self.sortino = self.calculate_sortino()
self.information = self.calculate_information()
self.beta, self.algorithm_covariance, self.benchmark_variance, \
self.condition_number, self.eigen_values = self.calculate_beta()
self.alpha = self.calculate_alpha()
self.excess_return = self.algorithm_period_returns - \
self.treasury_period_return
self.max_drawdown = self.calculate_max_drawdown()
def to_dict(self):
"""
Creates a dictionary representing the state of the risk report.
        Returns a dict object keyed by metric name.
"""
period_label = self.end_date.strftime("%Y-%m")
rval = {
'trading_days': self.num_trading_days,
'benchmark_volatility': self.benchmark_volatility,
'algo_volatility': self.algorithm_volatility,
'treasury_period_return': self.treasury_period_return,
'algorithm_period_return': self.algorithm_period_returns,
'benchmark_period_return': self.benchmark_period_returns,
'sharpe': self.sharpe,
'sortino': self.sortino,
'information': self.information,
'beta': self.beta,
'alpha': self.alpha,
'excess_return': self.excess_return,
'max_drawdown': self.max_drawdown,
'period_label': period_label
}
return {k: None if check_entry(k, v) else v
for k, v in iteritems(rval)}
def __repr__(self):
statements = []
metrics = [
"algorithm_period_returns",
"benchmark_period_returns",
"excess_return",
"num_trading_days",
"benchmark_volatility",
"algorithm_volatility",
"sharpe",
"sortino",
"information",
"algorithm_covariance",
"benchmark_variance",
"beta",
"alpha",
"max_drawdown",
"algorithm_returns",
"benchmark_returns",
"condition_number",
"eigen_values"
]
for metric in metrics:
value = getattr(self, metric)
statements.append("{m}:{v}".format(m=metric, v=value))
return '\n'.join(statements)
def mask_returns_to_period(self, daily_returns):
if isinstance(daily_returns, list):
returns = pd.Series([x.returns for x in daily_returns],
index=[x.date for x in daily_returns])
else: # otherwise we're receiving an index already
returns = daily_returns
trade_days = trading.environment.trading_days
trade_day_mask = returns.index.normalize().isin(trade_days)
mask = ((returns.index >= self.start_date) &
(returns.index <= self.end_date) & trade_day_mask)
returns = returns[mask]
return returns
def calculate_period_returns(self, returns):
period_returns = (1. + returns).prod() - 1
return period_returns
def calculate_volatility(self, daily_returns):
return np.std(daily_returns, ddof=1) * math.sqrt(self.num_trading_days)
def calculate_sharpe(self):
"""
http://en.wikipedia.org/wiki/Sharpe_ratio
"""
return sharpe_ratio(self.algorithm_volatility,
self.algorithm_period_returns,
self.treasury_period_return)
def calculate_sortino(self, mar=None):
"""
http://en.wikipedia.org/wiki/Sortino_ratio
"""
if mar is None:
mar = self.treasury_period_return
return sortino_ratio(self.algorithm_returns,
self.algorithm_period_returns,
mar)
def calculate_information(self):
"""
http://en.wikipedia.org/wiki/Information_ratio
"""
return information_ratio(self.algorithm_returns,
self.benchmark_returns)
def calculate_beta(self):
"""
.. math::
\\beta_a = \\frac{\mathrm{Cov}(r_a,r_p)}{\mathrm{Var}(r_p)}
http://en.wikipedia.org/wiki/Beta_(finance)
"""
# it doesn't make much sense to calculate beta for less than two days,
# so return none.
if len(self.algorithm_returns) < 2:
return 0.0, 0.0, 0.0, 0.0, []
returns_matrix = np.vstack([self.algorithm_returns,
self.benchmark_returns])
C = np.cov(returns_matrix, ddof=1)
eigen_values = la.eigvals(C)
condition_number = max(eigen_values) / min(eigen_values)
algorithm_covariance = C[0][1]
benchmark_variance = C[1][1]
beta = algorithm_covariance / benchmark_variance
return (
beta,
algorithm_covariance,
benchmark_variance,
condition_number,
eigen_values
)
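    # Illustrative sketch, not part of the original class: the beta computed above
    # is the off-diagonal covariance term over the benchmark variance, e.g. with numpy:
    #   C = np.cov(np.vstack([algo_returns, bench_returns]), ddof=1)
    #   beta = C[0][1] / C[1][1]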
def calculate_alpha(self):
"""
http://en.wikipedia.org/wiki/Alpha_(investment)
"""
return alpha(self.algorithm_period_returns,
self.treasury_period_return,
self.benchmark_period_returns,
self.beta)
def calculate_max_drawdown(self):
compounded_returns = []
cur_return = 0.0
for r in self.algorithm_returns:
try:
cur_return += math.log(1.0 + r)
# this is a guard for a single day returning -100%
except ValueError:
log.debug("{cur} return, zeroing the returns".format(
cur=cur_return))
cur_return = 0.0
# BUG? Shouldn't this be set to log(1.0 + 0) ?
compounded_returns.append(cur_return)
cur_max = None
max_drawdown = None
for cur in compounded_returns:
if cur_max is None or cur > cur_max:
cur_max = cur
drawdown = (cur - cur_max)
if max_drawdown is None or drawdown < max_drawdown:
max_drawdown = drawdown
if max_drawdown is None:
return 0.0
return 1.0 - math.exp(max_drawdown)
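# Illustrative sketch, not part of the original module: a standalone version of
# the drawdown math used above, for a toy returns series.
def _example_max_drawdown(returns=(0.10, -0.05, -0.20, 0.15)):
    compounded, cur = [], 0.0
    for r in returns:
        cur += math.log(1.0 + r)
        compounded.append(cur)
    peak = compounded[0]
    worst = 0.0
    for level in compounded:
        peak = max(peak, level)
        worst = min(worst, level - peak)
    # same transform as calculate_max_drawdown: convert the log-space dip back
    # to a fractional loss
    return 1.0 - math.exp(worst)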
| lsbardel/zipline | zipline/finance/risk/period.py | Python | apache-2.0 | 9,654 |
#!/usr/bin/python
#-*-coding:utf8-*-
from bs4 import BeautifulSoup as Soup
#import pandas as pd
import glob
import sys
import re
"""
XML version of CFDI 3.3
"""
class CFDI(object):
def __init__(self, f):
"""
        Constructor that requires a string with the name of the CFDI file
        as its parameter.
"""
fxml = open(f,'r').read()
soup = Soup(fxml,'lxml')
#============componentes del cfdi============
emisor = soup.find('cfdi:emisor')
receptor = soup.find('cfdi:receptor')
comprobante = soup.find('cfdi:comprobante')
tfd = soup.find('tfd:timbrefiscaldigital')
self.__version = comprobante['version']
self.__folio = comprobante['folio']
self.__uuid = tfd['uuid']
self.__fechatimbrado = tfd['fechatimbrado']
self.__traslados = soup.find_all(lambda e: e.name=='cfdi:traslado' and
sorted(e.attrs.keys())==['importe','impuesto','tasaocuota','tipofactor'])
self.__retenciones = soup.find_all(lambda e: e.name=='cfdi:retencion' and
sorted(e.attrs.keys())==['importe','impuesto'])
#============emisor==========================
self.__emisorrfc = emisor['rfc']
try:
self.__emisornombre = emisor['nombre']
except:
self.__emisornombre = emisor['rfc']
#============receptor========================
self.__receptorrfc = receptor['rfc']
try:
self.__receptornombre = receptor['nombre']
except:
self.__receptornombre = receptor['rfc']
#============comprobante=====================
self.__certificado = comprobante['certificado']
self.__sello = comprobante['sello']
self.__total = round(float(comprobante['total']),2)
self.__subtotal = round(float(comprobante['subtotal']),2)
self.__fecha_cfdi = comprobante['fecha']
self.__conceptos = soup.find_all(lambda e: e.name=='cfdi:concepto')
self.__n_conceptos = len(self.__conceptos)
try:
self.__moneda = comprobante['moneda']
except KeyError as k:
self.__moneda = 'MXN'
try:
self.__lugar = comprobante['lugarexpedicion']
except KeyError as k:
self.__lugar = u'México'
tipo = comprobante['tipodecomprobante']
if(float(self.__version)==3.2):
self.__tipo = tipo
else:
tcomprobantes = {'I':'Ingreso', 'E':'Egreso', 'N':'Nomina', 'P':'Pagado'}
self.__tipo = tcomprobantes[tipo]
try:
self.__tcambio = float(comprobante['tipocambio'])
except:
self.__tcambio = 1.
triva, trieps, trisr = self.__calcula_traslados()
self.__triva = round(triva,2)
self.__trieps = round(trieps,2)
self.__trisr = round(trisr,2)
retiva, retisr = self.__calcula_retenciones()
self.__retiva = round(retiva,2)
self.__retisr = round(retisr,2)
def __str__(self):
"""
        Renders the CFDI as a tab-separated string in the following order:
        emisor, fecha de timbrado, tipo de comprobante, rfc emisor, uuid,
        receptor, rfc receptor, subtotal, ieps, iva, retiva, retisr, tc, total
"""
respuesta = '\t'.join( map(str, self.lista_valores))
return respuesta
def __calcula_traslados(self):
triva, trieps, trisr = 0., 0., 0
for t in self.__traslados:
impuesto = t['impuesto']
importe = float(t['importe'])
if(self.__version=='3.2'):
if impuesto=='IVA':
triva += importe
elif impuesto=='ISR':
trisr += importe
elif impuesto=='IEPS':
trieps += importe
elif(self.__version=='3.3'):
if impuesto=='002':
triva += importe
elif impuesto=='001':
trisr += importe
elif impuesto=='003':
trieps += importe
return triva, trieps, trisr
def __calcula_retenciones(self):
retiva, retisr = 0., 0.
for t in self.__retenciones:
impuesto = t['impuesto']
importe = float(t['importe'])
if(self.__version=='3.2'):
if(impuesto=='ISR'):
retisr += importe
elif(impuesto=='IVA'):
retiva += importe
elif(self.__version=='3.3'):
if(impuesto=='002'):
retiva += importe
elif(impuesto=='001'):
retisr += importe
return retiva, retisr
@property
def lista_valores(self):
v = [self.__emisornombre,self.__fechatimbrado, self.__tipo, self.__emisorrfc ]
v += [self.__uuid, self.__folio, self.__receptornombre, self.__receptorrfc ]
v += [self.__subtotal, self.__trieps, self.__triva]
v += [self.__retiva, self.__retisr, self.__tcambio, self.__total]
return v
@property
def dic_cfdi(self):
d = {}
d["Emisor"] = self.__emisornombre
d["Fecha_CFDI"] = self.__fechatimbrado
d["Tipo"] = self.__tipo
d["RFC_Emisor"] = self.__emisorrfc
d["Folio_fiscal"] = self.__uuid
d["Folio"] = self.__folio
d["Receptor"] = self.__receptornombre
d["RFC_Receptor"] = self.__receptorrfc
d["Subtotal"] = self.__subtotal
d["IEPS"] = self.__trieps
d["IVA"] = self.__triva
d["Ret IVA"] = self.__retiva
d["Ret ISR"] = self.__retisr
d["TC"] = self.__tcambio
d["Total"] = self.__total
return d
@property
def certificado(self):
return self.__certificado
@property
def sello(self):
return self.__sello
@property
def total(self):
return self.__total
@property
def subtotal(self):
return self.__subtotal
@property
def fechatimbrado(self):
return self.__fechatimbrado
@property
def tipodecambio(self):
return self.__tcambio
@property
def lugar(self):
return self.__lugar
@property
def moneda(self):
return self.__moneda
@property
def traslado_iva(self):
return self.__triva
@property
def traslado_isr(self):
return self.__trisr
@property
def traslado_ieps(self):
return self.__trieps
@property
def n_conceptos(self):
return self.__n_conceptos
@property
def conceptos(self):
return self.__conceptos
@property
def folio(self):
return self.__folio
@staticmethod
def columnas():
return ["Emisor","Fecha_CFDI","Tipo","RFC_Emisor","Folio_fiscal","Folio","Receptor",
"RFC_Receptor", "Subtotal","IEPS","IVA","Ret IVA","Ret ISR","TC","Total"]
@staticmethod
def imprime_reporte(nf, nr):
reporte = "Número de archivos procesados:\t {}\n".format(nf)
reporte += "Número de filas en tsv:\t {}\n".format(nr)
if(nf!=nr):
reporte += "\n\n**** Atención ****\n"
return reporte
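# Illustrative sketch, not part of the original script: the summary report only
# appends the "**** Atención ****" warning block when the processed-file count
# and the written-row count differ.
def _demo_reporte():
    ok_report = CFDI.imprime_reporte(3, 3)
    mismatch_report = CFDI.imprime_reporte(3, 2)
    return ok_report, mismatch_report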
L = glob.glob('./*.xml')
#R = [ patt[1:].strip().lower() for patt in re.findall('(<cfdi:[A-z]*\s|<tfd:[A-z]*\s)',fxml)]
if __name__=='__main__':
salida = sys.argv[1]
fout = open(salida,'w')
columnas = CFDI.columnas()
titulo = '\t'.join(columnas)+'\n'
fout.write(titulo)
nl = 0
for f in L:
try:
#print("abriendo {0}".format(f))
rcfdi = CFDI(f)
dic = rcfdi.dic_cfdi
vals = [dic[c] for c in columnas]
strvals = ' \t '.join(map(str, vals))+'\n'
fout.write(strvals)
nl += 1
        except Exception:
            print("Error en archivo {0}".format(f))
fout.close()
nr = len(L)
rep = CFDI.imprime_reporte(nr, nl)
print(rep)
| sergiohzlz/lectorcfdi | extrainfo.py | Python | apache-2.0 | 8,345 |
#!/usr/bin/python3
from colorama import Fore, Back
class frets:
    tuning = list()
    max_string_name_len = 0
    frets_count = 0
    strings = dict()
NOTES = ('E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B', 'C', 'C#', 'D', 'D#')
def __init__(self,
tuning=('E', 'A', 'D', 'G'),
frets_count=24):
        self.tuning = tuning
        self.frets_count = frets_count
        self.max_string_name_len = 0
        # per-instance dict so separate fretboards do not share the class-level mapping
        self.strings = dict()
        for string in tuning:
            if len(string) > self.max_string_name_len:
                self.max_string_name_len = len(string)
            padding = ''
            self.strings[string] = list()
starting_note = self.NOTES.index(string) + 1
for i in range(frets_count):
padding = '^' * int(((starting_note + i) / len(self.NOTES)))
self.strings[string].append(self.NOTES[(starting_note + i) % len(self.NOTES)] + padding)
#print('{}{} ({}) = {}'.format(string,
# i,
# int(((starting_note + i) / len(self.NOTES))),
# self.NOTES[(starting_note + i) % len(self.NOTES)] + padding))
def debug_strings(self):
print(self.strings)
def show_me_plz(self,
seek_note=None,
seek_string=None):
if (seek_string):
seek_note = self.strings[seek_string[0]][int(seek_string[1]) - 1]
upper_seek_note = None
lower_seek_note = None
if seek_note and seek_note.endswith('^'):
lower_seek_note = seek_note[0:-1]
if seek_note:
upper_seek_note = seek_note + '^'
upper_found_position = list()
found_position = list()
lower_found_position = list()
print(Fore.WHITE + \
' ' * (self.max_string_name_len + 2),
end='')
for fret_nr in range(1, self.frets_count + 1):
print(Fore.WHITE + \
(' ' * (4 - len(str(fret_nr)))) + str(fret_nr),
end='')
print(Fore.YELLOW + '|', end='')
print('')
for string in reversed(self.tuning):
color = Fore.WHITE + Back.BLACK
if string == seek_note:
color = Fore.WHITE + Back.RED
found_position.append(string + "0")
elif string == upper_seek_note:
color = Fore.WHITE + Back.CYAN
upper_found_position.append(string + "0")
elif string == lower_seek_note:
color = Fore.WHITE + Back.MAGENTA
lower_found_position.append(string + "0")
print(color + \
(' ' * (self.max_string_name_len - len(string))) + \
string, end='')
print(Fore.YELLOW + '||', end='')
fret_nr = 1
for note in self.strings[string]:
color = Fore.WHITE + Back.BLACK
if note == seek_note:
color = Fore.WHITE + Back.RED
found_position.append(string + str(fret_nr))
elif note == upper_seek_note:
color = Fore.WHITE + Back.CYAN
upper_found_position.append(string + str(fret_nr))
elif note == lower_seek_note:
color = Fore.WHITE + Back.MAGENTA
lower_found_position.append(string + str(fret_nr))
print(color + \
note[0:4] + \
'-' * (4 - len(note)), end='')
print(Fore.YELLOW + Back.BLACK + '|', end='')
fret_nr += 1
print(Fore.WHITE + Back.BLACK + '')
print(Fore.WHITE + '\n')
print(Back.CYAN + ' ' + Back.BLACK + \
' Found octave-higher note {} on: {}'.format(upper_seek_note,
upper_found_position))
print(Back.RED + ' ' + Back.BLACK + \
' Found note {} on: {}'.format(seek_note,
found_position))
print(Fore.WHITE + \
Back.MAGENTA + ' ' + Back.BLACK + \
' Found octave-lower note {} on: {}'.format(lower_seek_note,
lower_found_position))
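# Illustrative sketch, not part of the original module: builds a short five-fret
# board and returns the notes computed for the low 'E' string,
# ['F', 'F#', 'G', 'G#', 'A']; show_me_plz() renders the same data in colour.
def _demo_frets():
    board = frets(tuning=('E', 'A', 'D', 'G'), frets_count=5)
    return board.strings['E']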
| mariuszlitwin/frets | frets.py | Python | apache-2.0 | 4,690 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os_resource_classes as orc
import os_traits
import six
from nova import context as nova_context
from nova import exception
from nova import objects
from nova.tests.functional.api import client as api_client
from nova.tests.functional import integrated_helpers
from nova import utils
class TestServicesAPI(integrated_helpers.ProviderUsageBaseTestCase):
compute_driver = 'fake.SmallFakeDriver'
def test_compute_service_delete_ensure_related_cleanup(self):
"""Tests deleting a compute service and the related cleanup associated
with that like the compute_nodes table entry, removing the host
from any aggregates, the host mapping in the API DB and the associated
resource provider in Placement.
"""
compute = self._start_compute('host1')
# Make sure our compute host is represented as expected.
services = self.admin_api.get_services(binary='nova-compute')
self.assertEqual(1, len(services))
service = services[0]
# Now create a host aggregate and add our host to it.
aggregate = self.admin_api.post_aggregate(
{'aggregate': {'name': 'agg1'}})
self.admin_api.add_host_to_aggregate(aggregate['id'], service['host'])
# Make sure the host is in the aggregate.
aggregate = self.admin_api.api_get(
'/os-aggregates/%s' % aggregate['id']).body['aggregate']
self.assertEqual([service['host']], aggregate['hosts'])
rp_uuid = self._get_provider_uuid_by_host(service['host'])
# We'll know there is a host mapping implicitly if os-hypervisors
# returned something in _get_provider_uuid_by_host, but let's also
# make sure the host mapping is there like we expect.
ctxt = nova_context.get_admin_context()
objects.HostMapping.get_by_host(ctxt, service['host'])
# Make sure there is a resource provider for that compute node based
# on the uuid.
resp = self.placement_api.get('/resource_providers/%s' % rp_uuid)
self.assertEqual(200, resp.status)
# Make sure the resource provider has inventory.
inventories = self._get_provider_inventory(rp_uuid)
# Expect a minimal set of inventory for the fake virt driver.
for resource_class in [orc.VCPU, orc.MEMORY_MB, orc.DISK_GB]:
self.assertIn(resource_class, inventories)
# Now create a server so that the resource provider has some allocation
# records.
flavor = self.api.get_flavors()[0]
server = self._boot_and_check_allocations(flavor, service['host'])
# Now the fun part, delete the compute service and make sure related
# resources are cleaned up, like the compute node, host mapping, and
# resource provider. We have to first stop the compute service so
# it doesn't recreate the compute node during the
# update_available_resource periodic task.
self.admin_api.put_service(service['id'], {'forced_down': True})
compute.stop()
# The first attempt should fail since there is an instance on the
# compute host.
ex = self.assertRaises(api_client.OpenStackApiException,
self.admin_api.api_delete,
'/os-services/%s' % service['id'])
self.assertIn('Unable to delete compute service that is hosting '
'instances.', six.text_type(ex))
self.assertEqual(409, ex.response.status_code)
# Now delete the instance and wait for it to be gone.
self._delete_and_check_allocations(server)
# Now we can delete the service.
self.admin_api.api_delete('/os-services/%s' % service['id'])
# Make sure the service is deleted.
services = self.admin_api.get_services(binary='nova-compute')
self.assertEqual(0, len(services))
# Make sure the host was removed from the aggregate.
aggregate = self.admin_api.api_get(
'/os-aggregates/%s' % aggregate['id']).body['aggregate']
self.assertEqual([], aggregate['hosts'])
# Trying to get the hypervisor should result in a 404.
self.admin_api.api_get(
'os-hypervisors?hypervisor_hostname_pattern=%s' % service['host'],
check_response_status=[404])
# The host mapping should also be gone.
self.assertRaises(exception.HostMappingNotFound,
objects.HostMapping.get_by_host,
ctxt, service['host'])
# And finally, the resource provider should also be gone. The API
# will perform a cascading delete of the resource provider inventory
# and allocation information.
resp = self.placement_api.get('/resource_providers/%s' % rp_uuid)
self.assertEqual(404, resp.status)
def test_evacuate_then_delete_compute_service(self):
"""Tests a scenario where a server is created on a host, the host
goes down, the server is evacuated to another host, and then the
source host compute service is deleted. After that the deleted
compute service is restarted. Related placement resources are checked
throughout.
"""
# Create our source host that we will evacuate *from* later.
host1 = self._start_compute('host1')
# Create a server which will go on host1 since it is the only host.
flavor = self.api.get_flavors()[0]
server = self._boot_and_check_allocations(flavor, 'host1')
# Get the compute service record for host1 so we can manage it.
service = self.admin_api.get_services(
binary='nova-compute', host='host1')[0]
# Get the corresponding resource provider uuid for host1.
rp_uuid = self._get_provider_uuid_by_host(service['host'])
# Make sure there is a resource provider for that compute node based
# on the uuid.
resp = self.placement_api.get('/resource_providers/%s' % rp_uuid)
self.assertEqual(200, resp.status)
# Down the compute service for host1 so we can evacuate from it.
self.admin_api.put_service(service['id'], {'forced_down': True})
host1.stop()
# Start another host and trigger the server evacuate to that host.
self._start_compute('host2')
self.admin_api.post_server_action(server['id'], {'evacuate': {}})
# The host does not change until after the status is changed to ACTIVE
# so wait for both parameters.
self._wait_for_server_parameter(server, {
'status': 'ACTIVE',
'OS-EXT-SRV-ATTR:host': 'host2'})
# Delete the compute service for host1 and check the related
# placement resources for that host.
self.admin_api.api_delete('/os-services/%s' % service['id'])
# Make sure the service is gone.
services = self.admin_api.get_services(
binary='nova-compute', host='host1')
self.assertEqual(0, len(services), services)
# FIXME(mriedem): This is bug 1829479 where the compute service is
# deleted but the resource provider is not because there are still
# allocations against the provider from the evacuated server.
resp = self.placement_api.get('/resource_providers/%s' % rp_uuid)
self.assertEqual(200, resp.status)
self.assertFlavorMatchesUsage(rp_uuid, flavor)
# Try to restart the host1 compute service to create a new resource
# provider.
self.restart_compute_service(host1)
# FIXME(mriedem): This is bug 1817833 where restarting the now-deleted
# compute service attempts to create a new resource provider with a
# new uuid but the same name which results in a conflict. The service
# does not die, however, because _update_available_resource_for_node
# catches and logs but does not re-raise the error.
log_output = self.stdlog.logger.output
self.assertIn('Error updating resources for node host1.', log_output)
self.assertIn('Failed to create resource provider host1', log_output)
def test_migrate_confirm_after_deleted_source_compute(self):
"""Tests a scenario where a server is cold migrated and while in
VERIFY_RESIZE status the admin attempts to delete the source compute
and then the user tries to confirm the resize.
"""
# Start a compute service and create a server there.
self._start_compute('host1')
host1_rp_uuid = self._get_provider_uuid_by_host('host1')
flavor = self.api.get_flavors()[0]
server = self._boot_and_check_allocations(flavor, 'host1')
# Start a second compute service so we can cold migrate there.
self._start_compute('host2')
host2_rp_uuid = self._get_provider_uuid_by_host('host2')
# Cold migrate the server to host2.
self._migrate_and_check_allocations(
server, flavor, host1_rp_uuid, host2_rp_uuid)
# Delete the source compute service.
service = self.admin_api.get_services(
binary='nova-compute', host='host1')[0]
# We expect the delete request to fail with a 409 error because of the
# instance in VERIFY_RESIZE status even though that instance is marked
# as being on host2 now.
ex = self.assertRaises(api_client.OpenStackApiException,
self.admin_api.api_delete,
'/os-services/%s' % service['id'])
self.assertEqual(409, ex.response.status_code)
self.assertIn('Unable to delete compute service that has in-progress '
'migrations', six.text_type(ex))
self.assertIn('There are 1 in-progress migrations involving the host',
self.stdlog.logger.output)
# The provider is still around because we did not delete the service.
resp = self.placement_api.get('/resource_providers/%s' % host1_rp_uuid)
self.assertEqual(200, resp.status)
self.assertFlavorMatchesUsage(host1_rp_uuid, flavor)
# Now try to confirm the migration.
self._confirm_resize(server)
# Delete the host1 service since the migration is confirmed and the
# server is on host2.
self.admin_api.api_delete('/os-services/%s' % service['id'])
# The host1 resource provider should be gone.
resp = self.placement_api.get('/resource_providers/%s' % host1_rp_uuid)
self.assertEqual(404, resp.status)
def test_resize_revert_after_deleted_source_compute(self):
"""Tests a scenario where a server is resized and while in
VERIFY_RESIZE status the admin attempts to delete the source compute
and then the user tries to revert the resize.
"""
# Start a compute service and create a server there.
self._start_compute('host1')
host1_rp_uuid = self._get_provider_uuid_by_host('host1')
flavors = self.api.get_flavors()
flavor1 = flavors[0]
flavor2 = flavors[1]
server = self._boot_and_check_allocations(flavor1, 'host1')
# Start a second compute service so we can resize there.
self._start_compute('host2')
host2_rp_uuid = self._get_provider_uuid_by_host('host2')
# Resize the server to host2.
self._resize_and_check_allocations(
server, flavor1, flavor2, host1_rp_uuid, host2_rp_uuid)
# Delete the source compute service.
service = self.admin_api.get_services(
binary='nova-compute', host='host1')[0]
# We expect the delete request to fail with a 409 error because of the
# instance in VERIFY_RESIZE status even though that instance is marked
# as being on host2 now.
ex = self.assertRaises(api_client.OpenStackApiException,
self.admin_api.api_delete,
'/os-services/%s' % service['id'])
self.assertEqual(409, ex.response.status_code)
self.assertIn('Unable to delete compute service that has in-progress '
'migrations', six.text_type(ex))
self.assertIn('There are 1 in-progress migrations involving the host',
self.stdlog.logger.output)
# The provider is still around because we did not delete the service.
resp = self.placement_api.get('/resource_providers/%s' % host1_rp_uuid)
self.assertEqual(200, resp.status)
self.assertFlavorMatchesUsage(host1_rp_uuid, flavor1)
# Now revert the resize.
self._revert_resize(server)
self.assertFlavorMatchesUsage(host1_rp_uuid, flavor1)
zero_flavor = {'vcpus': 0, 'ram': 0, 'disk': 0, 'extra_specs': {}}
self.assertFlavorMatchesUsage(host2_rp_uuid, zero_flavor)
# Delete the host2 service since the migration is reverted and the
# server is on host1 again.
service2 = self.admin_api.get_services(
binary='nova-compute', host='host2')[0]
self.admin_api.api_delete('/os-services/%s' % service2['id'])
# The host2 resource provider should be gone.
resp = self.placement_api.get('/resource_providers/%s' % host2_rp_uuid)
self.assertEqual(404, resp.status)
class ComputeStatusFilterTest(integrated_helpers.ProviderUsageBaseTestCase):
"""Tests the API, compute service and Placement interaction with the
COMPUTE_STATUS_DISABLED trait when a compute service is enable/disabled.
This version of the test uses the 2.latest microversion for testing the
2.53+ behavior of the PUT /os-services/{service_id} API.
"""
compute_driver = 'fake.SmallFakeDriver'
def _update_service(self, service, disabled, forced_down=None):
"""Update the service using the 2.53 request schema.
:param service: dict representing the service resource in the API
:param disabled: True if the service should be disabled, False if the
service should be enabled
:param forced_down: Optionally change the forced_down value.
"""
status = 'disabled' if disabled else 'enabled'
req = {'status': status}
if forced_down is not None:
req['forced_down'] = forced_down
self.admin_api.put_service(service['id'], req)
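    # Hedged illustration (added commentary, not part of the original test): a
    # disable + force-down update under the 2.53 schema ends up sending a body
    # like {'status': 'disabled', 'forced_down': True} to
    # PUT /os-services/{service_id} via admin_api.put_service().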
def test_compute_status_filter(self):
"""Tests the compute_status_filter placement request filter"""
# Start a compute service so a compute node and resource provider is
# created.
compute = self._start_compute('host1')
# Get the UUID of the resource provider that was created.
rp_uuid = self._get_provider_uuid_by_host('host1')
# Get the service from the compute API.
services = self.admin_api.get_services(binary='nova-compute',
host='host1')
self.assertEqual(1, len(services))
service = services[0]
# At this point, the service should be enabled and the
# COMPUTE_STATUS_DISABLED trait should not be set on the
# resource provider in placement.
self.assertEqual('enabled', service['status'])
rp_traits = self._get_provider_traits(rp_uuid)
trait = os_traits.COMPUTE_STATUS_DISABLED
self.assertNotIn(trait, rp_traits)
# Now disable the compute service via the API.
self._update_service(service, disabled=True)
# The update to placement should be synchronous so check the provider
# traits and COMPUTE_STATUS_DISABLED should be set.
rp_traits = self._get_provider_traits(rp_uuid)
self.assertIn(trait, rp_traits)
# Try creating a server which should fail because nothing is available.
networks = [{'port': self.neutron.port_1['id']}]
server_req = self._build_server(networks=networks)
server = self.api.post_server({'server': server_req})
server = self._wait_for_state_change(server, 'ERROR')
# There should be a NoValidHost fault recorded.
self.assertIn('fault', server)
self.assertIn('No valid host', server['fault']['message'])
# Now enable the service and the trait should be gone.
self._update_service(service, disabled=False)
rp_traits = self._get_provider_traits(rp_uuid)
self.assertNotIn(trait, rp_traits)
# Try creating another server and it should be OK.
server = self.api.post_server({'server': server_req})
self._wait_for_state_change(server, 'ACTIVE')
# Stop, force-down and disable the service so the API cannot call
# the compute service to sync the trait.
compute.stop()
self._update_service(service, disabled=True, forced_down=True)
# The API should have logged a message about the service being down.
self.assertIn('Compute service on host host1 is down. The '
'COMPUTE_STATUS_DISABLED trait will be synchronized '
'when the service is restarted.',
self.stdlog.logger.output)
# The trait should not be on the provider even though the node is
# disabled.
rp_traits = self._get_provider_traits(rp_uuid)
self.assertNotIn(trait, rp_traits)
# Restart the compute service which should sync and set the trait on
# the provider in placement.
self.restart_compute_service(compute)
rp_traits = self._get_provider_traits(rp_uuid)
self.assertIn(trait, rp_traits)
class ComputeStatusFilterTest211(ComputeStatusFilterTest):
"""Extends ComputeStatusFilterTest and uses the 2.11 API for the
legacy os-services disable/enable/force-down API behavior
"""
microversion = '2.11'
def _update_service(self, service, disabled, forced_down=None):
"""Update the service using the 2.11 request schema.
:param service: dict representing the service resource in the API
:param disabled: True if the service should be disabled, False if the
service should be enabled
:param forced_down: Optionally change the forced_down value.
"""
# Before 2.53 the service is uniquely identified by host and binary.
body = {
'host': service['host'],
'binary': service['binary']
}
# Handle forced_down first if provided since the enable/disable
# behavior in the API depends on it.
if forced_down is not None:
body['forced_down'] = forced_down
self.admin_api.api_put('/os-services/force-down', body)
if disabled:
self.admin_api.api_put('/os-services/disable', body)
else:
self.admin_api.api_put('/os-services/enable', body)
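    # Hedged illustration (added commentary): under the legacy 2.11 flow the
    # same disable + force-down update becomes two calls that reuse one body
    # dict, e.g. {'host': 'host1', 'binary': 'nova-compute', 'forced_down': True}
    # sent first to PUT /os-services/force-down and then to
    # PUT /os-services/disable ('host1' is simply the host name used in these
    # tests).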
def _get_provider_uuid_by_host(self, host):
# We have to temporarily mutate to 2.53 to get the hypervisor UUID.
with utils.temporary_mutation(self.admin_api, microversion='2.53'):
return super(ComputeStatusFilterTest211,
self)._get_provider_uuid_by_host(host)
| rahulunair/nova | nova/tests/functional/wsgi/test_services.py | Python | apache-2.0 | 19,683 |
#!/usr/bin/env python
# SIM-CITY client
#
# Copyright 2015 Netherlands eScience Center <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tarfile
class load_data:
'''
    Load the text contents of the members of a tar archive.
'''
def __init__(self, filename):
self.filename = filename
tfile, members = self.get_archive_object_tar()
self.read_files(tfile, members)
def get_archive_object_tar(self):
'''
        Return the open tarfile object and the list of its member names.
'''
tfile = tarfile.open(name=self.filename)
members = tfile.getnames()
return tfile, members
def read_files(self, tfile, members):
'''
        Populate self.data with the text read from each extractable member of the tarfile object.
'''
self.data = [tfile.extractfile(member).read() for member in members if
tfile.extractfile(member) is not None]
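# Hedged usage sketch (added; not part of the original module). The archive
# name is an assumption -- any gzipped tar of plain-text members behaves the
# same way:
#
#     loader = load_data('enron_mail_clean.tar.gz')
#     print(len(loader.data))        # number of text members extracted
#     print(loader.data[0][:200])    # first bytes of the first member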
def main():
load_data('enron_mail_clean.tar.gz')
import pdb
pdb.set_trace()
if __name__ == "__main__":
main()
| nlesc-sherlock/analyzing-corpora | corpora/load_archive.py | Python | apache-2.0 | 1,507 |
zhangyu | RobbieRain/he | test.py | Python | apache-2.0 | 7 |
from .fetch import FetchParser
from .json_ld import JsonLdParser
from .lom import LomParser
from .lrmi import LrmiParser
from .nsdl_dc import NsdlDcParser
__all__ = [
'FetchParser',
'JsonLdParser',
'LomParser',
'LrmiParser',
'NsdlDcParser',
]
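# Hedged usage sketch (added; the no-argument constructor below is an
# assumption -- check the individual parser modules for the real signatures):
#
#     from payload_schema import LomParser
#     parser = LomParser()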
| navnorth/LR-Data | src/payload_schema/__init__.py | Python | apache-2.0 | 265 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Python front-end supports for functions.
NOTE: functions are currently experimental and subject to change!
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import hashlib
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import function_pb2
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python.eager import context
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_to_function_def
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.util import compat
from tensorflow.python.util import function_utils
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
class Defun(object):
"""Decorator used to define TensorFlow functions.
Use this decorator to make a Python function usable directly as a TensorFlow
function.
The decorated function must add ops to the default graph and return zero or
more `Tensor` objects. Call the decorator with named arguments, one for each
argument of the function to decorate, with the expected type of the argument
as value.
For example if the function to decorate accepts two `tf.float32` arguments
named `x` and `y`, call the decorator with:
@Defun(tf.float32, tf.float32)
def foo(x, y):
...
When you call the decorated function it will add `call` ops to the
default graph and adds the definition of the function into the
default graph. Because the addition of the function into the graph
is deferred, the decorator can be used anywhere in the program.
Any variables created inside of the function are hoisted into the outer graph.
Note that the variables are created in the variable scope that was active
during the first call to the function. Subsequent function calls will refer to
the same set of variables.
Definitions of functions in a graph are frozen as soon as the graph is used to
create a session. However, new functions and new calls to existing functions
may be added to the graph, with the new functions themselves becoming
immediately frozen.
Example, but also see the [How To on functions](link_needed).
```python
# Defining the function.
@tf.Defun(tf.float32, tf.float32)
def MyFunc(x, y):
return x + y, x - y
# Building the graph.
a = tf.constant([1.0])
b = tf.constant([2.0])
c, d = MyFunc(a, b, name='mycall')
```
"""
def __init__(self, *input_types, **kwargs):
"""Create a `Defun` decorator.
Args:
*input_types: A list of `tf.DType`
**kwargs: Optional keyword arguments, including
func_name - (optional). A python string, the name to use to
declare this `Function` in the graph.
grad_func - (optional). A function implementing the gradient
          of the function-to-register. This must be a
`_DefinedFunction` object. The gradient
function must satisfy the criterion defined in
function.proto:GradientDef.
python_grad_func - (optional). A function implementing the
gradient of the function python-side. This function must
take the current op and the gradients w.r.t. its outputs,
          and return the gradients w.r.t. the inputs. That is, it must
          implement the interface expected by `tf.RegisterGradient`.
This will be called by tf.gradients to add the gradient ops
to the graph. At most one of grad_func and python_grad_func
can be specified.
        out_names - (optional). A list of strings, one per output
tensor.
shape_func - (optional). A function taking the op and returning a list
of static shapes to set for the function's outputs.
"""
self._input_types = input_types
self._func_name = kwargs.pop("func_name", None)
self._grad_func = kwargs.pop("grad_func", None)
self._python_grad_func = kwargs.pop("python_grad_func", None)
self._out_names = kwargs.pop("out_names", None)
self._extra_kwargs = kwargs
def __call__(self, func):
# Various sanity checks on the callable func.
if not callable(func):
raise ValueError("func %s must be callable" % func)
# Func should not use kwargs and defaults.
argspec = tf_inspect.getargspec(func)
if argspec.keywords or argspec.defaults:
raise ValueError("Functions with argument defaults or keyword "
"arguments are not supported.")
# Computes how many arguments 'func' has.
min_args = len(argspec.args)
max_args = min_args
if argspec.varargs:
max_args = 1000000
argnames = argspec.args
if tf_inspect.ismethod(func):
# 1st argument is the "class" type.
min_args -= 1
argnames = argnames[1:]
if self._input_types:
# If Defun is given a list of types for the inputs, the number
# of input types should be compatible with 'func'.
num = len(self._input_types)
if num < min_args or num > max_args:
raise ValueError(
"The function has fewer arguments than the number of specified "
"input types.")
return _DefinedFunction(
func,
argnames,
self._input_types,
self._func_name,
self._grad_func,
self._python_grad_func,
out_names=self._out_names,
**self._extra_kwargs)
# 'func' expects no arguments and input types is an empty list.
if min_args == 0 and max_args == 0:
return _DefinedFunction(
func, [], [],
self._func_name,
self._grad_func,
self._python_grad_func,
out_names=self._out_names,
**self._extra_kwargs)
# Input types are unknown. It's an overloaded function and hence
# its definition needs to be deferred until it's called.
return _OverloadedFunction(
func,
argnames,
self._func_name,
self._grad_func,
self._python_grad_func,
out_names=self._out_names,
**self._extra_kwargs)
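# Hedged illustration (added commentary, not original code): calling Defun with
# no input types on a function that takes arguments yields an
# _OverloadedFunction, which is specialized lazily per call-site dtype
# signature:
#
#     @Defun()
#     def MyAdd(x, y):
#       return x + y
#
#     # MyAdd(tf.constant([1.0]), tf.constant([2.0]))  -> float32 specialization
#     # MyAdd(tf.constant([1]), tf.constant([2]))      -> int32 specialization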
class _DefinedFunction(object):
"""_DefinedFunction encapsulates a function definition and its properties.
Attributes:
name: The function name.
definition: The definition of this function. A FunctionDef proto.
grad_func_name: If not None, the name of this function's gradient function.
python_grad_func: A python callable implementing the gradient of
the function python-side.
"""
def __init__(self,
func,
argnames,
input_types,
func_name=None,
grad_func=None,
python_grad_func=None,
out_names=None,
shape_func=None,
capture_by_value=False,
**kwargs):
"""Creates _DefinedFunction.
Args:
func: A python callable which constructs a tf function body.
argnames: A list of strings for function argument names.
input_types: The function's argument types. Can be a tuple, list of
tf data types.
      func_name: The function name. Defaults to None, in which case it is
        derived from 'func'.
grad_func: This function's gradient function, if not None. Defaults
to None.
python_grad_func: A python callable implementing the gradient of
the function python-side.
out_names: An optional list of strings for the function return value
names.
shape_func: An optional function mapping an op to a list of static
output shapes.
capture_by_value: Boolean (defaults to False). If True, captured values
will be copied into the function body.
**kwargs: The keyword arguments. **kwargs is passed to every call
site of this function.
Raises:
ValueError: The function definition is invalid.
"""
self._func = func
self._input_types = input_types
self._func_name = func_name
self._grad_func = grad_func
self._python_grad_func = python_grad_func
self._out_names = out_names
self._shape_func = shape_func
self._capture_by_value = capture_by_value
self._extra_kwargs = kwargs
# Constructed only when C API is disabled, lazily
self._definition = None
# Constructed only when C API is enabled, lazily
self._c_func = None
self._sub_functions = dict() # Constructed with _definition or _c_func
# pylint: disable=protected-access
device_funcs = ops.get_default_graph()._device_functions_outer_to_inner
# pylint: enable=protected-access
    # Get the innermost device if possible.
self._caller_device = device_funcs[-1] if device_funcs else None
# Cached OpDef for this function. When C API is enabled, this is
# the only part of FunctionDef that we cache in Python. When C API
# is disabled the whole _definition is available and this is simply
# another reference to _definition.signature
self._op_def = None
assert isinstance(input_types, (list, tuple))
self._arg_types = input_types
self._arg_names = [argnames[i] if i < len(argnames) else ("arg%d" % i)
for i in range(len(input_types))]
@property
def name(self):
"""Function name."""
self._create_definition_if_needed()
return self._func_name
@property
def definition(self):
"""Function definition proto."""
self._create_definition_if_needed()
if self._c_func:
with c_api_util.tf_buffer() as buf:
c_api.TF_FunctionToFunctionDef(self._c_func.func, buf)
fdef = function_pb2.FunctionDef()
proto_data = c_api.TF_GetBuffer(buf)
fdef.ParseFromString(compat.as_bytes(proto_data))
return fdef
return self._definition
@property
def _signature(self):
self._create_definition_if_needed()
return self._op_def
def set_grad_func(self, grad_func):
"""Specifies the gradient function of this function."""
assert not self._grad_func
assert isinstance(grad_func, _DefinedFunction)
self._grad_func = grad_func
@property
def grad_func_name(self):
"""Its gradient function's name."""
return self._grad_func.name if self._grad_func else None
@property
def python_grad_func(self):
"""Python gradient function callable."""
return self._python_grad_func
@property
def declared_input_types(self):
"""Returns the list of data types of explicit declared inputs."""
return self._input_types
@property
def captured_inputs(self):
"""Returns the list of implicitly captured inputs."""
self._create_definition_if_needed()
return self._extra_inputs
@property
def stateful_ops(self):
"""Returns the list of stateful ops in function definition.
Returns:
A list of (op.name, op.type) pairs.
"""
self._create_definition_if_needed()
return self._stateful_ops
def _create_definition_if_needed(self):
"""Creates the function definition if it's not created yet."""
with context.graph_mode():
self._create_definition_if_needed_impl()
def _create_definition_if_needed_impl(self):
"""This is not what you want, see _create_definition_if_needed."""
if self._definition is not None or self._c_func is not None:
return
temp_graph = func_graph_from_py_func(
self._func, self._arg_names, self._arg_types, self._func_name,
self._capture_by_value, self._caller_device)
self._extra_inputs = temp_graph.extra_inputs
# pylint: disable=protected-access
self._sub_functions = temp_graph._functions
# pylint: enable=protected-access
# Extra kwargs are treated as attrs on the function def.
if self._func_name:
base_func_name = self._func_name
else:
base_func_name = function_utils.get_func_name(self._func)
if self._grad_func:
base_func_name += ("_%s" % self._grad_func.name)
kwargs_attr = _parse_kwargs_as_attrs(base_func_name, **self._extra_kwargs)
if not temp_graph._c_graph: # pylint: disable=protected-access
# Build the FunctionDef
self._definition = graph_to_function_def.graph_to_function_def(
temp_graph,
temp_graph.get_operations(),
temp_graph.inputs,
temp_graph.outputs,
out_names=self._out_names)
for k in kwargs_attr:
self._definition.attr[k].CopyFrom(kwargs_attr[k])
# Hash the definition and its dependencies.
self._hash_str = self._create_hash_str(
self._definition.signature.input_arg,
self._definition.signature.output_arg, self._definition.node_def)
# Finally, we decide the function name to use. If not specified,
# make up something which is almost certainly unique (but deterministic).
if not self._func_name:
self._func_name = "_".join([base_func_name, self._hash_str])
self._definition.signature.name = self._func_name
if self._func.__doc__:
self._definition.signature.description = self._func.__doc__
self._op_def = self._definition.signature
else: # C API is enabled
output_names = ([compat.as_bytes(x) for x in self._out_names]
if self._out_names else [])
description = self._func.__doc__ or None
# pylint: disable=protected-access
c_func = c_api.TF_GraphToFunction_wrapper(
temp_graph._c_graph,
base_func_name,
self._func_name is None, # append_hash_to_fn_name
None, # opers
[t._as_tf_output() for t in temp_graph.inputs],
[t._as_tf_output() for t in temp_graph.outputs],
output_names,
None, # opts
description)
self._c_func = c_api_util.ScopedTFFunction(c_func)
# pylint: enable=protected-access
self._set_c_attrs(kwargs_attr)
# Set cached fields: _op_def and _func_name (if not already set)
self._op_def = self.definition.signature
if self._func_name:
assert self._func_name == self._op_def.name
else:
self._func_name = compat.as_str(self._op_def.name)
self._stateful_ops = [(op.name, op.type)
for op in temp_graph.get_operations()
if op.op_def.is_stateful]
def _set_c_attrs(self, attrs):
"""Sets `attrs` as attributes of self._c_func.
Requires that self._c_func is not None.
Args:
attrs: a dictionary from attribute name to attribute proto value
"""
for name, attr_value in attrs.items():
serialized = attr_value.SerializeToString()
# TODO(skyewm): this creates and deletes a new TF_Status for every attr.
# It might be worth creating a convenient way to re-use the same status.
c_api.TF_FunctionSetAttrValueProto(self._c_func.func, compat.as_str(name),
serialized)
def _create_hash_str(self, input_arg, output_arg, node_def):
"""Creates an 8-character string unique to this input.
Args:
input_arg: the input_arg field of an OpDef
(e.g. self._definition.signature.input_arg)
output_arg: the output_arg field of an OpDef
(e.g. self._definition.signature.output_arg)
node_def: the node_def field of a FunctionDef
(e.g. self._definition.node_def)
Returns:
The unique string for this input
"""
hasher = hashlib.sha1()
def update_num(n):
hasher.update(compat.as_bytes("%x" % n))
def update_str(s):
update_num(len(s))
hasher.update(compat.as_bytes(s))
def update_strs(slist):
update_num(len(slist))
for s in slist:
update_str(s)
for adef in input_arg:
update_str(adef.SerializeToString())
for adef in output_arg:
update_str(adef.SerializeToString())
for n in sorted(node_def, key=lambda n: n.name):
update_str(n.name)
update_str(n.op)
update_strs(n.input)
update_num(len(n.attr))
# NOTE: protobuf map serialization does not guarantee ordering.
for k in sorted(n.attr):
update_str(k)
update_str(n.attr[k].SerializeToString())
return hasher.hexdigest()[:8]
def add_to_graph(self, g):
"""Adds this function into the graph g."""
self._create_definition_if_needed()
# Adds this function into 'g'.
# pylint: disable=protected-access
if context.executing_eagerly():
context.context().add_function_def(self.definition)
else:
g._add_function(self)
# pylint: enable=protected-access
# Ensures related sub-routines are defined in 'g', too.
for f in self._sub_functions.values():
f.add_to_graph(g)
# Adds its gradient function, too.
if self._grad_func:
self._grad_func.add_to_graph(g)
def __call__(self, *args, **kwargs):
self.add_to_graph(ops.get_default_graph())
args = [ops.convert_to_tensor(_) for _ in args] + self._extra_inputs
ret, op = _call(self._signature, *args, **kwargs)
# Set a hidden attr in 'op' so that gradients_impl can refer back
# to this _DefinedFunction instance to access python_grad_func.
assert isinstance(op, ops.Operation)
setattr(op, "__defun", self)
if self._shape_func is not None:
shapes = self._shape_func(op)
if len(shapes) != len(op.outputs):
raise ValueError("shape_func produced %d shapes for %d outputs" %
(len(shapes), len(op.outputs)))
for (t, shape) in zip(op.outputs, shapes):
t.set_shape(shape)
return ret
class _OverloadedFunction(object):
"""_OverloadedFunction encapsulates an overloaded function.
_OverloadedFunction maintains a mapping from input types to
instantiated _DefinedFunction in self._overload.
"""
def __init__(self,
func,
argnames,
func_name=None,
grad_func=None,
python_grad_func=None,
out_names=None,
**kwargs):
"""Creates _DefinedFunction.
Args:
func: A python callable which constructs a tf function body.
argnames: A list of strings for function argument names.
      func_name: The function name. Defaults to None, in which case it is
        derived from 'func'.
grad_func: This function's gradient function, if not None. Defaults
to None.
python_grad_func: A python callable implementing the gradient of
the function python-side.
out_names: A list of strings for the function return value names.
**kwargs: The keyword arguments. **kwargs is passed to every call
site of this function.
Raises:
ValueError: The function definition is invalid.
"""
self._func = func
self._argnames = argnames
self._func_name = func_name
assert grad_func is None or isinstance(grad_func, _OverloadedFunction)
self._grad_func = grad_func
self._python_grad_func = python_grad_func
self._out_names = out_names
self._extra_kwargs = kwargs
self._overload = {}
def instantiate(self, input_types):
"""Instantiate this function given input argument types.
Args:
input_types: A list of data types for the inputs.
Returns:
_DefinedFunction for the given input types.
"""
# Stringify the type list.
key = _type_list_to_str(input_types)
defined = self._overload.get(key)
if not defined:
# If not defined yet, define the function given the input types.
name = self._func_name
if name is not None:
name = "_".join([name, key])
defined = _DefinedFunction(
self._func,
self._argnames,
input_types,
name,
None,
self._python_grad_func,
out_names=self._out_names,
**self._extra_kwargs)
_ = defined.name # Fully instantiate the function definition.
if self._grad_func:
# If _grad_func is given, it is another
# _OverloadedFunction. We need to instantiate it with the
# right input types.
output_types = [
dtypes.DType(_.type) for _ in defined._signature.output_arg # pylint: disable=protected-access
]
# pylint: disable=protected-access
defined._grad_func = self._grad_func.instantiate(input_types +
output_types)
# pylint: enable=protected-access
self._overload[key] = defined
return defined
def __call__(self, *args, **kwargs):
input_types = []
args = list(args)
for (i, x) in enumerate(args):
x = ops.convert_to_tensor(x)
if not isinstance(x, ops.Tensor):
        raise ValueError("Expected a Tensor but got %s" % (x,))
input_types.append(x.dtype)
args[i] = x
return self.instantiate(input_types)(*args, **kwargs)
class _FuncGraph(ops.Graph):
"""A helper for constructing a function.
_FuncGraph overrides ops.Graph's create_op() so that we can keep
track of all inputs into every op created inside the function. If
any input is from other graphs, we keep track of it in self.capture
and substitute the input with a place holder.
Each captured input's corresponding place holder is converted into a
function argument and the caller passes in the captured tensor.
"""
def __init__(self, name, capture_by_value, *args, **kwargs):
super(_FuncGraph, self).__init__(*args, **kwargs)
self._capture_by_value = capture_by_value
self._building_function = True
self._outer_graph = ops.get_default_graph()
self._vscope = vs.get_variable_scope()
self._old_custom_getter = self._vscope.custom_getter
# The name of the function.
self.name = name
# Placeholder tensors representing the inputs to this function. The tensors
# are in this _FuncGraph.
self.inputs = []
    # Tensors that will be returned by this function. The tensors are in this
# _FuncGraph.
self.outputs = []
# Maps external tensor -> internal tensor (e.g. input placeholder).
self._captured = {}
# The external tensors that have been captured as inputs and must be passed
# to this function (empty if capturing by value, otherwise these are the
# keys of _captured).
self.extra_inputs = []
    # Input placeholders that have been added for captured values (empty if
    # capturing by value).
self.extra_args = []
# Captured variables.
# TODO(skyewm): is this needed?
self.extra_vars = []
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def container(self, container_name):
"""Returns a context manager that specifies the resource container to use.
Overridden from `tf.Graph` to update both the init_scope container
and the present inner container. This is necessary to make sure setting
containers applies correctly both to created variables and to stateful
ops.
Args:
container_name: container name string.
Returns:
A context manager for defining resource containers for stateful ops,
yields the container name.
"""
original_container = self._container
# pylint: disable=protected-access
with ops.init_scope():
original_init_container = ops.get_default_graph()._container
try:
self._container = container_name
with ops.init_scope():
ops.get_default_graph()._container = container_name
yield self._container
finally:
self._container = original_container
with ops.init_scope():
ops.get_default_graph()._container = original_init_container
# pylint: enable=protected-access
# pylint: enable=g-doc-return-or-yield
def getvar(
self,
getter,
name,
shape=None,
dtype=None,
initializer=None,
reuse=None,
trainable=True,
collections=None, # pylint: disable=redefined-outer-name
use_resource=None,
**kwargs):
"""A custom variable getter."""
# Here, we switch the default graph to the outer graph and ask the
# variable scope in which the function is defined to give us the
# variable. The variable is stashed in extra_vars and returned to
# the caller.
#
# We capture these variables so that the variable definition is
# hoisted upward to the outer most graph.
with self._outer_graph.as_default():
# pylint: disable=protected-access
var = self._vscope.get_variable(
vs._get_default_variable_store(),
name,
shape=shape,
dtype=dtype,
initializer=initializer,
reuse=reuse,
trainable=trainable,
collections=collections,
use_resource=use_resource)
self.extra_vars.append(var)
if isinstance(var, resource_variable_ops.ResourceVariable):
# For resource-based variables read the variable outside the function
# and pass in the value. This ensures that the function is pure and
# differentiable. TODO(apassos) this may have performance problems if
# the function will only do embedding lookups on the variable.
return var.value()
return var
def create_op(self, op_type, inputs, data_types, **kwargs):
for i, x in enumerate(inputs):
if isinstance(x, ops.EagerTensor) or x.graph is not self:
inputs[i] = self.capture(x)
return super(_FuncGraph, self).create_op(op_type, inputs, data_types,
**kwargs)
def capture(self, tensor, name=None):
"""Adds the given tensor to this graph and returns the captured tensor."""
if tensor in self._captured:
# Captured already.
return self._captured[tensor]
elif self._capture_by_value:
return self._add_tensor_and_parents(tensor)
else:
return self._capture_tensor_as_extra_input(tensor, name)
def _capture_tensor_as_extra_input(self, tensor, name=None):
# Substitute with a placeholder.
self.extra_inputs.append(tensor)
# Hoist the new input placeholder out of any control flow context
# we're currently in.
with ops.control_dependencies(None):
ph = array_ops.placeholder(
tensor.dtype, shape=tensor.get_shape(), name=name)
# pylint: disable=protected-access
if ops._USE_C_SHAPES:
if isinstance(tensor, ops.EagerTensor):
handle_data = tensor._handle_data
if handle_data:
handle_data = handle_data.SerializeToString()
else:
handle_data = c_api.GetHandleShapeAndType(tensor.graph._c_graph,
tensor._as_tf_output())
if handle_data:
c_api.SetHandleShapeAndType(ph.graph._c_graph, ph._as_tf_output(),
compat.as_bytes(handle_data))
else:
ph._handle_data = tensor._handle_data
# pylint: enable=protected-access
self.inputs.append(ph)
self._captured[tensor] = ph
self.extra_args.append(ph)
if _is_guaranteed_const(tensor):
with ops.control_dependencies(None):
return array_ops.guarantee_const(ph)
else:
return ph
def _add_tensor_and_parents(self, tensor):
op = self._add_op_and_parents(tensor.op)
return op.outputs[tensor.value_index]
def _add_op_and_parents(self, op):
# pylint: disable=protected-access
op_def = graph_to_function_def._get_op_def(op)
# pylint: enable=protected-access
if op_def.is_stateful:
raise ValueError("Cannot capture a stateful node (name:%s, type:%s) "
"by value." % (op.name, op.type))
elif op.type in ("Placeholder", "PlaceholderV2"):
raise ValueError("Cannot capture a placeholder (name:%s, type:%s) "
"by value." % (op.name, op.type))
captured_inputs = [self._add_tensor_and_parents(x) for x in op.inputs]
captured_op = self.create_op(
op.type,
captured_inputs, [o.dtype for o in op.outputs],
name=op.name,
attrs=op.node_def.attr,
op_def=op_def)
for t, captured_t in zip(op.outputs, captured_op.outputs):
self._captured[t] = captured_t
return captured_op
def func_graph_from_py_func(func, arg_names, arg_types, name=None,
capture_by_value=False, device=None,
colocation_stack=None, container=None,
collections_ref=None, arg_shapes=None):
"""Returns a _FuncGraph generated from `func`.
Args:
func: A Python callable which constructs a TF function body. The arguments
must correspond to `arg_types`. Returns a value or list/tuple of values.
No returned value can be None.
arg_names: A sequence of strings for the function argument names.
arg_types: A sequence of the function's argument types.
name: The function name. If None, the name is derived from `func`.
capture_by_value: boolean. If True, captured values will be copied into the
function body.
device: device name or function.
colocation_stack: A colocation stack (list) the _FuncGraph should use.
container: A container name the _FuncGraph should start with.
collections_ref: A reference to a collections dict the _FuncGraph should
use internally.
arg_shapes: A sequence of the function's argument shapes.
Returns:
A _FuncGraph.
Raises:
ValueError: if func returns None.
"""
if not name:
name = function_utils.get_func_name(func)
func_graph = _FuncGraph(name, capture_by_value)
with func_graph.as_default(), ops.device(device):
# pylint: disable=protected-access
if collections_ref is not None:
func_graph._collections = collections_ref
if container is not None:
func_graph._container = container
if colocation_stack is not None:
func_graph._colocation_stack = colocation_stack
# pylint: enable=protected-access
if arg_shapes is None:
arg_shapes = [None] * len(arg_types)
# Create placeholders for the function arguments.
for (argname, argtype, argshape) in zip(arg_names, arg_types, arg_shapes):
argholder = array_ops.placeholder(argtype, shape=argshape, name=argname)
func_graph.inputs.append(argholder)
# Call func and gather the output tensors.
with vs.variable_scope("", custom_getter=func_graph.getvar):
outputs = func(*func_graph.inputs)
# There is no way of distinguishing between a function not returning
# anything and a function returning None in Python.
# We need to allow the former and ideally want to forbid the latter as
# it is most likely user error.
# TODO(iga): Consider adding a @NoOutput decorator on top of @Defun to
# allow users to explicitly mark the function as not returning anything.
# For now, we allow a single None return and interpret it as a function
# with no output.
if outputs is None:
outputs = []
else:
# If func only returned one value, make it a tuple.
if not isinstance(outputs, (list, tuple)):
outputs = (outputs,)
if any([_ is None for _ in outputs]):
raise ValueError("Function can not return None.")
# Ensures each output is a Tensor in the function graph.
outputs = [ops.convert_to_tensor(t) for t in outputs]
outputs = [func_graph.capture(t) if t.graph is not func_graph else t
for t in outputs]
func_graph.outputs = outputs
return func_graph
def _is_guaranteed_const(tensor):
"""Determines whether `tensor` is guaranteed to be a constant.
A tensor is guaranteed to be a constant if either it was produced by
a `GuaranteeConst` op or if all of its children are guaranteed to be
constants.
Args:
tensor: The tensor for which to determine const-ness.
Returns:
True if `tensor` is guaranteed to be a constant, False otherwise.
"""
if isinstance(tensor, ops.EagerTensor):
return False
class Work(object):
def __init__(self, op, leaving):
self.op = op
self.leaving = leaving
is_guaranteed_const = lambda op: op.node_def.op == "GuaranteeConst"
constants = set([])
def all_inputs_const(op):
# If all inputs of an op are guaranteed constants, then we can infer that
# the op produces a constant as well.
return op.inputs and all(inp.op in constants for inp in op.inputs)
visited = set([])
stack = [Work(tensor.op, leaving=False)]
while stack:
work = stack.pop()
if work.leaving:
if all_inputs_const(work.op):
constants.add(work.op)
continue
visited.add(work.op)
if is_guaranteed_const(work.op):
constants.add(work.op)
continue
# This op will be revisited after all its inputs are checked for const-ness.
stack.append(Work(work.op, leaving=True))
for inp in work.op.inputs:
if inp.op not in visited:
stack.append(Work(inp.op, leaving=False))
return tensor.op in constants
def _call(sig, *inputs, **kwargs):
"""Adds a node calling a function.
This adds a `call` op to the default graph that calls the function
of signature `sig`, passing the tensors in `inputs` as arguments.
It returns the outputs of the call, which are one or more tensors.
  `sig` is the `OpDef` signature obtained from a `_DefinedFunction` object.
You can pass an optional keyword parameter `name=string` to name the
added operation.
You can pass an optional keyword parameter `noinline=True|False` to
instruct the runtime not to inline the function body into the call
site.
Args:
sig: OpDefArg. The signature of the function.
*inputs: arguments to the function.
**kwargs: Optional keyword arguments. Can only contain 'name' or
'noinline'.
Returns:
A 2-element tuple. First element: a Tensor if the function returns a single
    value; a list of Tensors if the function returns multiple values; the
Operation if the function returns no values. Second element: the Operation.
Raises:
ValueError: if the arguments are invalid.
"""
if len(inputs) != len(sig.input_arg):
raise ValueError("Expected number of arguments: %d, received: %d" % (len(
sig.input_arg), len(inputs)))
name = kwargs.pop("name", None)
g = ops.get_default_graph()
func_name = sig.name
attrs = _parse_kwargs_as_attrs(func_name, **kwargs)
output_types = [dtypes.DType(x.type) for x in sig.output_arg]
with ops.name_scope(name, func_name, inputs) as name:
op = g.create_op(
func_name,
list(inputs),
output_types,
name=name,
attrs=attrs,
op_def=sig,
compute_shapes=False)
if op.outputs:
if len(op.outputs) == 1:
ret = op.outputs[0]
else:
ret = tuple(op.outputs)
else:
ret = op
return ret, op
def _from_definition(fdef, grad_func=None):
"""Creates a _DefinedFunction initialized from a FunctionDef proto.
Args:
fdef: a FunctionDef
grad_func: a _DefinedFunction or None
Returns:
A _DefinedFunction representing fdef
"""
# TODO(iga): This method does major surgery on _DefinedFunction.
# Make it a named constructor using @classmethod of _DefinedFunction.
# The Python callable is only needed to create a FunctionDef. Since we have
# the FunctionDef here, we don't need to set _DefinedFunction._func (nor do we
# have access to such a callable here).
func = None
argnames = [arg.name for arg in fdef.signature.input_arg]
input_types = tuple(
dtypes.as_dtype(arg.type) for arg in fdef.signature.input_arg)
func_name = fdef.signature.name
# Note: FunctionDefs do not include python gradient functions, so if the
# original _DefinedFunction included one it will not be reflected here.
python_grad_func = None
out_names = [arg.name for arg in fdef.signature.output_arg]
result = _DefinedFunction(func, argnames, input_types, func_name, grad_func,
python_grad_func, out_names)
# pylint: disable=protected-access
serialized = fdef.SerializeToString()
c_func = c_api.TF_FunctionImportFunctionDef(serialized)
result._c_func = c_api_util.ScopedTFFunction(c_func)
result._extra_inputs = []
# pylint: enable=protected-access
return result
def _from_library(lib):
"""Creates _DefinedFunctions initialized from a FunctionDefLibrary proto.
This method handles assigning the correct gradient functions to each
function.
Args:
lib: a FunctionDefLibrary
Returns:
A list of _DefinedFunctions
Raises:
ValueError: `lib` is invalid
"""
if not lib.function and not lib.gradient:
return []
# function name -> FunctionDef proto
funcs = {fdef.signature.name: fdef for fdef in lib.function}
  # Validate that all referenced function names have function defs
for g in lib.gradient:
if g.function_name not in funcs:
raise ValueError("FunctionDefLibrary missing '%s' FunctionDef\n%s" %
(g.function_name, str(lib)))
if g.gradient_func not in funcs:
raise ValueError("FunctionDefLibrary missing '%s' FunctionDef\n%s" %
(g.gradient_func, str(lib)))
# function name -> gradient function name
func_to_grad = collections.defaultdict(lambda: None)
# gradient function name -> names of functions having that grad function
grad_to_funcs = collections.defaultdict(list)
for gdef in lib.gradient:
func_to_grad[gdef.function_name] = gdef.gradient_func
grad_to_funcs[gdef.gradient_func].append(gdef.function_name)
# Start with functions without gradients
ready = [
fdef for fdef in lib.function if func_to_grad[fdef.signature.name] is None
]
if not ready:
raise ValueError(
"FunctionDefLibrary contains cyclic gradient functions!\n" + str(lib))
# function name -> _DefinedFunction
initialized = {}
while ready:
fdef = ready.pop()
name = fdef.signature.name
grad = initialized.get(func_to_grad[name])
if func_to_grad[name]:
assert grad
defined_func = _from_definition(fdef, grad_func=grad)
initialized[name] = defined_func
ready.extend(funcs[f] for f in grad_to_funcs[name])
return initialized.values()
def _get_experimental_kwarg_as_attr(attr_name, value):
"""Creates an AttrValue for a python object."""
if isinstance(value, bool):
return attr_value_pb2.AttrValue(b=value)
elif isinstance(value, int):
return attr_value_pb2.AttrValue(i=value)
elif isinstance(value, float):
return attr_value_pb2.AttrValue(f=value)
elif isinstance(value, str):
return attr_value_pb2.AttrValue(s=compat.as_bytes(value))
else:
raise ValueError("Unsupported attribute type for %s with type %s" %
(attr_name, type(value)))
def _parse_kwargs_as_attrs(func_name, **kwargs):
"""Parses **kwargs into a node's attributes."""
attrs = {}
noinline = kwargs.pop("noinline", None)
if noinline is not None:
attrs["_noinline"] = attr_value_pb2.AttrValue(b=bool(noinline))
compiled = kwargs.pop("compiled", None)
separate_compiled_gradients = kwargs.pop("separate_compiled_gradients", None)
if compiled is not None:
attrs["_XlaCompile"] = attr_value_pb2.AttrValue(b=bool(compiled))
attrs["_XlaSeparateCompiledGradients"] = attr_value_pb2.AttrValue(
b=bool(separate_compiled_gradients))
# Forward _XlaScope from enclosing context (if set), otherwise create new.
# pylint: disable=protected-access
if "_XlaScope" in ops.get_default_graph()._attr_scope_map:
attrs["_XlaScope"] = ops.get_default_graph()._attr_scope_map["_XlaScope"]
else:
attrs["_XlaScope"] = attr_value_pb2.AttrValue(
s=("function_%s" % func_name).encode())
# pylint: enable=protected-access
kwargs_keys = list(kwargs.keys())
for key in kwargs_keys:
if key.startswith("experimental_"):
attrs[key] = _get_experimental_kwarg_as_attr(key, kwargs[key])
del kwargs[key]
if kwargs:
raise ValueError("Unknown keyword arguments: %s" % kwargs.keys())
return attrs
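# Hedged illustration (added commentary): for a call such as
#     _parse_kwargs_as_attrs("MyFunc", noinline=True, experimental_tag="x")
# the returned dict contains "_noinline", the always-present "_XlaScope", and
# the "experimental_tag" attr; any other unknown kwarg raises ValueError.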
def get_extra_vars():
"""Returns the captured variables by the function.
Returns:
If the default graph is being used to define a function, the
returned list of variables are those created inside the function
body so far. Otherwise, returns an empty list.
"""
g = ops.get_default_graph()
if isinstance(g, _FuncGraph):
return g.extra_vars
else:
return []
def get_extra_inputs():
"""Returns the captured input tensors by the function.
Returns:
If the default graph is being used to define a function, the
returned list of tensors are those accessed inside the function body
but defined outside the function body so far. Otherwise, returns an
empty list.
"""
g = ops.get_default_graph()
if isinstance(g, _FuncGraph):
return g.extra_inputs
else:
return []
def get_extra_args():
"""Returns the corresponding function arguments for the captured inputs.
Returns:
If the default graph is being used to define a function, the
    returned list of placeholders contains those used inside the function
    body, corresponding to those returned by get_extra_inputs(). Otherwise,
returns an empty list.
"""
g = ops.get_default_graph()
if isinstance(g, _FuncGraph):
return g.extra_args
else:
return []
def _type_list_to_str(types):
if any([_ not in _DTYPE_TO_STR for _ in types]):
raise ValueError("Unsupported dtypes: %s" % types)
return "".join([_DTYPE_TO_STR[_] for _ in types])
# NOTE: The list needs to be extended when more data types are added.
_DTYPE_TO_STR = {
dtypes.float16: "f16",
dtypes.float32: "f32",
dtypes.float64: "f64",
dtypes.int32: "i32",
dtypes.uint8: "i8",
dtypes.uint16: "u16",
dtypes.uint32: "u32",
dtypes.uint64: "u64",
dtypes.int16: "i16",
dtypes.int8: "i8",
dtypes.string: "s",
dtypes.complex64: "c64",
dtypes.complex128: "c128",
dtypes.int64: "i64",
dtypes.bool: "b",
dtypes.qint8: "qi8",
dtypes.quint8: "qu8",
dtypes.qint16: "qi16",
dtypes.quint16: "qu16",
dtypes.qint32: "qi32",
dtypes.bfloat16: "b16"
}
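# Hedged illustration (added commentary): _type_list_to_str builds the short
# suffix used to name overloaded function specializations, e.g.
#     _type_list_to_str([dtypes.float32, dtypes.int32]) == "f32i32"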
def function_def_from_tf_function(c_func):
"""Converts a SWIG-wrapped TF_Function* to a FunctionDef proto."""
with c_api_util.tf_buffer() as buf:
c_api.TF_FunctionToFunctionDef(c_func, buf)
data = c_api.TF_GetBuffer(buf)
fdef = function_pb2.FunctionDef()
fdef.ParseFromString(compat.as_bytes(data))
return fdef
| kobejean/tensorflow | tensorflow/python/framework/function.py | Python | apache-2.0 | 43,277 |
# -*- coding: utf-8 -*-
#
# Armstrong Platform Documentation documentation build configuration file, created by
# sphinx-quickstart on Mon Sep 26 13:38:48 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.intersphinx', 'sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Armstrong Platform'
copyright = u'2011, Bay Citizen and Texas Tribune'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '12.03.1'
# The full version, including alpha/beta/rc tags.
release = '12.03.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'armstrong'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes', ]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {
'index': 'index.html',
}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ArmstrongPlatformDocumentationdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'ArmstrongPlatformDocumentation.tex', u'Armstrong Platform Documentation Documentation',
u'Bay Citizen and Texas Tribune', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'armstrongplatformdocumentation', u'Armstrong Platform Documentation Documentation',
[u'Bay Citizen and Texas Tribune'], 1)
]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Armstrong Platform Documentation'
epub_author = u'Bay Citizen and Texas Tribune'
epub_publisher = u'Bay Citizen and Texas Tribune'
epub_copyright = u'2011, Bay Citizen and Texas Tribune'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
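# Added note (hedged): with the mapping above, cross-references such as
# :py:func:`open` or :py:class:`dict` in these docs resolve to the Python
# standard library documentation at build time.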
| armstrong/docs.armstrongcms.org | source/conf.py | Python | apache-2.0 | 8,620 |
# ormbad.version
# Helper module for ORMBad version information
#
# Author: Benjamin Bengfort <[email protected]>
# Created: Thu Aug 13 12:38:42 2015 -0400
#
# Copyright (C) 2015 Tipsy Bear Studios
# For license information, see LICENSE.txt
#
# ID: version.py [] [email protected] $
"""
Helper module for ORMBad version information.
"""
##########################################################################
## Versioning
##########################################################################
__version_info__ = {
'major': 0,
'minor': 1,
'micro': 0,
'releaselevel': 'final',
'serial': 0,
}
def get_version(short=False):
"""
Returns the version from the version info.
"""
assert __version_info__['releaselevel'] in ('alpha', 'beta', 'final')
vers = ["%(major)i.%(minor)i" % __version_info__, ]
if __version_info__['micro']:
vers.append(".%(micro)i" % __version_info__)
if __version_info__['releaselevel'] != 'final' and not short:
vers.append('%s%i' % (__version_info__['releaselevel'][0],
__version_info__['serial']))
return ''.join(vers)
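# Illustrative usage (not part of the original module): how the version string
# is assembled from __version_info__.
if __name__ == "__main__":
    # With the values above ('final' release, micro == 0) this prints "0.1".
    print(get_version())
    # A hypothetical pre-release such as {'major': 0, 'minor': 2, 'micro': 1,
    # 'releaselevel': 'beta', 'serial': 3} would yield "0.2.1b3", while
    # get_version(short=True) would drop the "b3" suffix.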
| tipsybear/ormbad | ormbad/version.py | Python | apache-2.0 | 1,161 |
#!/usr/bin/python
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example creates a bidder-level filter set.
A bidder-level filter set can be used to retrieve aggregated data for all
Authorized Buyers accounts under the given bidder account, including the bidder
account itself.
"""
import argparse
from datetime import date
from datetime import datetime
from datetime import timedelta
import os
import pprint
import sys
import uuid
sys.path.insert(0, os.path.abspath('..'))
from googleapiclient.errors import HttpError
import samples_util
_DATE_FORMAT = '%Y%m%d'
_FILTER_SET_NAME_TEMPLATE = ('bidders/{bidders_resource_id}/'
'filterSets/{filtersets_resource_id}')
_OWNER_NAME_TEMPLATE = 'bidders/{bidders_resource_id}'
_TODAY = date.today()
_VALID_ENVIRONMENTS = ('WEB', 'APP')
_VALID_FORMATS = ('DISPLAY', 'VIDEO')
_VALID_PLATFORMS = ('DESKTOP', 'TABLET', 'MOBILE')
_VALID_TIME_SERIES_GRANULARITIES = ('HOURLY', 'DAILY')
DEFAULT_BIDDER_RESOURCE_ID = 'ENTER_BIDDER_RESOURCE_ID_HERE'
DEFAULT_FILTER_SET_RESOURCE_ID = f'FilterSet_{uuid.uuid4()}'
DEFAULT_END_DATE = _TODAY.strftime(_DATE_FORMAT)
DEFAULT_START_DATE = (_TODAY - timedelta(days=7)).strftime(
_DATE_FORMAT)
def main(ad_exchange_buyer, owner_name, body, is_transient):
try:
# Construct and execute the request.
filter_set = ad_exchange_buyer.bidders().filterSets().create(
ownerName=owner_name, isTransient=is_transient, body=body).execute()
print(f'FilterSet created for bidder: "{owner_name}".')
pprint.pprint(filter_set)
except HttpError as e:
print(e)
if __name__ == '__main__':
def time_series_granularity_type(s):
if s not in _VALID_TIME_SERIES_GRANULARITIES:
raise argparse.ArgumentTypeError('Invalid TimeSeriesGranularity '
f'specified: "{s}".')
return s
def environment_type(s):
if s not in _VALID_ENVIRONMENTS:
raise argparse.ArgumentTypeError(
f'Invalid Environment specified: "{s}".')
return s
def format_type(s):
if s not in _VALID_FORMATS:
raise argparse.ArgumentTypeError(f'Invalid Format specified: "{s}".')
return s
def platform_type(s):
if s not in _VALID_PLATFORMS:
raise argparse.ArgumentTypeError(f'Invalid Platform specified: "{s}".')
return s
def valid_date(s):
try:
return datetime.strptime(s, _DATE_FORMAT).date()
except ValueError:
raise argparse.ArgumentTypeError(f'Invalid date specified: "{s}".')
parser = argparse.ArgumentParser(
description=('Creates a bidder-level filter set with the specified '
'options.'))
# Required fields.
parser.add_argument(
'-b', '--bidder_resource_id', default=DEFAULT_BIDDER_RESOURCE_ID,
help=('The resource ID of the bidders resource for which the filter set '
'is being created. This will be used to construct the ownerName '
'used as a path parameter for filter set requests. For additional '
'information on how to configure the ownerName path parameter, '
'see: https://developers.google.com/authorized-buyers/apis/'
'reference/rest/v2beta1/bidders.filterSets/create'
'#body.PATH_PARAMETERS.owner_name'))
parser.add_argument(
'-r', '--resource_id', default=DEFAULT_FILTER_SET_RESOURCE_ID,
help=('The resource ID of the filter set. Note that this must be '
'unique. This will be used to construct the filter set\'s name. '
'For additional information on how to configure a filter set\'s '
'name, see: https://developers.google.com/authorized-buyers/apis/'
'reference/rest/v2beta1/bidders.filterSets#FilterSet.FIELDS.name'))
parser.add_argument(
'--end_date', default=DEFAULT_END_DATE, type=valid_date,
help=('The end date for the filter set\'s absoluteDateRange field, which '
'will be accepted in this example in YYYYMMDD format.'))
parser.add_argument(
'--start_date', default=DEFAULT_START_DATE, type=valid_date,
      help=('The start date for the filter set\'s absoluteDateRange field, which '
'will be accepted in this example in YYYYMMDD format.'))
# Optional fields.
parser.add_argument(
'-e', '--environment', required=False,
type=environment_type,
help=('The environment on which to filter.'))
parser.add_argument(
'-f', '--format', required=False,
type=format_type,
help=('The format on which to filter.'))
parser.add_argument(
'-p', '--platforms', required=False, nargs='*', type=platform_type,
help=('The platforms on which to filter. The filters represented by '
'multiple platforms are ORed together. Note that you may specify '
'more than one using a space as a delimiter.'))
parser.add_argument(
'-s', '--seller_network_ids', required=False, nargs='*', type=int,
help=('The list of IDs for seller networks on which to filter. The '
'filters represented by multiple seller network IDs are ORed '
'together. Note that you may specify more than one using a space '
'as a delimiter.'))
parser.add_argument(
'-t', '--time_series_granularity', required=False,
type=time_series_granularity_type,
help=('The granularity of time intervals if a time series breakdown is '
'desired.'))
parser.add_argument(
      '--is_transient', required=False, default=True,
      # Note: argparse's type=bool treats any non-empty string (even "False")
      # as True, so parse the flag value explicitly.
      type=lambda v: str(v).lower() in ('true', 't', 'yes', '1'),
help=('Whether the filter set is transient, or should be persisted '
'indefinitely. In this example, this will default to True.'))
args = parser.parse_args()
# Build the time_range as an AbsoluteDateRange.
time_range = {
'startDate': {
'year': args.start_date.year,
'month': args.start_date.month,
'day': args.start_date.day
},
'endDate': {
'year': args.end_date.year,
'month': args.end_date.month,
'day': args.end_date.day
}
}
# Create a body containing the required fields.
BODY = {
'name': _FILTER_SET_NAME_TEMPLATE.format(
bidders_resource_id=args.bidder_resource_id,
filtersets_resource_id=args.resource_id),
# Note: You may alternatively specify relativeDateRange or
# realtimeTimeRange.
'absoluteDateRange': time_range
}
# Add optional fields to body if specified.
if args.environment:
BODY['environment'] = args.environment
if args.format:
BODY['format'] = args.format
if args.platforms:
BODY['platforms'] = args.platforms
if args.seller_network_ids:
BODY['sellerNetworkIds'] = args.seller_network_ids
if args.time_series_granularity:
BODY['timeSeriesGranularity'] = args.time_series_granularity
try:
service = samples_util.GetService('v2beta1')
except IOError as ex:
print(f'Unable to create adexchangebuyer service - {ex}')
print('Did you specify the key file in samples_util.py?')
sys.exit(1)
main(service, _OWNER_NAME_TEMPLATE.format(
bidders_resource_id=args.bidder_resource_id),
BODY, args.is_transient)
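# Illustrative invocation (not part of the original sample); the bidder
# resource ID below is a placeholder:
#
#   python create_bidder_level_filter_set.py \
#       -b 12345678 \
#       -r my_filter_set \
#       --start_date 20240101 --end_date 20240107 \
#       -e WEB -f DISPLAY -p DESKTOP MOBILE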
| googleads/googleads-adxbuyer-examples | python/samples/v2_x/create_bidder_level_filter_set.py | Python | apache-2.0 | 7,717 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras core layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.mixed_precision.experimental import policy
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
@keras_parameterized.run_all_keras_modes
class DropoutLayersTest(keras_parameterized.TestCase):
def test_dropout(self):
testing_utils.layer_test(
keras.layers.Dropout, kwargs={'rate': 0.5}, input_shape=(3, 2))
testing_utils.layer_test(
keras.layers.Dropout,
kwargs={'rate': 0.5,
'noise_shape': [3, 1]},
input_shape=(3, 2))
def test_dropout_supports_masking(self):
dropout = keras.layers.Dropout(0.5)
self.assertEqual(True, dropout.supports_masking)
def test_spatial_dropout_1d(self):
testing_utils.layer_test(
keras.layers.SpatialDropout1D,
kwargs={'rate': 0.5},
input_shape=(2, 3, 4))
def test_spatial_dropout_2d(self):
testing_utils.layer_test(
keras.layers.SpatialDropout2D,
kwargs={'rate': 0.5},
input_shape=(2, 3, 4, 5))
testing_utils.layer_test(
keras.layers.SpatialDropout2D,
kwargs={'rate': 0.5, 'data_format': 'channels_first'},
input_shape=(2, 3, 4, 5))
def test_spatial_dropout_3d(self):
testing_utils.layer_test(
keras.layers.SpatialDropout3D,
kwargs={'rate': 0.5},
input_shape=(2, 3, 4, 4, 5))
testing_utils.layer_test(
keras.layers.SpatialDropout3D,
kwargs={'rate': 0.5, 'data_format': 'channels_first'},
input_shape=(2, 3, 4, 4, 5))
@keras_parameterized.run_all_keras_modes
class LambdaLayerTest(keras_parameterized.TestCase):
def test_lambda(self):
testing_utils.layer_test(
keras.layers.Lambda,
kwargs={'function': lambda x: x + 1},
input_shape=(3, 2))
testing_utils.layer_test(
keras.layers.Lambda,
kwargs={
'function': lambda x, a, b: x * a + b,
'arguments': {
'a': 0.6,
'b': 0.4
}
},
input_shape=(3, 2))
# test serialization with function
def f(x):
return x + 1
ld = keras.layers.Lambda(f)
config = ld.get_config()
ld = keras.layers.deserialize({
'class_name': 'Lambda',
'config': config
})
# test with lambda
ld = keras.layers.Lambda(
lambda x: keras.backend.concatenate([math_ops.square(x), x]))
config = ld.get_config()
ld = keras.layers.Lambda.from_config(config)
def test_lambda_multiple_inputs(self):
ld = keras.layers.Lambda(lambda x: x[0], output_shape=lambda x: x[0])
x1 = np.ones([3, 2], np.float32)
x2 = np.ones([3, 5], np.float32)
out = ld([x1, x2])
self.assertAllEqual(out.shape, [3, 2])
def test_lambda_output_shape(self):
l = keras.layers.Lambda(lambda x: x + 1, output_shape=(1, 1))
l(keras.backend.variable(np.ones((1, 1))))
self.assertEqual((1, 1), l.get_config()['output_shape'])
def test_lambda_output_shape_function(self):
def get_output_shape(input_shape):
return 1 * input_shape
l = keras.layers.Lambda(lambda x: x + 1, output_shape=get_output_shape)
l(keras.backend.variable(np.ones((1, 1))))
self.assertEqual('lambda', l.get_config()['output_shape_type'])
def test_lambda_output_shape_autocalculate_multiple_inputs(self):
def lambda_fn(x):
return math_ops.matmul(x[0], x[1])
l = keras.layers.Lambda(lambda_fn)
output_shape = l.compute_output_shape([(10, 10), (10, 20)])
self.assertAllEqual((10, 20), output_shape)
def test_lambda_output_shape_list_multiple_outputs(self):
def lambda_fn(x):
return x
l = keras.layers.Lambda(lambda_fn, output_shape=[(10,), (20,)])
output_shape = l.compute_output_shape([(10, 10), (10, 20)])
self.assertAllEqual([(10, 10), (10, 20)], output_shape)
def test_lambda_output_shape_tuple_with_none(self):
def lambda_fn(x):
return x
l = keras.layers.Lambda(lambda_fn, output_shape=(None, 10))
output_shape = l.compute_output_shape((5, 10, 20))
self.assertAllEqual([5, None, 10], output_shape.as_list())
def test_lambda_output_shape_function_multiple_outputs(self):
def lambda_fn(x):
return x
def output_shape_fn(input_shape):
return input_shape
l = keras.layers.Lambda(lambda_fn, output_shape=output_shape_fn)
output_shape = l.compute_output_shape([(10, 10), (10, 20)])
self.assertAllEqual([(10, 10), (10, 20)], output_shape)
def test_lambda_config_serialization(self):
# Test serialization with output_shape and output_shape_type
layer = keras.layers.Lambda(lambda x: x + 1, output_shape=(1, 1))
layer(keras.backend.variable(np.ones((1, 1))))
config = layer.get_config()
layer = keras.layers.deserialize({
'class_name': 'Lambda',
'config': config
})
layer = keras.layers.Lambda.from_config(config)
@keras_parameterized.run_all_keras_modes
class CoreLayersTest(keras_parameterized.TestCase):
def test_masking(self):
testing_utils.layer_test(
keras.layers.Masking, kwargs={}, input_shape=(3, 2, 3))
def test_keras_mask(self):
x = np.ones((10, 10))
y = keras.layers.Masking(1.)(x)
self.assertTrue(hasattr(y, '_keras_mask'))
self.assertTrue(y._keras_mask is not None)
self.assertAllClose(self.evaluate(y._keras_mask), np.zeros((10,)))
def test_activation(self):
# with string argument
testing_utils.layer_test(
keras.layers.Activation,
kwargs={'activation': 'relu'},
input_shape=(3, 2))
# with function argument
testing_utils.layer_test(
keras.layers.Activation,
kwargs={'activation': keras.backend.relu},
input_shape=(3, 2))
def test_reshape(self):
testing_utils.layer_test(
keras.layers.Reshape,
kwargs={'target_shape': (8, 1)},
input_shape=(3, 2, 4))
testing_utils.layer_test(
keras.layers.Reshape,
kwargs={'target_shape': (-1, 1)},
input_shape=(3, 2, 4))
testing_utils.layer_test(
keras.layers.Reshape,
kwargs={'target_shape': (1, -1)},
input_shape=(3, 2, 4))
testing_utils.layer_test(
keras.layers.Reshape,
kwargs={'target_shape': (-1, 1)},
input_shape=(None, None, 2))
def test_permute(self):
testing_utils.layer_test(
keras.layers.Permute, kwargs={'dims': (2, 1)}, input_shape=(3, 2, 4))
def test_permute_errors_on_invalid_starting_dims_index(self):
with self.assertRaisesRegexp(ValueError, r'Invalid permutation .*dims.*'):
testing_utils.layer_test(
keras.layers.Permute,
kwargs={'dims': (0, 1, 2)}, input_shape=(3, 2, 4))
def test_permute_errors_on_invalid_set_of_dims_indices(self):
with self.assertRaisesRegexp(ValueError, r'Invalid permutation .*dims.*'):
testing_utils.layer_test(
keras.layers.Permute,
kwargs={'dims': (1, 4, 2)}, input_shape=(3, 2, 4))
def test_flatten(self):
testing_utils.layer_test(
keras.layers.Flatten, kwargs={}, input_shape=(3, 2, 4))
# Test channels_first
inputs = np.random.random((10, 3, 5, 5)).astype('float32')
outputs = testing_utils.layer_test(
keras.layers.Flatten,
kwargs={'data_format': 'channels_first'},
input_data=inputs)
target_outputs = np.reshape(
np.transpose(inputs, (0, 2, 3, 1)), (-1, 5 * 5 * 3))
self.assertAllClose(outputs, target_outputs)
def test_flatten_scalar_channels(self):
testing_utils.layer_test(
keras.layers.Flatten, kwargs={}, input_shape=(3,))
# Test channels_first
inputs = np.random.random((10,)).astype('float32')
outputs = testing_utils.layer_test(
keras.layers.Flatten,
kwargs={'data_format': 'channels_first'},
input_data=inputs)
target_outputs = np.expand_dims(inputs, -1)
self.assertAllClose(outputs, target_outputs)
def test_repeat_vector(self):
testing_utils.layer_test(
keras.layers.RepeatVector, kwargs={'n': 3}, input_shape=(3, 2))
def test_dense(self):
testing_utils.layer_test(
keras.layers.Dense, kwargs={'units': 3}, input_shape=(3, 2))
testing_utils.layer_test(
keras.layers.Dense, kwargs={'units': 3}, input_shape=(3, 4, 2))
testing_utils.layer_test(
keras.layers.Dense, kwargs={'units': 3}, input_shape=(None, None, 2))
testing_utils.layer_test(
keras.layers.Dense, kwargs={'units': 3}, input_shape=(3, 4, 5, 2))
def test_dense_dtype(self):
inputs = ops.convert_to_tensor(
np.random.randint(low=0, high=7, size=(2, 2)))
layer = keras.layers.Dense(5, dtype='float32')
outputs = layer(inputs)
self.assertEqual(outputs.dtype, 'float32')
def test_dense_with_policy(self):
inputs = ops.convert_to_tensor(
np.random.randint(low=0, high=7, size=(2, 2)), dtype='float16')
layer = keras.layers.Dense(5, dtype=policy.Policy('infer_float32_vars'))
outputs = layer(inputs)
self.assertEqual(outputs.dtype, 'float16')
self.assertEqual(layer.kernel.dtype, 'float32')
def test_dense_regularization(self):
layer = keras.layers.Dense(
3,
kernel_regularizer=keras.regularizers.l1(0.01),
bias_regularizer='l1',
activity_regularizer='l2',
name='dense_reg')
layer(keras.backend.variable(np.ones((2, 4))))
self.assertEqual(3, len(layer.losses))
def test_dense_constraints(self):
k_constraint = keras.constraints.max_norm(0.01)
b_constraint = keras.constraints.max_norm(0.01)
layer = keras.layers.Dense(
3, kernel_constraint=k_constraint, bias_constraint=b_constraint)
layer(keras.backend.variable(np.ones((2, 4))))
self.assertEqual(layer.kernel.constraint, k_constraint)
self.assertEqual(layer.bias.constraint, b_constraint)
def test_activity_regularization(self):
layer = keras.layers.ActivityRegularization(l1=0.1)
layer(keras.backend.variable(np.ones((2, 4))))
self.assertEqual(1, len(layer.losses))
config = layer.get_config()
self.assertEqual(config.pop('l1'), 0.1)
def test_numpy_inputs(self):
if context.executing_eagerly():
layer = keras.layers.RepeatVector(2)
x = np.ones((10, 10))
self.assertAllEqual(np.ones((10, 2, 10)), layer(x))
layer = keras.layers.Concatenate()
x, y = np.ones((10, 10)), np.ones((10, 10))
self.assertAllEqual(np.ones((10, 20)), layer([x, y]))
if __name__ == '__main__':
test.main()
| ageron/tensorflow | tensorflow/python/keras/layers/core_test.py | Python | apache-2.0 | 11,584 |
# vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Python header conversion
# Copyright (c) 2013,2014 Dave Hughes <[email protected]>
#
# Original headers
# Copyright (c) 2012, Broadcom Europe Ltd
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
# Make Py2's str equivalent to Py3's
str = type('')
import ctypes as ct
import warnings
_lib = ct.CDLL('libbcm_host.so')
# bcm_host.h #################################################################
bcm_host_init = _lib.bcm_host_init
bcm_host_init.argtypes = []
bcm_host_init.restype = None
bcm_host_deinit = _lib.bcm_host_deinit
bcm_host_deinit.argtypes = []
bcm_host_deinit.restype = None
graphics_get_display_size = _lib.graphics_get_display_size
graphics_get_display_size.argtypes = [ct.c_uint16, ct.POINTER(ct.c_uint32), ct.POINTER(ct.c_uint32)]
graphics_get_display_size.restype = ct.c_int32
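# Illustrative usage (not part of the original binding): query the size of
# display 0 through the ctypes wrappers above. Guarded so it only runs when
# the module is executed directly on a device with libbcm_host.so available.
if __name__ == '__main__':
    bcm_host_init()
    width, height = ct.c_uint32(), ct.c_uint32()
    # graphics_get_display_size returns a negative value on failure.
    if graphics_get_display_size(0, ct.byref(width), ct.byref(height)) >= 0:
        print('display 0: %dx%d' % (width.value, height.value))
    bcm_host_deinit()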
| naziris/HomeSecPi | picamera/bcm_host.py | Python | apache-2.0 | 2,448 |
# quick demo of some python image filters
# using raspberry pi camera
import Tkinter as tk
from picamera import PiCamera
from time import sleep
from PIL import Image,ImageFilter,ImageChops,ImageTk
imagefile = "image.jpg"
w = 320
h = 240
lastfilter = "none"
camera = PiCamera()
def takephoto():
camera.capture(imagefile)
image1 = Image.open(imagefile)
return image1
def photoloop():
count = 0
while (count < 9):
sleep(0.5)
        newphoto()
        if lastfilter != "none":
            dofilter(image1, lastfilter)
count = count + 1
def newphoto():
global image1
image1 = takephoto()
tkimage1 = ImageTk.PhotoImage(image1)
panel1.configure(image=tkimage1)
panel1.image = tkimage1
def invert():
global image1
image1= ImageChops.invert(image1)
tkimage1 = ImageTk.PhotoImage(image1)
panel1.configure(image=tkimage1)
panel1.image = tkimage1
def grayscale():
global image1
r, g, b = image1.split()
image1 = Image.merge("RGB", (g,g,g))
tkimage1 = ImageTk.PhotoImage(image1)
panel1.configure(image=tkimage1)
panel1.image = tkimage1
def dofilter (theimage,thefilter):
    # remember the selected filter so photoloop() can re-apply it to new photos
    global lastfilter
    lastfilter = thefilter
    global image1
image1 = image1.filter(thefilter)
tkimage1 = ImageTk.PhotoImage(image1)
panel1.configure(image=tkimage1)
panel1.image = tkimage1
# Setup a window
root = tk.Tk()
root.title('Image')
image1 = takephoto()
tkimage1 = ImageTk.PhotoImage(image1)
w = tkimage1.width()
h = tkimage1.height()
root.geometry("%dx%d+%d+%d" % (w, h, 0, 0))
# root has no image argument, so use a label as a panel
panel1 = tk.Label(root, image=tkimage1)
panel1.pack(side='top', fill='both', expand='yes')
# save the panel's image from 'garbage collection'
panel1.image = tkimage1
# Add some buttons
buttonrow = tk.Frame(root)
buttonrow.place(y=0,x=0)
button = tk.Button(buttonrow, text='CAMERA',command = lambda: newphoto())
button.pack(side='left',)
button = tk.Button(buttonrow, text='LOOP',command = lambda: photoloop())
button.pack(side='left',)
button = tk.Button(buttonrow, text='INVERT',command = lambda: invert())
button.pack(side='left',)
button = tk.Button(buttonrow, text='GRAY',command = lambda: grayscale())
button.pack(side='left',)
# add some filter buttons
button = tk.Button(buttonrow, text='BLUR',command = lambda: dofilter(image1,ImageFilter.BLUR))
button.pack(side='left')
button = tk.Button(buttonrow, text='CONTOUR',command = lambda: dofilter(image1,ImageFilter.CONTOUR))
button.pack(side='left')
button = tk.Button(buttonrow, text='FIND_EDGES',command = lambda: dofilter(image1,ImageFilter.FIND_EDGES))
button.pack(side='left')
button = tk.Button(buttonrow, text='EMBOSS',command = lambda: dofilter(image1,ImageFilter.EMBOSS))
button.pack(side='left')
button = tk.Button(buttonrow, text='EDGE_ENHANCE',command = lambda: dofilter(image1,ImageFilter.EDGE_ENHANCE))
button.pack(side='left')
button = tk.Button(buttonrow, text='CLOSE',command = lambda: root.destroy())
button.pack(side='left')
root.mainloop() | emschimmel/CameraPi | camera_try.py | Python | apache-2.0 | 2,985 |
"""Support for switches which integrates with other components."""
import logging
import voluptuous as vol
from homeassistant.components.switch import (
ENTITY_ID_FORMAT,
PLATFORM_SCHEMA,
SwitchEntity,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_FRIENDLY_NAME,
CONF_ENTITY_PICTURE_TEMPLATE,
CONF_ICON_TEMPLATE,
CONF_SWITCHES,
CONF_UNIQUE_ID,
CONF_VALUE_TEMPLATE,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import callback
from homeassistant.exceptions import TemplateError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import async_generate_entity_id
from homeassistant.helpers.reload import async_setup_reload_service
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.script import Script
from .const import CONF_AVAILABILITY_TEMPLATE, DOMAIN, PLATFORMS
from .template_entity import TemplateEntity
_LOGGER = logging.getLogger(__name__)
_VALID_STATES = [STATE_ON, STATE_OFF, "true", "false"]
ON_ACTION = "turn_on"
OFF_ACTION = "turn_off"
SWITCH_SCHEMA = vol.Schema(
{
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_ICON_TEMPLATE): cv.template,
vol.Optional(CONF_ENTITY_PICTURE_TEMPLATE): cv.template,
vol.Optional(CONF_AVAILABILITY_TEMPLATE): cv.template,
vol.Required(ON_ACTION): cv.SCRIPT_SCHEMA,
vol.Required(OFF_ACTION): cv.SCRIPT_SCHEMA,
vol.Optional(ATTR_FRIENDLY_NAME): cv.string,
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Optional(CONF_UNIQUE_ID): cv.string,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_SWITCHES): cv.schema_with_slug_keys(SWITCH_SCHEMA)}
)
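# Illustrative YAML configuration (not part of this module) matching the
# schema above; the entity and script names are placeholders:
#
#   switch:
#     - platform: template
#       switches:
#         skylight:
#           friendly_name: "Skylight"
#           value_template: "{{ is_state('sensor.skylight', 'on') }}"
#           turn_on:
#             service: script.open_skylight
#           turn_off:
#             service: script.close_skylight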
async def _async_create_entities(hass, config):
"""Create the Template switches."""
switches = []
for device, device_config in config[CONF_SWITCHES].items():
friendly_name = device_config.get(ATTR_FRIENDLY_NAME, device)
state_template = device_config.get(CONF_VALUE_TEMPLATE)
icon_template = device_config.get(CONF_ICON_TEMPLATE)
entity_picture_template = device_config.get(CONF_ENTITY_PICTURE_TEMPLATE)
availability_template = device_config.get(CONF_AVAILABILITY_TEMPLATE)
on_action = device_config[ON_ACTION]
off_action = device_config[OFF_ACTION]
unique_id = device_config.get(CONF_UNIQUE_ID)
switches.append(
SwitchTemplate(
hass,
device,
friendly_name,
state_template,
icon_template,
entity_picture_template,
availability_template,
on_action,
off_action,
unique_id,
)
)
return switches
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the template switches."""
await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
async_add_entities(await _async_create_entities(hass, config))
class SwitchTemplate(TemplateEntity, SwitchEntity, RestoreEntity):
"""Representation of a Template switch."""
def __init__(
self,
hass,
device_id,
friendly_name,
state_template,
icon_template,
entity_picture_template,
availability_template,
on_action,
off_action,
unique_id,
):
"""Initialize the Template switch."""
super().__init__(
availability_template=availability_template,
icon_template=icon_template,
entity_picture_template=entity_picture_template,
)
self.entity_id = async_generate_entity_id(
ENTITY_ID_FORMAT, device_id, hass=hass
)
self._name = friendly_name
self._template = state_template
domain = __name__.split(".")[-2]
self._on_script = Script(hass, on_action, friendly_name, domain)
self._off_script = Script(hass, off_action, friendly_name, domain)
self._state = False
self._unique_id = unique_id
@callback
def _update_state(self, result):
super()._update_state(result)
if isinstance(result, TemplateError):
self._state = None
return
self._state = result.lower() in ("true", STATE_ON)
async def async_added_to_hass(self):
"""Register callbacks."""
if self._template is None:
# restore state after startup
await super().async_added_to_hass()
state = await self.async_get_last_state()
if state:
self._state = state.state == STATE_ON
# no need to listen for events
else:
self.add_template_attribute(
"_state", self._template, None, self._update_state
)
await super().async_added_to_hass()
@property
def name(self):
"""Return the name of the switch."""
return self._name
@property
def unique_id(self):
"""Return the unique id of this switch."""
return self._unique_id
@property
def is_on(self):
"""Return true if device is on."""
return self._state
@property
def should_poll(self):
"""Return the polling state."""
return False
async def async_turn_on(self, **kwargs):
"""Fire the on action."""
await self._on_script.async_run(context=self._context)
if self._template is None:
self._state = True
self.async_write_ha_state()
async def async_turn_off(self, **kwargs):
"""Fire the off action."""
await self._off_script.async_run(context=self._context)
if self._template is None:
self._state = False
self.async_write_ha_state()
@property
def assumed_state(self):
"""State is assumed, if no template given."""
return self._template is None
| titilambert/home-assistant | homeassistant/components/template/switch.py | Python | apache-2.0 | 6,023 |
# Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Federated CIFAR-10 classification library using TFF."""
import functools
from typing import Callable, Optional
from absl import logging
import tensorflow as tf
import tensorflow_federated as tff
from fedopt_guide import training_loop
from utils.datasets import cifar10_dataset
from utils.models import resnet_models
CIFAR_SHAPE = (32, 32, 3)
NUM_CLASSES = 10  # CIFAR-10 has 10 classes.
def run_federated(
iterative_process_builder: Callable[..., tff.templates.IterativeProcess],
client_epochs_per_round: int,
client_batch_size: int,
clients_per_round: int,
client_datasets_random_seed: Optional[int] = None,
crop_size: Optional[int] = 24,
total_rounds: Optional[int] = 1500,
experiment_name: Optional[str] = 'federated_cifar10',
root_output_dir: Optional[str] = '/tmp/fed_opt',
uniform_weighting: Optional[bool] = False,
**kwargs):
"""Runs an iterative process on the CIFAR-10 classification task.
This method will load and pre-process dataset and construct a model used for
the task. It then uses `iterative_process_builder` to create an iterative
process that it applies to the task, using
`federated_research.utils.training_loop`.
We assume that the iterative process has the following functional type
signatures:
* `initialize`: `( -> S@SERVER)` where `S` represents the server state.
* `next`: `<S@SERVER, {B*}@CLIENTS> -> <S@SERVER, T@SERVER>` where `S`
represents the server state, `{B*}` represents the client datasets,
and `T` represents a python `Mapping` object.
The iterative process must also have a callable attribute `get_model_weights`
that takes as input the state of the iterative process, and returns a
`tff.learning.ModelWeights` object.
Args:
iterative_process_builder: A function that accepts a no-arg `model_fn`, and
returns a `tff.templates.IterativeProcess`. The `model_fn` must return a
`tff.learning.Model`.
client_epochs_per_round: An integer representing the number of epochs of
training performed per client in each training round.
client_batch_size: An integer representing the batch size used on clients.
clients_per_round: An integer representing the number of clients
participating in each round.
client_datasets_random_seed: An optional int used to seed which clients are
sampled at each round. If `None`, no seed is used.
crop_size: An optional integer representing the resulting size of input
images after preprocessing.
total_rounds: The number of federated training rounds.
experiment_name: The name of the experiment being run. This will be appended
to the `root_output_dir` for purposes of writing outputs.
root_output_dir: The name of the root output directory for writing
experiment outputs.
uniform_weighting: Whether to weigh clients uniformly. If false, clients are
weighted by the number of samples.
**kwargs: Additional arguments configuring the training loop. For details on
supported arguments, see `federated_research/utils/training_utils.py`.
"""
crop_shape = (crop_size, crop_size, 3)
cifar_train, _ = cifar10_dataset.get_federated_datasets(
train_client_epochs_per_round=client_epochs_per_round,
train_client_batch_size=client_batch_size,
crop_shape=crop_shape)
_, cifar_test = cifar10_dataset.get_centralized_datasets(
crop_shape=crop_shape)
input_spec = cifar_train.create_tf_dataset_for_client(
cifar_train.client_ids[0]).element_spec
model_builder = functools.partial(
resnet_models.create_resnet18,
input_shape=crop_shape,
num_classes=NUM_CLASSES)
loss_builder = tf.keras.losses.SparseCategoricalCrossentropy
metrics_builder = lambda: [tf.keras.metrics.SparseCategoricalAccuracy()]
def tff_model_fn() -> tff.learning.Model:
return tff.learning.from_keras_model(
keras_model=model_builder(),
input_spec=input_spec,
loss=loss_builder(),
metrics=metrics_builder())
if uniform_weighting:
client_weight_fn = tff.learning.ClientWeighting.UNIFORM
else:
client_weight_fn = tff.learning.ClientWeighting.NUM_EXAMPLES
training_process = iterative_process_builder(tff_model_fn, client_weight_fn)
client_datasets_fn = functools.partial(
tff.simulation.build_uniform_sampling_fn(
dataset=cifar_train.client_ids,
random_seed=client_datasets_random_seed), # pytype: disable=wrong-keyword-args # gen-stub-imports
size=clients_per_round)
evaluate_fn = tff.learning.build_federated_evaluation(
tff_model_fn, use_experimental_simulation_loop=True)
def validation_fn(model_weights, round_num):
del round_num
return evaluate_fn(model_weights, [cifar_test])
def test_fn(model_weights):
return evaluate_fn(model_weights, [cifar_test])
logging.info('Training model:')
logging.info(model_builder().summary())
training_loop.run(
iterative_process=training_process,
train_client_datasets_fn=client_datasets_fn,
evaluation_fn=validation_fn,
test_fn=test_fn,
total_rounds=total_rounds,
experiment_name=experiment_name,
root_output_dir=root_output_dir,
**kwargs)
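# Illustrative wiring (not part of the original module): a minimal sketch of
# how `run_federated` might be invoked with a federated averaging builder.
# The optimizer settings are placeholders and the TFF API spelling may vary
# between releases.
#
#   def iterative_process_builder(model_fn, client_weighting):
#     return tff.learning.build_federated_averaging_process(
#         model_fn,
#         client_optimizer_fn=lambda: tf.keras.optimizers.SGD(0.1),
#         server_optimizer_fn=lambda: tf.keras.optimizers.SGD(1.0),
#         client_weighting=client_weighting)
#
#   run_federated(iterative_process_builder, client_epochs_per_round=1,
#                 client_batch_size=20, clients_per_round=10)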
| google-research/federated | fedopt_guide/cifar10_resnet/federated_cifar10.py | Python | apache-2.0 | 5,796 |
#coding=utf-8
from django.db import models
from django.contrib.auth.models import User
class Activity(models.Model):
owner = models.ForeignKey(User, null=False)
text = models.CharField(max_length=20, unique=True)
class Dessert(models.Model):
activity = models.ForeignKey(Activity, null=False)
description = models.TextField()
photo = models.ImageField()
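# Illustrative usage (not part of the original module); assumes a configured
# Django project and an existing `some_user` User instance:
#
#   activity = Activity.objects.create(owner=some_user, text="baking day")
#   Dessert.objects.create(activity=activity, description="Chocolate cake",
#                          photo="desserts/cake.jpg")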
| Moonshile/goondream | src/desserts/models.py | Python | apache-2.0 | 376 |
import time
from tsm.common.app import exception
import requests
import json
from requests.packages.urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter
KANGROUTER_WEBSERVICE_APPLICATION_ROOT="/kangrouter/srv/v1"
class KangRouterClient:
pathbase = "https://thesolvingmachine.com/kangrouter/srv/v1/solvers"
def __init__(self,apiKey,licenseId):
self.headers = {"content-type": "application/json",
"Authorization": apiKey}
self.params = {"licenseId" : licenseId }
retries = Retry(total=5,
backoff_factor=0.75)
self.session = requests.Session()
    # requests matches adapters by URL prefix, so mount on the full service
    # URL (the bare application-root path would never match a request).
    self.session.mount(self.pathbase,
                       HTTPAdapter(max_retries=retries))
def validateReply(self,req):
if req.status_code >= 400 and req.status_code <= 500:
try:
j = req.json()
except ValueError:
raise exception.InternalError(req.text,req.status_code)
raise exception.jsonToException(req.json())
def create(self,problem,**kwargs):
path = self.pathbase
payload=json.dumps(problem)
params = self.params.copy()
params.update(kwargs)
req = self.session.post(path,
params=params,
headers=self.headers,
data=payload)
self.validateReply(req)
return req.text
def delete(self,solverId):
path = "{base}/{solverId}".format(base=self.pathbase,
solverId=str(solverId))
req = self.session.delete(path,
params=self.params,
headers=self.headers)
self.validateReply(req)
return True
def stop(self,solverId):
path = "{base}/{solverId}/stop".format(base=self.pathbase,
solverId=str(solverId))
req = self.session.put(path,
params=self.params,
headers=self.headers)
self.validateReply(req)
return True
def getStatus(self,solverId):
path = "{base}/{solverId}/status".format(base=self.pathbase,
solverId=str(solverId))
req = self.session.get(path,
params=self.params,
headers=self.headers)
self.validateReply(req)
return req.json()
def getSolution(self,solverId):
path = "{base}/{solverId}/solution".format(base=self.pathbase,
solverId=str(solverId))
req = self.session.get(path,
params=self.params,
headers=self.headers)
self.validateReply(req)
return req.json()
# polling
def createAndWait(self,problem,cancel,**kwargs):
solverId = self.create(problem,**kwargs)
timeout = 300
while not cancel() and timeout>0:
status = self.getStatus(solverId)
if status["execStatus"] =="invalid":
raise exception.solverError(json.dumps(status["errors"]))
if status["execStatus"] =="completed":
return self.getSolution(solverId)
time.sleep(1)
timeout -= 1
if timeout == 0:
raise exception.InternalError("Timed out waiting for solver")
raise exception.UserCancelled()
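# Illustrative usage (not part of the original client); the API key, license
# id and problem payload are placeholders:
#
#   client = KangRouterClient("my-api-key", "my-license-id")
#   solver_id = client.create({"jobs": [], "vehicles": []})
#   print(client.getStatus(solver_id))
#   solution = client.createAndWait(problem={"jobs": [], "vehicles": []},
#                                   cancel=lambda: False)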
| TheSolvingMachine/kangrouter-py | kangrouter.py | Python | apache-2.0 | 3,278 |
#!/usr/bin/env python
"""
This pretty much just tests creating a user, a universe, a planet, a building type name, a building
type, and a building.
"""
import os
import sys
import sqlalchemy
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import legendary_waffle
# Database setup
db_engine = sqlalchemy.create_engine("sqlite://")
legendary_waffle.models.MODELBASE.metadata.create_all(db_engine)
legendary_waffle.models.MODELBASE.metadata.bind = db_engine
db_session = sqlalchemy.orm.sessionmaker(bind=db_engine)
db = db_session()
# Create the user
legendary_waffle.model_create(db, legendary_waffle.models.User, name='sk4ly')
print "Users: {}".format(legendary_waffle.model_read(db, legendary_waffle.models.User))
# Create the universe
universe_config = {
"name": 'poopiverse',
"map_size": 1000,
"max_planets": 1000,
"max_players": 10
}
legendary_waffle.model_create(db, legendary_waffle.models.Universe, **universe_config)
print "Universe: {}".format(legendary_waffle.model_read(db, legendary_waffle.models.Universe))
# Create the planet
planet_config = {
"universe": 1, # The pkid of the universe 'poopiverse'
"coordinate_x": 1,
"coordinate_y": 1,
"name": 'bloth',
"habitable": True,
"player_control": 1, # The pkid of user 'sk4ly'
"default_condition": 1000,
"default_resources": 1000,
"current_condition": 1000,
"current_resources": 1000
}
legendary_waffle.model_create(db, legendary_waffle.models.Planet, **planet_config)
print "Planet: {}".format(legendary_waffle.model_read(db, legendary_waffle.models.Planet))
# Create building type name
legendary_waffle.model_create(db, legendary_waffle.models.BuildingTypeName, name="Control Center")
print "Building Type Name: {}".format(legendary_waffle.model_read(db, legendary_waffle.models.BuildingTypeName))
# Create building type
building_type_config = {
"typename": 1, # The pkid of the building type name 'Control Center'
"description": "This is the control center",
"default_condition": 100,
"default_firepower": 0,
"default_storage": 100,
"rhr_passive": 0,
"rhr_active": 0,
"rhr_destructive": 0,
"build_resource_reqs": 500,
}
legendary_waffle.model_create(db, legendary_waffle.models.BuildingType, **building_type_config)
print "Building Type: {}".format(legendary_waffle.model_read(db, legendary_waffle.models.BuildingType))
# Now create our new building
building_config = {
"building_type": 1, # The pkid of the building type with the name 'Control Center'
"universe": 1, # The pkid of the universe 'poopiverse'
"planet": 1, # The pkid of the planet 'bloth'
"player_control": 1, # The pkid of the user 'sk4ly'
}
legendary_waffle.model_create(db, legendary_waffle.models.Building, **building_config)
print "Building: {}".format(legendary_waffle.model_read(db, legendary_waffle.models.Building))
| bplower/legendary-waffle-lib | test/test_universe_create.py | Python | apache-2.0 | 2,904 |
"""heroku_blog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from blog.views import index, signup, login, logout
urlpatterns = [
url(r'^$', index, name='index'),
url(r'^signup', signup, name='signup'),
url(r'^login', login, name='login'),
url(r'^logout', logout, name='logout'),
url(r'^admin/', include(admin.site.urls)),
]
| barbossa/django-heroku-blog | heroku_blog/urls.py | Python | apache-2.0 | 1,004 |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for nova websocketproxy."""
import mock
from nova.console import websocketproxy
from nova import exception
from nova import test
class NovaProxyRequestHandlerBaseTestCase(test.TestCase):
def setUp(self):
super(NovaProxyRequestHandlerBaseTestCase, self).setUp()
self.wh = websocketproxy.NovaProxyRequestHandlerBase()
self.wh.socket = mock.MagicMock()
self.wh.msg = mock.MagicMock()
self.wh.do_proxy = mock.MagicMock()
self.wh.headers = mock.MagicMock()
@mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
def test_new_websocket_client(self, check_token):
check_token.return_value = {
'host': 'node1',
'port': '10000'
}
self.wh.socket.return_value = '<socket>'
self.wh.path = "ws://127.0.0.1/?token=123-456-789"
self.wh.new_websocket_client()
check_token.assert_called_with(mock.ANY, token="123-456-789")
self.wh.socket.assert_called_with('node1', 10000, connect=True)
self.wh.do_proxy.assert_called_with('<socket>')
@mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
def test_new_websocket_client_token_invalid(self, check_token):
check_token.return_value = False
self.wh.path = "ws://127.0.0.1/?token=XXX"
self.assertRaises(exception.InvalidToken,
self.wh.new_websocket_client)
check_token.assert_called_with(mock.ANY, token="XXX")
@mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
def test_new_websocket_client_novnc(self, check_token):
check_token.return_value = {
'host': 'node1',
'port': '10000'
}
self.wh.socket.return_value = '<socket>'
self.wh.path = "http://127.0.0.1/"
self.wh.headers.getheader.return_value = "token=123-456-789"
self.wh.new_websocket_client()
check_token.assert_called_with(mock.ANY, token="123-456-789")
self.wh.socket.assert_called_with('node1', 10000, connect=True)
self.wh.do_proxy.assert_called_with('<socket>')
@mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
def test_new_websocket_client_novnc_token_invalid(self, check_token):
check_token.return_value = False
self.wh.path = "http://127.0.0.1/"
self.wh.headers.getheader.return_value = "token=XXX"
self.assertRaises(exception.InvalidToken,
self.wh.new_websocket_client)
check_token.assert_called_with(mock.ANY, token="XXX")
@mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
def test_new_websocket_client_internal_access_path(self, check_token):
check_token.return_value = {
'host': 'node1',
'port': '10000',
'internal_access_path': 'vmid'
}
tsock = mock.MagicMock()
tsock.recv.return_value = "HTTP/1.1 200 OK\r\n\r\n"
self.wh.socket.return_value = tsock
self.wh.path = "ws://127.0.0.1/?token=123-456-789"
self.wh.new_websocket_client()
check_token.assert_called_with(mock.ANY, token="123-456-789")
self.wh.socket.assert_called_with('node1', 10000, connect=True)
self.wh.do_proxy.assert_called_with(tsock)
@mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
def test_new_websocket_client_internal_access_path_err(self, check_token):
check_token.return_value = {
'host': 'node1',
'port': '10000',
'internal_access_path': 'xxx'
}
tsock = mock.MagicMock()
tsock.recv.return_value = "HTTP/1.1 500 Internal Server Error\r\n\r\n"
self.wh.socket.return_value = tsock
self.wh.path = "ws://127.0.0.1/?token=123-456-789"
self.assertRaises(Exception, self.wh.new_websocket_client) # noqa
check_token.assert_called_with(mock.ANY, token="123-456-789")
| berrange/nova | nova/tests/console/test_websocketproxy.py | Python | apache-2.0 | 4,576 |
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2018 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq add rack --bunker`."""
from aquilon.worker.broker import BrokerCommand # pylint: disable=W0611
from aquilon.worker.commands.add_rack import CommandAddRack
class CommandAddRackBunker(CommandAddRack):
required_parameters = ["bunker", "row", "column"]
| quattor/aquilon | lib/aquilon/worker/commands/add_rack_bunker.py | Python | apache-2.0 | 1,006 |
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from rally.common import logging
from rally.common.utils import RandomNameGeneratorMixin
from rally_ovs.plugins.ovs import ovsclients
from rally_ovs.plugins.ovs import utils
LOG = logging.getLogger(__name__)
class OvnClientMixin(ovsclients.ClientsMixin, RandomNameGeneratorMixin):
def _get_ovn_controller(self, install_method="sandbox"):
ovn_nbctl = self.controller_client("ovn-nbctl")
ovn_nbctl.set_sandbox("controller-sandbox", install_method,
self.context['controller']['host_container'])
ovn_nbctl.set_daemon_socket(self.context.get("daemon_socket", None))
return ovn_nbctl
def _start_daemon(self):
ovn_nbctl = self._get_ovn_controller(self.install_method)
return ovn_nbctl.start_daemon()
def _stop_daemon(self):
ovn_nbctl = self._get_ovn_controller(self.install_method)
ovn_nbctl.stop_daemon()
def _restart_daemon(self):
self._stop_daemon()
return self._start_daemon()
def _create_lswitches(self, lswitch_create_args, num_switches=-1):
self.RESOURCE_NAME_FORMAT = "lswitch_XXXXXX_XXXXXX"
if (num_switches == -1):
num_switches = lswitch_create_args.get("amount", 1)
batch = lswitch_create_args.get("batch", num_switches)
start_cidr = lswitch_create_args.get("start_cidr", "")
if start_cidr:
start_cidr = netaddr.IPNetwork(start_cidr)
mcast_snoop = lswitch_create_args.get("mcast_snoop", "true")
mcast_idle = lswitch_create_args.get("mcast_idle_timeout", 300)
mcast_table_size = lswitch_create_args.get("mcast_table_size", 2048)
LOG.info("Create lswitches method: %s" % self.install_method)
ovn_nbctl = self._get_ovn_controller(self.install_method)
ovn_nbctl.enable_batch_mode()
flush_count = batch
lswitches = []
for i in range(num_switches):
name = self.generate_random_name()
if start_cidr:
cidr = start_cidr.next(i)
name = "lswitch_%s" % cidr
else:
name = self.generate_random_name()
other_cfg = {
'mcast_snoop': mcast_snoop,
'mcast_idle_timeout': mcast_idle,
'mcast_table_size': mcast_table_size
}
lswitch = ovn_nbctl.lswitch_add(name, other_cfg)
if start_cidr:
lswitch["cidr"] = cidr
LOG.info("create %(name)s %(cidr)s" % \
{"name": name, "cidr": lswitch.get("cidr", "")})
lswitches.append(lswitch)
flush_count -= 1
if flush_count < 1:
ovn_nbctl.flush()
flush_count = batch
ovn_nbctl.flush() # ensure all commands be run
ovn_nbctl.enable_batch_mode(False)
return lswitches
def _create_routers(self, router_create_args):
self.RESOURCE_NAME_FORMAT = "lrouter_XXXXXX_XXXXXX"
amount = router_create_args.get("amount", 1)
batch = router_create_args.get("batch", 1)
ovn_nbctl = self._get_ovn_controller(self.install_method)
ovn_nbctl.enable_batch_mode()
flush_count = batch
lrouters = []
for i in range(amount):
name = self.generate_random_name()
lrouter = ovn_nbctl.lrouter_add(name)
lrouters.append(lrouter)
flush_count -= 1
if flush_count < 1:
ovn_nbctl.flush()
flush_count = batch
ovn_nbctl.flush() # ensure all commands be run
ovn_nbctl.enable_batch_mode(False)
return lrouters
def _connect_network_to_router(self, router, network):
LOG.info("Connect network %s to router %s" % (network["name"], router["name"]))
ovn_nbctl = self.controller_client("ovn-nbctl")
ovn_nbctl.set_sandbox("controller-sandbox", self.install_method,
self.context['controller']['host_container'])
ovn_nbctl.enable_batch_mode(False)
base_mac = [i[:2] for i in self.task["uuid"].split('-')]
base_mac[0] = str(hex(int(base_mac[0], 16) & 254))
base_mac[3:] = ['00']*3
mac = utils.get_random_mac(base_mac)
lrouter_port = ovn_nbctl.lrouter_port_add(router["name"], network["name"], mac,
str(network["cidr"]))
ovn_nbctl.flush()
switch_router_port = "rp-" + network["name"]
lport = ovn_nbctl.lswitch_port_add(network["name"], switch_router_port)
ovn_nbctl.db_set('Logical_Switch_Port', switch_router_port,
('options', {"router-port":network["name"]}),
('type', 'router'),
('address', 'router'))
ovn_nbctl.flush()
def _connect_networks_to_routers(self, lnetworks, lrouters, networks_per_router):
for lrouter in lrouters:
LOG.info("Connect %s networks to router %s" % (networks_per_router, lrouter["name"]))
for lnetwork in lnetworks[:networks_per_router]:
LOG.info("connect networks %s cidr %s" % (lnetwork["name"], lnetwork["cidr"]))
self._connect_network_to_router(lrouter, lnetwork)
lnetworks = lnetworks[networks_per_router:]
| openvswitch/ovn-scale-test | rally_ovs/plugins/ovs/ovnclients.py | Python | apache-2.0 | 5,955 |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.cloud.spanner_v1.proto import (
result_set_pb2 as google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2,
)
from google.cloud.spanner_v1.proto import (
spanner_pb2 as google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2,
)
from google.cloud.spanner_v1.proto import (
transaction_pb2 as google_dot_cloud_dot_spanner__v1_dot_proto_dot_transaction__pb2,
)
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class SpannerStub(object):
"""Cloud Spanner API
The Cloud Spanner API can be used to manage sessions and execute
transactions on data stored in Cloud Spanner databases.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.CreateSession = channel.unary_unary(
"/google.spanner.v1.Spanner/CreateSession",
request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.CreateSessionRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.Session.FromString,
)
self.BatchCreateSessions = channel.unary_unary(
"/google.spanner.v1.Spanner/BatchCreateSessions",
request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.BatchCreateSessionsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.BatchCreateSessionsResponse.FromString,
)
self.GetSession = channel.unary_unary(
"/google.spanner.v1.Spanner/GetSession",
request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.GetSessionRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.Session.FromString,
)
self.ListSessions = channel.unary_unary(
"/google.spanner.v1.Spanner/ListSessions",
request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ListSessionsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ListSessionsResponse.FromString,
)
self.DeleteSession = channel.unary_unary(
"/google.spanner.v1.Spanner/DeleteSession",
request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.DeleteSessionRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.ExecuteSql = channel.unary_unary(
"/google.spanner.v1.Spanner/ExecuteSql",
request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ExecuteSqlRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.ResultSet.FromString,
)
self.ExecuteStreamingSql = channel.unary_stream(
"/google.spanner.v1.Spanner/ExecuteStreamingSql",
request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ExecuteSqlRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.PartialResultSet.FromString,
)
self.ExecuteBatchDml = channel.unary_unary(
"/google.spanner.v1.Spanner/ExecuteBatchDml",
request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ExecuteBatchDmlRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ExecuteBatchDmlResponse.FromString,
)
self.Read = channel.unary_unary(
"/google.spanner.v1.Spanner/Read",
request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ReadRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.ResultSet.FromString,
)
self.StreamingRead = channel.unary_stream(
"/google.spanner.v1.Spanner/StreamingRead",
request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ReadRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.PartialResultSet.FromString,
)
self.BeginTransaction = channel.unary_unary(
"/google.spanner.v1.Spanner/BeginTransaction",
request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.BeginTransactionRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_transaction__pb2.Transaction.FromString,
)
self.Commit = channel.unary_unary(
"/google.spanner.v1.Spanner/Commit",
request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.CommitRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.CommitResponse.FromString,
)
self.Rollback = channel.unary_unary(
"/google.spanner.v1.Spanner/Rollback",
request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.RollbackRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.PartitionQuery = channel.unary_unary(
"/google.spanner.v1.Spanner/PartitionQuery",
request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.PartitionQueryRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.PartitionResponse.FromString,
)
self.PartitionRead = channel.unary_unary(
"/google.spanner.v1.Spanner/PartitionRead",
request_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.PartitionReadRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.PartitionResponse.FromString,
)
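# Illustrative client usage (not part of the generated code); the database
# path is a placeholder and real calls also need credentials attached:
#
#   channel = grpc.secure_channel('spanner.googleapis.com:443',
#                                 grpc.ssl_channel_credentials())
#   stub = SpannerStub(channel)
#   session = stub.CreateSession(
#       google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2
#       .CreateSessionRequest(
#           database='projects/p/instances/i/databases/d'))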
class SpannerServicer(object):
"""Cloud Spanner API
The Cloud Spanner API can be used to manage sessions and execute
transactions on data stored in Cloud Spanner databases.
"""
def CreateSession(self, request, context):
"""Creates a new session. A session can be used to perform
transactions that read and/or modify data in a Cloud Spanner database.
Sessions are meant to be reused for many consecutive
transactions.
Sessions can only execute one transaction at a time. To execute
multiple concurrent read-write/write-only transactions, create
multiple sessions. Note that standalone reads and queries use a
transaction internally, and count toward the one transaction
limit.
Active sessions use additional server resources, so it is a good idea to
delete idle and unneeded sessions.
Aside from explicit deletes, Cloud Spanner can delete sessions for which no
operations are sent for more than an hour. If a session is deleted,
requests to it return `NOT_FOUND`.
Idle sessions can be kept alive by sending a trivial SQL query
periodically, e.g., `"SELECT 1"`.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def BatchCreateSessions(self, request, context):
"""Creates multiple new sessions.
This API can be used to initialize a session cache on the clients.
See https://goo.gl/TgSFN2 for best practices on session cache management.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetSession(self, request, context):
"""Gets a session. Returns `NOT_FOUND` if the session does not exist.
This is mainly useful for determining whether a session is still
alive.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListSessions(self, request, context):
"""Lists all sessions in a given database.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DeleteSession(self, request, context):
"""Ends a session, releasing server resources associated with it. This will
asynchronously trigger cancellation of any operations that are running with
this session.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ExecuteSql(self, request, context):
"""Executes an SQL statement, returning all results in a single reply. This
method cannot be used to return a result set larger than 10 MiB;
if the query yields more data than that, the query fails with
a `FAILED_PRECONDITION` error.
Operations inside read-write transactions might return `ABORTED`. If
this occurs, the application should restart the transaction from
the beginning. See [Transaction][google.spanner.v1.Transaction] for more details.
Larger result sets can be fetched in streaming fashion by calling
[ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] instead.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ExecuteStreamingSql(self, request, context):
"""Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the result
set as a stream. Unlike [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there
is no limit on the size of the returned result set. However, no
individual row in the result set can exceed 100 MiB, and no
column value can exceed 10 MiB.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ExecuteBatchDml(self, request, context):
"""Executes a batch of SQL DML statements. This method allows many statements
to be run with lower latency than submitting them sequentially with
[ExecuteSql][google.spanner.v1.Spanner.ExecuteSql].
Statements are executed in sequential order. A request can succeed even if
a statement fails. The [ExecuteBatchDmlResponse.status][google.spanner.v1.ExecuteBatchDmlResponse.status] field in the
response provides information about the statement that failed. Clients must
inspect this field to determine whether an error occurred.
Execution stops after the first failed statement; the remaining statements
are not executed.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def Read(self, request, context):
"""Reads rows from the database using key lookups and scans, as a
simple key/value style alternative to
[ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method cannot be used to
return a result set larger than 10 MiB; if the read matches more
data than that, the read fails with a `FAILED_PRECONDITION`
error.
Reads inside read-write transactions might return `ABORTED`. If
this occurs, the application should restart the transaction from
the beginning. See [Transaction][google.spanner.v1.Transaction] for more details.
Larger result sets can be yielded in streaming fashion by calling
[StreamingRead][google.spanner.v1.Spanner.StreamingRead] instead.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def StreamingRead(self, request, context):
"""Like [Read][google.spanner.v1.Spanner.Read], except returns the result set as a
stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no limit on the
size of the returned result set. However, no individual row in
the result set can exceed 100 MiB, and no column value can exceed
10 MiB.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def BeginTransaction(self, request, context):
"""Begins a new transaction. This step can often be skipped:
[Read][google.spanner.v1.Spanner.Read], [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and
[Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a
side-effect.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def Commit(self, request, context):
"""Commits a transaction. The request includes the mutations to be
applied to rows in the database.
`Commit` might return an `ABORTED` error. This can occur at any time;
commonly, the cause is conflicts with concurrent
transactions. However, it can also happen for a variety of other
reasons. If `Commit` returns `ABORTED`, the caller should re-attempt
the transaction from the beginning, re-using the same session.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def Rollback(self, request, context):
"""Rolls back a transaction, releasing any locks it holds. It is a good
idea to call this for any transaction that includes one or more
[Read][google.spanner.v1.Spanner.Read] or [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and
ultimately decides not to commit.
`Rollback` returns `OK` if it successfully aborts the transaction, the
transaction was already aborted, or the transaction is not
found. `Rollback` never returns `ABORTED`.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def PartitionQuery(self, request, context):
"""Creates a set of partition tokens that can be used to execute a query
operation in parallel. Each of the returned partition tokens can be used
by [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to specify a subset
of the query result to read. The same session and read-only transaction
must be used by the PartitionQueryRequest used to create the
partition tokens and the ExecuteSqlRequests that use the partition tokens.
Partition tokens become invalid when the session used to create them
is deleted, is idle for too long, begins a new transaction, or becomes too
old. When any of these happen, it is not possible to resume the query, and
the whole operation must be restarted from the beginning.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def PartitionRead(self, request, context):
"""Creates a set of partition tokens that can be used to execute a read
operation in parallel. Each of the returned partition tokens can be used
by [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a subset of the read
result to read. The same session and read-only transaction must be used by
the PartitionReadRequest used to create the partition tokens and the
ReadRequests that use the partition tokens. There are no ordering
guarantees on rows returned among the returned partition tokens, or even
within each individual StreamingRead call issued with a partition_token.
Partition tokens become invalid when the session used to create them
is deleted, is idle for too long, begins a new transaction, or becomes too
old. When any of these happen, it is not possible to resume the read, and
the whole operation must be restarted from the beginning.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def add_SpannerServicer_to_server(servicer, server):
rpc_method_handlers = {
"CreateSession": grpc.unary_unary_rpc_method_handler(
servicer.CreateSession,
request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.CreateSessionRequest.FromString,
response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.Session.SerializeToString,
),
"BatchCreateSessions": grpc.unary_unary_rpc_method_handler(
servicer.BatchCreateSessions,
request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.BatchCreateSessionsRequest.FromString,
response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.BatchCreateSessionsResponse.SerializeToString,
),
"GetSession": grpc.unary_unary_rpc_method_handler(
servicer.GetSession,
request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.GetSessionRequest.FromString,
response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.Session.SerializeToString,
),
"ListSessions": grpc.unary_unary_rpc_method_handler(
servicer.ListSessions,
request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ListSessionsRequest.FromString,
response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ListSessionsResponse.SerializeToString,
),
"DeleteSession": grpc.unary_unary_rpc_method_handler(
servicer.DeleteSession,
request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.DeleteSessionRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"ExecuteSql": grpc.unary_unary_rpc_method_handler(
servicer.ExecuteSql,
request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ExecuteSqlRequest.FromString,
response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.ResultSet.SerializeToString,
),
"ExecuteStreamingSql": grpc.unary_stream_rpc_method_handler(
servicer.ExecuteStreamingSql,
request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ExecuteSqlRequest.FromString,
response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.PartialResultSet.SerializeToString,
),
"ExecuteBatchDml": grpc.unary_unary_rpc_method_handler(
servicer.ExecuteBatchDml,
request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ExecuteBatchDmlRequest.FromString,
response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ExecuteBatchDmlResponse.SerializeToString,
),
"Read": grpc.unary_unary_rpc_method_handler(
servicer.Read,
request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ReadRequest.FromString,
response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.ResultSet.SerializeToString,
),
"StreamingRead": grpc.unary_stream_rpc_method_handler(
servicer.StreamingRead,
request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.ReadRequest.FromString,
response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_result__set__pb2.PartialResultSet.SerializeToString,
),
"BeginTransaction": grpc.unary_unary_rpc_method_handler(
servicer.BeginTransaction,
request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.BeginTransactionRequest.FromString,
response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_transaction__pb2.Transaction.SerializeToString,
),
"Commit": grpc.unary_unary_rpc_method_handler(
servicer.Commit,
request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.CommitRequest.FromString,
response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.CommitResponse.SerializeToString,
),
"Rollback": grpc.unary_unary_rpc_method_handler(
servicer.Rollback,
request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.RollbackRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"PartitionQuery": grpc.unary_unary_rpc_method_handler(
servicer.PartitionQuery,
request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.PartitionQueryRequest.FromString,
response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.PartitionResponse.SerializeToString,
),
"PartitionRead": grpc.unary_unary_rpc_method_handler(
servicer.PartitionRead,
request_deserializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.PartitionReadRequest.FromString,
response_serializer=google_dot_cloud_dot_spanner__v1_dot_proto_dot_spanner__pb2.PartitionResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
"google.spanner.v1.Spanner", rpc_method_handlers
)
server.add_generic_rpc_handlers((generic_handler,))
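# --- Editor's illustrative sketch (not part of the generated module) ---
# A minimal, hypothetical way to host a servicer implementation with grpc;
# the port and worker count are arbitrary assumptions.
def _example_serve(servicer, port=50051):
    from concurrent import futures
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    add_SpannerServicer_to_server(servicer, server)
    server.add_insecure_port("[::]:{0}".format(port))
    server.start()
    return server  # caller keeps the process alive and calls server.stop() when done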
| tseaver/google-cloud-python | spanner/google/cloud/spanner_v1/proto/spanner_pb2_grpc.py | Python | apache-2.0 | 22,552 |
# -*- coding: utf-8 -*-
import allure
from selenium.webdriver.common.by import By
from .base import BasePage
from .elements import SimpleInput, SimpleText
from .blocks.nav import NavBlock
class BrowseMoviePageLocators(object):
"""Локаторы страницы просмотра информации о фильме"""
TITLE_LOCATOR = (By.CSS_SELECTOR, '#movie h2')
COUNTRY_LOCATOR = (By.NAME, 'country')
DIRECTOR_LOCATOR = (By.NAME, 'director')
WRITER_LOCATOR = (By.NAME, 'writer')
PRODUCER_LOCATOR = (By.NAME, 'producer')
EDIT_BUTTON_LOCATOR = (By.CSS_SELECTOR, 'img[title="Edit"]')
REMOVE_BUTTON_LOCATOR = (By.CSS_SELECTOR, 'img[title="Remove"]')
class BrowseMoviePage(BasePage):
"""Страница просмотра информации о фильме"""
def __init__(self, driver):
super(BrowseMoviePage, self).__init__(driver)
self.nav = NavBlock(driver)
title = SimpleText(BrowseMoviePageLocators.TITLE_LOCATOR)
director = SimpleText(BrowseMoviePageLocators.DIRECTOR_LOCATOR)
writer = SimpleText(BrowseMoviePageLocators.WRITER_LOCATOR)
producer = SimpleText(BrowseMoviePageLocators.PRODUCER_LOCATOR)
    @allure.step('Click the "Edit" button')
def click_edit_button(self):
"""
:rtype: EditMoviePage
"""
self._click(BrowseMoviePageLocators.EDIT_BUTTON_LOCATOR)
return EditMoviePage(self._driver)
    @allure.step('Click the "Remove" button')
def click_remove_button(self):
"""
:rtype: HomePage
"""
self._click(BrowseMoviePageLocators.REMOVE_BUTTON_LOCATOR)
self.alert_accept()
from .home import HomePage
return HomePage(self._driver)
class AddMoviePageLocators(object):
"""Локаторы страницы создания описания фильма"""
TITLE_INPUT_LOCATOR = (By.NAME, 'name')
TITLE_INPUT_ERROR_LOCATOR = (By.CSS_SELECTOR, 'input[name="name"].error')
ALSO_KNOWN_AS_INPUT_LOCATOR = (By.NAME, 'aka')
YEAR_INPUT_LOCATOR = (By.NAME, 'year')
YEAR_INPUT_ERROR_LOCATOR = (By.CSS_SELECTOR, 'input[name="year"].error')
DURATION_INPUT_LOCATOR = (By.NAME, 'duration')
TRAILER_URL_INPUT_LOCATOR = (By.NAME, 'trailer')
FORMAT_INPUT_LOCATOR = (By.NAME, 'format')
COUNTRY_INPUT_LOCATOR = (By.NAME, 'country')
DIRECTOR_INPUT_LOCATOR = (By.NAME, 'director')
WRITER_INPUT_LOCATOR = (By.NAME, 'writer')
PRODUCER_INPUT_LOCATOR = (By.NAME, 'producer')
SAVE_BUTTON_LOCATOR = (By.CSS_SELECTOR, 'img[title="Save"]')
class AddMoviePage(BasePage):
"""Страница создания описания фильма"""
def __init__(self, driver):
super(AddMoviePage, self).__init__(driver)
self.nav = NavBlock(driver)
    title = SimpleInput(AddMoviePageLocators.TITLE_INPUT_LOCATOR, 'movie title')
    also_know_as = SimpleInput(AddMoviePageLocators.ALSO_KNOWN_AS_INPUT_LOCATOR, 'original movie title')
    year = SimpleInput(AddMoviePageLocators.YEAR_INPUT_LOCATOR, 'year')
    duration = SimpleInput(AddMoviePageLocators.DURATION_INPUT_LOCATOR, 'duration')
    trailer_url = SimpleInput(AddMoviePageLocators.TRAILER_URL_INPUT_LOCATOR, 'trailer URL')
    format = SimpleInput(AddMoviePageLocators.FORMAT_INPUT_LOCATOR, 'format')
    country = SimpleInput(AddMoviePageLocators.COUNTRY_INPUT_LOCATOR, 'country')
    director = SimpleInput(AddMoviePageLocators.DIRECTOR_INPUT_LOCATOR, 'director')
    writer = SimpleInput(AddMoviePageLocators.WRITER_INPUT_LOCATOR, 'writer')
    producer = SimpleInput(AddMoviePageLocators.PRODUCER_INPUT_LOCATOR, 'producer')
    @allure.step('Click the "Save" button')
def click_save_button(self):
"""
:rtype: BrowseMoviePage
"""
self._click(AddMoviePageLocators.SAVE_BUTTON_LOCATOR)
return BrowseMoviePage(self._driver)
def title_field_is_required_present(self):
"""
:rtype: bool
"""
return self._is_element_present(AddMoviePageLocators.TITLE_INPUT_ERROR_LOCATOR)
def year_field_is_required_present(self):
"""
:rtype: bool
"""
return self._is_element_present(AddMoviePageLocators.YEAR_INPUT_ERROR_LOCATOR)
class EditMoviePageLocators(object):
"""Локаторы для страницы редактирования описания фильма"""
REMOVE_BUTTON_LOCATOR = (By.CSS_SELECTOR, 'img[title="Remove"]')
class EditMoviePage(AddMoviePage):
"""Страница редактирования описания фильма"""
    @allure.step('Click the "Remove" button')
def click_remove_button(self):
"""
:rtype: HomePage
"""
self._click(EditMoviePageLocators.REMOVE_BUTTON_LOCATOR)
self.alert_accept()
from .home import HomePage
return HomePage(self._driver)
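# --- Editor's illustrative sketch (not part of the page objects above) ---
# Hypothetical end-to-end flow: fill the two required fields on the movie
# creation form and save. Assumes SimpleInput is a data descriptor that types
# the assigned string into its input, and that ``driver`` is already on the
# "add movie" page.
def _example_add_movie(driver, title='Amour', year='2012'):
    page = AddMoviePage(driver)
    page.title = title
    page.year = year
    return page.click_save_button()  # lands on BrowseMoviePage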
| adv-tsk/Course-Se-Python | php4dvd/pages/movie.py | Python | apache-2.0 | 5,067 |
"""
Copyright 2015-2018 IBM
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Licensed Materials - Property of IBM
© Copyright IBM Corp. 2015-2018
"""
import asyncio
from confluent_kafka import Producer
class ProducerTask(object):
def __init__(self, conf, topic_name):
self.topic_name = topic_name
self.producer = Producer(conf)
self.counter = 0
self.running = True
def stop(self):
self.running = False
def on_delivery(self, err, msg):
if err:
print('Delivery report: Failed sending message {0}'.format(msg.value()))
print(err)
# We could retry sending the message
else:
print('Message produced, offset: {0}'.format(msg.offset()))
@asyncio.coroutine
def run(self):
print('The producer has started')
while self.running:
message = 'This is a test message #{0}'.format(self.counter)
key = 'key'
sleep = 2 # Short sleep for flow control
try:
self.producer.produce(self.topic_name, message, key, -1, self.on_delivery)
self.producer.poll(0)
self.counter += 1
except Exception as err:
print('Failed sending message {0}'.format(message))
print(err)
sleep = 5 # Longer sleep before retrying
yield from asyncio.sleep(sleep)
self.producer.flush()
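# --- Editor's illustrative sketch (not part of the sample) ---
# Minimal driver for ProducerTask on an asyncio event loop. The broker
# settings are placeholders; a real Message Hub / Event Streams configuration
# also needs SASL credentials, which are omitted here.
def _example_run(topic_name='test-topic'):
    conf = {'bootstrap.servers': 'localhost:9092', 'client.id': 'sample-producer'}
    task = ProducerTask(conf, topic_name)
    loop = asyncio.get_event_loop()
    try:
        loop.run_until_complete(task.run())
    finally:
        task.stop()
        loop.close()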
| ibm-messaging/message-hub-samples | kafka-python-console-sample/producertask.py | Python | apache-2.0 | 1,944 |
# -*- coding: utf-8 -*-
import unittest
import pykintone
from pykintone.model import kintoneModel
import tests.envs as envs
class TestAppModelSimple(kintoneModel):
def __init__(self):
super(TestAppModelSimple, self).__init__()
self.my_key = ""
self.stringField = ""
class TestComment(unittest.TestCase):
def test_comment(self):
app = pykintone.load(envs.FILE_PATH).app()
model = TestAppModelSimple()
model.my_key = "comment_test"
model.stringField = "comment_test_now"
result = app.create(model)
        self.assertTrue(result.ok)  # confirm the record used by the comment test was created
_record_id = result.record_id
# create comment
r_created = app.comment(_record_id).create("コメントのテスト")
self.assertTrue(r_created.ok)
        # requires that an "Administrator" user is registered in kintone
r_created_m = app.comment(_record_id).create("メンションのテスト", [("Administrator", "USER")])
self.assertTrue(r_created_m.ok)
# select comment
r_selected = app.comment(_record_id).select(True, 0, 10)
self.assertTrue(r_selected.ok)
        self.assertEqual(2, len(r_selected.raw_comments))
comments = r_selected.comments()
        self.assertTrue(any(len(c.mentions) == 1 for c in comments))  # the mention comment carries exactly one mention
# delete comment
for c in comments:
r_deleted = app.comment(_record_id).delete(c.comment_id)
self.assertTrue(r_deleted.ok)
r_selected = app.comment(_record_id).select()
self.assertEqual(0, len(r_selected.raw_comments))
# done test
app.delete(_record_id)
| icoxfog417/pykintone | tests/test_comment.py | Python | apache-2.0 | 1,667 |
# -*- coding: utf-8 -*-
'''
The Salt Key backend API and interface used by the CLI. The Key class can be
used to manage salt keys directly without interfacing with the CLI.
'''
# Import python libs
from __future__ import absolute_import, print_function
import os
import copy
import json
import stat
import shutil
import fnmatch
import hashlib
import logging
# Import salt libs
import salt.crypt
import salt.utils
import salt.exceptions
import salt.utils.event
import salt.daemons.masterapi
from salt.utils import kinds
from salt.utils.event import tagify
# Import third party libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
import salt.ext.six as six
from salt.ext.six.moves import input
# pylint: enable=import-error,no-name-in-module,redefined-builtin
try:
import msgpack
except ImportError:
pass
log = logging.getLogger(__name__)
def get_key(opts):
if opts['transport'] in ('zeromq', 'tcp'):
return Key(opts)
else:
return RaetKey(opts)
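# --- Editor's illustrative sketch (not part of upstream salt) ---
# Obtaining the right key manager outside the CLI. The config path is an
# assumption; salt.config.master_config is the standard loader for master
# options and fills in 'transport', 'pki_dir', 'sock_dir', etc.
def _example_key_manager(config_path='/etc/salt/master'):
    import salt.config
    opts = salt.config.master_config(config_path)
    return get_key(opts)  # Key for zeromq/tcp, RaetKey for raet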
class KeyCLI(object):
'''
Manage key CLI operations
'''
def __init__(self, opts):
self.opts = opts
if self.opts['transport'] in ('zeromq', 'tcp'):
self.key = Key(opts)
else:
self.key = RaetKey(opts)
def list_status(self, status):
'''
Print out the keys under a named status
:param str status: A string indicating which set of keys to return
'''
keys = self.key.list_keys()
if status.startswith('acc'):
salt.output.display_output(
{self.key.ACC: keys[self.key.ACC]},
'key',
self.opts
)
elif status.startswith(('pre', 'un')):
salt.output.display_output(
{self.key.PEND: keys[self.key.PEND]},
'key',
self.opts
)
elif status.startswith('rej'):
salt.output.display_output(
{self.key.REJ: keys[self.key.REJ]},
'key',
self.opts
)
elif status.startswith('den'):
if self.key.DEN:
salt.output.display_output(
{self.key.DEN: keys[self.key.DEN]},
'key',
self.opts
)
elif status.startswith('all'):
self.list_all()
def list_all(self):
'''
Print out all keys
'''
salt.output.display_output(
self.key.list_keys(),
'key',
self.opts)
def accept(self, match, include_rejected=False):
'''
Accept the keys matched
:param str match: A string to match against. i.e. 'web*'
:param bool include_rejected: Whether or not to accept a matched key that was formerly rejected
'''
def _print_accepted(matches, after_match):
if self.key.ACC in after_match:
accepted = sorted(
set(after_match[self.key.ACC]).difference(
set(matches.get(self.key.ACC, []))
)
)
for key in accepted:
print('Key for minion {0} accepted.'.format(key))
matches = self.key.name_match(match)
keys = {}
if self.key.PEND in matches:
keys[self.key.PEND] = matches[self.key.PEND]
if include_rejected and bool(matches.get(self.key.REJ)):
keys[self.key.REJ] = matches[self.key.REJ]
if not keys:
msg = (
'The key glob {0!r} does not match any unaccepted {1}keys.'
.format(match, 'or rejected ' if include_rejected else '')
)
print(msg)
raise salt.exceptions.SaltSystemExit(code=1)
if not self.opts.get('yes', False):
print('The following keys are going to be accepted:')
salt.output.display_output(
keys,
'key',
self.opts)
try:
veri = input('Proceed? [n/Y] ')
except KeyboardInterrupt:
raise SystemExit("\nExiting on CTRL-c")
if not veri or veri.lower().startswith('y'):
_print_accepted(
matches,
self.key.accept(
match_dict=keys,
include_rejected=include_rejected
)
)
else:
print('The following keys are going to be accepted:')
salt.output.display_output(
keys,
'key',
self.opts)
_print_accepted(
matches,
self.key.accept(
match_dict=keys,
include_rejected=include_rejected
)
)
def accept_all(self, include_rejected=False):
'''
Accept all keys
:param bool include_rejected: Whether or not to accept a matched key that was formerly rejected
'''
self.accept('*', include_rejected=include_rejected)
def delete(self, match):
'''
Delete the matched keys
:param str match: A string to match against. i.e. 'web*'
'''
def _print_deleted(matches, after_match):
deleted = []
for keydir in (self.key.ACC, self.key.PEND, self.key.REJ):
deleted.extend(list(
set(matches.get(keydir, [])).difference(
set(after_match.get(keydir, []))
)
))
for key in sorted(deleted):
print('Key for minion {0} deleted.'.format(key))
matches = self.key.name_match(match)
if not matches:
print(
'The key glob {0!r} does not match any accepted, unaccepted '
'or rejected keys.'.format(match)
)
raise salt.exceptions.SaltSystemExit(code=1)
if not self.opts.get('yes', False):
print('The following keys are going to be deleted:')
salt.output.display_output(
matches,
'key',
self.opts)
try:
veri = input('Proceed? [N/y] ')
except KeyboardInterrupt:
raise SystemExit("\nExiting on CTRL-c")
if veri.lower().startswith('y'):
_print_deleted(
matches,
self.key.delete_key(match_dict=matches)
)
else:
print('Deleting the following keys:')
salt.output.display_output(
matches,
'key',
self.opts)
_print_deleted(
matches,
self.key.delete_key(match_dict=matches)
)
def delete_all(self):
'''
Delete all keys
'''
self.delete('*')
def reject(self, match, include_accepted=False):
'''
Reject the matched keys
:param str match: A string to match against. i.e. 'web*'
:param bool include_accepted: Whether or not to accept a matched key that was formerly accepted
'''
def _print_rejected(matches, after_match):
if self.key.REJ in after_match:
rejected = sorted(
set(after_match[self.key.REJ]).difference(
set(matches.get(self.key.REJ, []))
)
)
for key in rejected:
print('Key for minion {0} rejected.'.format(key))
matches = self.key.name_match(match)
keys = {}
if self.key.PEND in matches:
keys[self.key.PEND] = matches[self.key.PEND]
if include_accepted and bool(matches.get(self.key.ACC)):
keys[self.key.ACC] = matches[self.key.ACC]
if not keys:
msg = 'The key glob {0!r} does not match any {1} keys.'.format(
match,
'accepted or unaccepted' if include_accepted else 'unaccepted'
)
print(msg)
return
if not self.opts.get('yes', False):
print('The following keys are going to be rejected:')
salt.output.display_output(
keys,
'key',
self.opts)
veri = input('Proceed? [n/Y] ')
if veri.lower().startswith('n'):
return
_print_rejected(
matches,
self.key.reject(
match_dict=matches,
include_accepted=include_accepted
)
)
def reject_all(self, include_accepted=False):
'''
Reject all keys
:param bool include_accepted: Whether or not to accept a matched key that was formerly accepted
'''
self.reject('*', include_accepted=include_accepted)
def print_key(self, match):
'''
Print out a single key
:param str match: A string to match against. i.e. 'web*'
'''
matches = self.key.key_str(match)
salt.output.display_output(
matches,
'key',
self.opts)
def print_all(self):
'''
Print out all managed keys
'''
self.print_key('*')
def finger(self, match):
'''
Print out the fingerprints for the matched keys
:param str match: A string to match against. i.e. 'web*'
'''
matches = self.key.finger(match)
salt.output.display_output(
matches,
'key',
self.opts)
def finger_all(self):
'''
Print out all fingerprints
'''
matches = self.key.finger('*')
salt.output.display_output(
matches,
'key',
self.opts)
def prep_signature(self):
'''
Searches for usable keys to create the
master public-key signature
'''
self.privkey = None
self.pubkey = None
# check given pub-key
if self.opts['pub']:
if not os.path.isfile(self.opts['pub']):
print('Public-key {0} does not exist'.format(self.opts['pub']))
return
self.pubkey = self.opts['pub']
# default to master.pub
else:
mpub = self.opts['pki_dir'] + '/' + 'master.pub'
if os.path.isfile(mpub):
self.pubkey = mpub
# check given priv-key
if self.opts['priv']:
if not os.path.isfile(self.opts['priv']):
print('Private-key {0} does not exist'.format(self.opts['priv']))
return
self.privkey = self.opts['priv']
# default to master_sign.pem
else:
mpriv = self.opts['pki_dir'] + '/' + 'master_sign.pem'
if os.path.isfile(mpriv):
self.privkey = mpriv
if not self.privkey:
if self.opts['auto_create']:
print('Generating new signing key-pair {0}.* in {1}'
''.format(self.opts['master_sign_key_name'],
self.opts['pki_dir']))
salt.crypt.gen_keys(self.opts['pki_dir'],
self.opts['master_sign_key_name'],
self.opts['keysize'],
self.opts.get('user'))
self.privkey = self.opts['pki_dir'] + '/' + self.opts['master_sign_key_name'] + '.pem'
else:
print('No usable private-key found')
return
if not self.pubkey:
print('No usable public-key found')
return
print('Using public-key {0}'.format(self.pubkey))
print('Using private-key {0}'.format(self.privkey))
if self.opts['signature_path']:
if not os.path.isdir(self.opts['signature_path']):
print('target directory {0} does not exist'
''.format(self.opts['signature_path']))
else:
self.opts['signature_path'] = self.opts['pki_dir']
sign_path = self.opts['signature_path'] + '/' + self.opts['master_pubkey_signature']
self.key.gen_signature(self.privkey,
self.pubkey,
sign_path)
def run(self):
'''
Run the logic for saltkey
'''
if self.opts['gen_keys']:
self.key.gen_keys()
return
elif self.opts['gen_signature']:
self.prep_signature()
return
if self.opts['list']:
self.list_status(self.opts['list'])
elif self.opts['list_all']:
self.list_all()
elif self.opts['print']:
self.print_key(self.opts['print'])
elif self.opts['print_all']:
self.print_all()
elif self.opts['accept']:
self.accept(
self.opts['accept'],
include_rejected=self.opts['include_all']
)
elif self.opts['accept_all']:
self.accept_all(include_rejected=self.opts['include_all'])
elif self.opts['reject']:
self.reject(
self.opts['reject'],
include_accepted=self.opts['include_all']
)
elif self.opts['reject_all']:
self.reject_all(include_accepted=self.opts['include_all'])
elif self.opts['delete']:
self.delete(self.opts['delete'])
elif self.opts['delete_all']:
self.delete_all()
elif self.opts['finger']:
self.finger(self.opts['finger'])
elif self.opts['finger_all']:
self.finger_all()
else:
self.list_all()
class MultiKeyCLI(KeyCLI):
'''
Manage multiple key backends from the CLI
'''
def __init__(self, opts):
opts['__multi_key'] = True
super(MultiKeyCLI, self).__init__(opts)
# Remove the key attribute set in KeyCLI.__init__
delattr(self, 'key')
zopts = copy.copy(opts)
ropts = copy.copy(opts)
self.keys = {}
zopts['transport'] = 'zeromq'
self.keys['ZMQ Keys'] = KeyCLI(zopts)
ropts['transport'] = 'raet'
self.keys['RAET Keys'] = KeyCLI(ropts)
def _call_all(self, fun, *args):
'''
Call the given function on all backend keys
'''
for kback in self.keys:
print(kback)
getattr(self.keys[kback], fun)(*args)
def list_status(self, status):
self._call_all('list_status', status)
def list_all(self):
self._call_all('list_all')
def accept(self, match, include_rejected=False):
self._call_all('accept', match, include_rejected)
def accept_all(self, include_rejected=False):
self._call_all('accept_all', include_rejected)
def delete(self, match):
self._call_all('delete', match)
def delete_all(self):
self._call_all('delete_all')
def reject(self, match, include_accepted=False):
self._call_all('reject', match, include_accepted)
def reject_all(self, include_accepted=False):
self._call_all('reject_all', include_accepted)
def print_key(self, match):
self._call_all('print_key', match)
def print_all(self):
self._call_all('print_all')
def finger(self, match):
self._call_all('finger', match)
def finger_all(self):
self._call_all('finger_all')
def prep_signature(self):
self._call_all('prep_signature')
class Key(object):
'''
The object that encapsulates saltkey actions
'''
ACC = 'minions'
PEND = 'minions_pre'
REJ = 'minions_rejected'
DEN = 'minions_denied'
def __init__(self, opts):
self.opts = opts
kind = self.opts.get('__role', '') # application kind
if kind not in kinds.APPL_KINDS:
emsg = ("Invalid application kind = '{0}'.".format(kind))
log.error(emsg + '\n')
raise ValueError(emsg)
self.event = salt.utils.event.get_event(
kind,
opts['sock_dir'],
opts['transport'],
opts=opts,
listen=False)
def _check_minions_directories(self):
'''
Return the minion keys directory paths
'''
minions_accepted = os.path.join(self.opts['pki_dir'], self.ACC)
minions_pre = os.path.join(self.opts['pki_dir'], self.PEND)
minions_rejected = os.path.join(self.opts['pki_dir'],
self.REJ)
minions_denied = os.path.join(self.opts['pki_dir'],
self.DEN)
return minions_accepted, minions_pre, minions_rejected, minions_denied
def gen_keys(self):
'''
Generate minion RSA public keypair
'''
salt.crypt.gen_keys(
self.opts['gen_keys_dir'],
self.opts['gen_keys'],
self.opts['keysize'])
return
def gen_signature(self, privkey, pubkey, sig_path):
'''
Generate master public-key-signature
'''
return salt.crypt.gen_signature(privkey,
pubkey,
sig_path)
def check_minion_cache(self, preserve_minions=None):
'''
Check the minion cache to make sure that old minion data is cleared
Optionally, pass in a list of minions which should have their caches
preserved. To preserve all caches, set __opts__['preserve_minion_cache']
'''
if preserve_minions is None:
preserve_minions = []
m_cache = os.path.join(self.opts['cachedir'], self.ACC)
if not os.path.isdir(m_cache):
return
keys = self.list_keys()
minions = []
for key, val in six.iteritems(keys):
minions.extend(val)
if not self.opts.get('preserve_minion_cache', False) or not preserve_minions:
for minion in os.listdir(m_cache):
if minion not in minions and minion not in preserve_minions:
shutil.rmtree(os.path.join(m_cache, minion))
def check_master(self):
'''
Log if the master is not running
:rtype: bool
:return: Whether or not the master is running
'''
if not os.path.exists(
os.path.join(
self.opts['sock_dir'],
'publish_pull.ipc'
)
):
return False
return True
def name_match(self, match, full=False):
'''
        Accept a glob to match key names against and return the matching keys, grouped by status
'''
if full:
matches = self.all_keys()
else:
matches = self.list_keys()
ret = {}
if ',' in match and isinstance(match, str):
match = match.split(',')
for status, keys in six.iteritems(matches):
for key in salt.utils.isorted(keys):
if isinstance(match, list):
for match_item in match:
if fnmatch.fnmatch(key, match_item):
if status not in ret:
ret[status] = []
ret[status].append(key)
else:
if fnmatch.fnmatch(key, match):
if status not in ret:
ret[status] = []
ret[status].append(key)
return ret
def dict_match(self, match_dict):
'''
Accept a dictionary of keys and return the current state of the
specified keys
'''
ret = {}
cur_keys = self.list_keys()
for status, keys in six.iteritems(match_dict):
for key in salt.utils.isorted(keys):
for keydir in (self.ACC, self.PEND, self.REJ, self.DEN):
if keydir and fnmatch.filter(cur_keys.get(keydir, []), key):
ret.setdefault(keydir, []).append(key)
return ret
def local_keys(self):
'''
Return a dict of local keys
'''
ret = {'local': []}
for fn_ in salt.utils.isorted(os.listdir(self.opts['pki_dir'])):
if fn_.endswith('.pub') or fn_.endswith('.pem'):
path = os.path.join(self.opts['pki_dir'], fn_)
if os.path.isfile(path):
ret['local'].append(fn_)
return ret
def list_keys(self):
'''
        Return a dict of managed keys and their current status
'''
key_dirs = []
# We have to differentiate between RaetKey._check_minions_directories
# and Zeromq-Keys. Raet-Keys only have three states while ZeroMQ-keys
        # have an additional 'denied' state.
if self.opts['transport'] in ('zeromq', 'tcp'):
key_dirs = self._check_minions_directories()
else:
key_dirs = self._check_minions_directories()
ret = {}
for dir_ in key_dirs:
ret[os.path.basename(dir_)] = []
try:
for fn_ in salt.utils.isorted(os.listdir(dir_)):
if not fn_.startswith('.'):
if os.path.isfile(os.path.join(dir_, fn_)):
ret[os.path.basename(dir_)].append(fn_)
except (OSError, IOError):
# key dir kind is not created yet, just skip
continue
return ret
def all_keys(self):
'''
Merge managed keys with local keys
'''
keys = self.list_keys()
keys.update(self.local_keys())
return keys
def list_status(self, match):
'''
Return a dict of managed keys under a named status
'''
acc, pre, rej, den = self._check_minions_directories()
ret = {}
if match.startswith('acc'):
ret[os.path.basename(acc)] = []
for fn_ in salt.utils.isorted(os.listdir(acc)):
if not fn_.startswith('.'):
if os.path.isfile(os.path.join(acc, fn_)):
ret[os.path.basename(acc)].append(fn_)
elif match.startswith('pre') or match.startswith('un'):
ret[os.path.basename(pre)] = []
for fn_ in salt.utils.isorted(os.listdir(pre)):
if not fn_.startswith('.'):
if os.path.isfile(os.path.join(pre, fn_)):
ret[os.path.basename(pre)].append(fn_)
elif match.startswith('rej'):
ret[os.path.basename(rej)] = []
for fn_ in salt.utils.isorted(os.listdir(rej)):
if not fn_.startswith('.'):
if os.path.isfile(os.path.join(rej, fn_)):
ret[os.path.basename(rej)].append(fn_)
elif match.startswith('den'):
ret[os.path.basename(den)] = []
for fn_ in salt.utils.isorted(os.listdir(den)):
if not fn_.startswith('.'):
if os.path.isfile(os.path.join(den, fn_)):
ret[os.path.basename(den)].append(fn_)
elif match.startswith('all'):
return self.all_keys()
return ret
def key_str(self, match):
'''
Return the specified public key or keys based on a glob
'''
ret = {}
for status, keys in six.iteritems(self.name_match(match)):
ret[status] = {}
for key in salt.utils.isorted(keys):
path = os.path.join(self.opts['pki_dir'], status, key)
with salt.utils.fopen(path, 'r') as fp_:
ret[status][key] = fp_.read()
return ret
def key_str_all(self):
'''
Return all managed key strings
'''
ret = {}
for status, keys in six.iteritems(self.list_keys()):
ret[status] = {}
for key in salt.utils.isorted(keys):
path = os.path.join(self.opts['pki_dir'], status, key)
with salt.utils.fopen(path, 'r') as fp_:
ret[status][key] = fp_.read()
return ret
def accept(self, match=None, match_dict=None, include_rejected=False):
'''
Accept public keys. If "match" is passed, it is evaluated as a glob.
Pre-gathered matches can also be passed via "match_dict".
'''
if match is not None:
matches = self.name_match(match)
elif match_dict is not None and isinstance(match_dict, dict):
matches = match_dict
else:
matches = {}
keydirs = [self.PEND]
if include_rejected:
keydirs.append(self.REJ)
for keydir in keydirs:
for key in matches.get(keydir, []):
try:
shutil.move(
os.path.join(
self.opts['pki_dir'],
keydir,
key),
os.path.join(
self.opts['pki_dir'],
self.ACC,
key)
)
eload = {'result': True,
'act': 'accept',
'id': key}
self.event.fire_event(eload, tagify(prefix='key'))
except (IOError, OSError):
pass
return (
self.name_match(match) if match is not None
else self.dict_match(matches)
)
def accept_all(self):
'''
Accept all keys in pre
'''
keys = self.list_keys()
for key in keys[self.PEND]:
try:
shutil.move(
os.path.join(
self.opts['pki_dir'],
self.PEND,
key),
os.path.join(
self.opts['pki_dir'],
self.ACC,
key)
)
eload = {'result': True,
'act': 'accept',
'id': key}
self.event.fire_event(eload, tagify(prefix='key'))
except (IOError, OSError):
pass
return self.list_keys()
def delete_key(self, match=None, match_dict=None, preserve_minions=False):
'''
Delete public keys. If "match" is passed, it is evaluated as a glob.
Pre-gathered matches can also be passed via "match_dict".
To preserve the master caches of minions who are matched, set preserve_minions
'''
if match is not None:
matches = self.name_match(match)
elif match_dict is not None and isinstance(match_dict, dict):
matches = match_dict
else:
matches = {}
for status, keys in six.iteritems(matches):
for key in keys:
try:
os.remove(os.path.join(self.opts['pki_dir'], status, key))
eload = {'result': True,
'act': 'delete',
'id': key}
self.event.fire_event(eload, tagify(prefix='key'))
except (OSError, IOError):
pass
self.check_minion_cache(preserve_minions=matches.get('minions', []))
if self.opts.get('rotate_aes_key'):
salt.crypt.dropfile(self.opts['cachedir'], self.opts['user'])
return (
self.name_match(match) if match is not None
else self.dict_match(matches)
)
def delete_all(self):
'''
Delete all keys
'''
for status, keys in six.iteritems(self.list_keys()):
for key in keys:
try:
os.remove(os.path.join(self.opts['pki_dir'], status, key))
eload = {'result': True,
'act': 'delete',
'id': key}
self.event.fire_event(eload, tagify(prefix='key'))
except (OSError, IOError):
pass
self.check_minion_cache()
if self.opts.get('rotate_aes_key'):
salt.crypt.dropfile(self.opts['cachedir'], self.opts['user'])
return self.list_keys()
def reject(self, match=None, match_dict=None, include_accepted=False):
'''
Reject public keys. If "match" is passed, it is evaluated as a glob.
Pre-gathered matches can also be passed via "match_dict".
'''
if match is not None:
matches = self.name_match(match)
elif match_dict is not None and isinstance(match_dict, dict):
matches = match_dict
else:
matches = {}
keydirs = [self.PEND]
if include_accepted:
keydirs.append(self.ACC)
for keydir in keydirs:
for key in matches.get(keydir, []):
try:
shutil.move(
os.path.join(
self.opts['pki_dir'],
keydir,
key),
os.path.join(
self.opts['pki_dir'],
self.REJ,
key)
)
eload = {'result': True,
'act': 'reject',
'id': key}
self.event.fire_event(eload, tagify(prefix='key'))
except (IOError, OSError):
pass
self.check_minion_cache()
if self.opts.get('rotate_aes_key'):
salt.crypt.dropfile(self.opts['cachedir'], self.opts['user'])
return (
self.name_match(match) if match is not None
else self.dict_match(matches)
)
def reject_all(self):
'''
Reject all keys in pre
'''
keys = self.list_keys()
for key in keys[self.PEND]:
try:
shutil.move(
os.path.join(
self.opts['pki_dir'],
self.PEND,
key),
os.path.join(
self.opts['pki_dir'],
self.REJ,
key)
)
eload = {'result': True,
'act': 'reject',
'id': key}
self.event.fire_event(eload, tagify(prefix='key'))
except (IOError, OSError):
pass
self.check_minion_cache()
if self.opts.get('rotate_aes_key'):
salt.crypt.dropfile(self.opts['cachedir'], self.opts['user'])
return self.list_keys()
def finger(self, match):
'''
Return the fingerprint for a specified key
'''
matches = self.name_match(match, True)
ret = {}
for status, keys in six.iteritems(matches):
ret[status] = {}
for key in keys:
if status == 'local':
path = os.path.join(self.opts['pki_dir'], key)
else:
path = os.path.join(self.opts['pki_dir'], status, key)
ret[status][key] = salt.utils.pem_finger(path, sum_type=self.opts['hash_type'])
return ret
def finger_all(self):
'''
        Return fingerprints for all keys
'''
ret = {}
for status, keys in six.iteritems(self.list_keys()):
ret[status] = {}
for key in keys:
if status == 'local':
path = os.path.join(self.opts['pki_dir'], key)
else:
path = os.path.join(self.opts['pki_dir'], status, key)
ret[status][key] = salt.utils.pem_finger(path, sum_type=self.opts['hash_type'])
return ret
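# --- Editor's illustrative sketch (not part of upstream salt) ---
# Programmatically accepting every currently pending minion key through the
# Key API above; ``opts`` is assumed to be a fully loaded master config dict.
def _example_accept_all_pending(opts):
    key_manager = Key(opts)
    pending = key_manager.list_keys().get(key_manager.PEND, [])
    if pending:
        key_manager.accept(match_dict={key_manager.PEND: list(pending)})
    return pending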
class RaetKey(Key):
'''
Manage keys from the raet backend
'''
ACC = 'accepted'
PEND = 'pending'
REJ = 'rejected'
DEN = None
def __init__(self, opts):
Key.__init__(self, opts)
self.auto_key = salt.daemons.masterapi.AutoKey(self.opts)
self.serial = salt.payload.Serial(self.opts)
def _check_minions_directories(self):
'''
Return the minion keys directory paths
'''
accepted = os.path.join(self.opts['pki_dir'], self.ACC)
pre = os.path.join(self.opts['pki_dir'], self.PEND)
rejected = os.path.join(self.opts['pki_dir'], self.REJ)
return accepted, pre, rejected
def check_minion_cache(self, preserve_minions=False):
'''
Check the minion cache to make sure that old minion data is cleared
'''
keys = self.list_keys()
minions = []
for key, val in six.iteritems(keys):
minions.extend(val)
m_cache = os.path.join(self.opts['cachedir'], 'minions')
if os.path.isdir(m_cache):
for minion in os.listdir(m_cache):
if minion not in minions:
shutil.rmtree(os.path.join(m_cache, minion))
kind = self.opts.get('__role', '') # application kind
if kind not in kinds.APPL_KINDS:
emsg = ("Invalid application kind = '{0}'.".format(kind))
log.error(emsg + '\n')
raise ValueError(emsg)
role = self.opts.get('id', '')
if not role:
emsg = ("Invalid id.")
log.error(emsg + "\n")
raise ValueError(emsg)
name = "{0}_{1}".format(role, kind)
road_cache = os.path.join(self.opts['cachedir'],
'raet',
name,
'remote')
if os.path.isdir(road_cache):
for road in os.listdir(road_cache):
root, ext = os.path.splitext(road)
if ext not in ['.json', '.msgpack']:
continue
prefix, sep, name = root.partition('.')
if not name or prefix != 'estate':
continue
path = os.path.join(road_cache, road)
with salt.utils.fopen(path, 'rb') as fp_:
if ext == '.json':
data = json.load(fp_)
elif ext == '.msgpack':
data = msgpack.load(fp_)
if data['role'] not in minions:
os.remove(path)
def gen_keys(self):
'''
Use libnacl to generate and safely save a private key
'''
        import libnacl.dual
d_key = libnacl.dual.DualSecret()
path = '{0}.key'.format(os.path.join(
self.opts['gen_keys_dir'],
self.opts['gen_keys']))
d_key.save(path, 'msgpack')
def check_master(self):
'''
Log if the master is not running
NOT YET IMPLEMENTED
'''
return True
def local_keys(self):
'''
Return a dict of local keys
'''
ret = {'local': []}
fn_ = os.path.join(self.opts['pki_dir'], 'local.key')
if os.path.isfile(fn_):
ret['local'].append(fn_)
return ret
def status(self, minion_id, pub, verify):
'''
Accepts the minion id, device id, curve public and verify keys.
If the key is not present, put it in pending and return "pending",
If the key has been accepted return "accepted"
if the key should be rejected, return "rejected"
'''
acc, pre, rej = self._check_minions_directories() # pylint: disable=W0632
acc_path = os.path.join(acc, minion_id)
pre_path = os.path.join(pre, minion_id)
rej_path = os.path.join(rej, minion_id)
# open mode is turned on, force accept the key
keydata = {
'minion_id': minion_id,
'pub': pub,
'verify': verify}
if self.opts['open_mode']: # always accept and overwrite
with salt.utils.fopen(acc_path, 'w+b') as fp_:
fp_.write(self.serial.dumps(keydata))
return self.ACC
if os.path.isfile(rej_path):
log.debug("Rejection Reason: Keys already rejected.\n")
return self.REJ
elif os.path.isfile(acc_path):
# The minion id has been accepted, verify the key strings
with salt.utils.fopen(acc_path, 'rb') as fp_:
keydata = self.serial.loads(fp_.read())
if keydata['pub'] == pub and keydata['verify'] == verify:
return self.ACC
else:
log.debug("Rejection Reason: Keys not match prior accepted.\n")
return self.REJ
elif os.path.isfile(pre_path):
auto_reject = self.auto_key.check_autoreject(minion_id)
auto_sign = self.auto_key.check_autosign(minion_id)
with salt.utils.fopen(pre_path, 'rb') as fp_:
keydata = self.serial.loads(fp_.read())
if keydata['pub'] == pub and keydata['verify'] == verify:
if auto_reject:
self.reject(minion_id)
log.debug("Rejection Reason: Auto reject pended.\n")
return self.REJ
elif auto_sign:
self.accept(minion_id)
return self.ACC
return self.PEND
else:
log.debug("Rejection Reason: Keys not match prior pended.\n")
return self.REJ
# This is a new key, evaluate auto accept/reject files and place
# accordingly
auto_reject = self.auto_key.check_autoreject(minion_id)
auto_sign = self.auto_key.check_autosign(minion_id)
if self.opts['auto_accept']:
w_path = acc_path
ret = self.ACC
elif auto_sign:
w_path = acc_path
ret = self.ACC
elif auto_reject:
w_path = rej_path
log.debug("Rejection Reason: Auto reject new.\n")
ret = self.REJ
else:
w_path = pre_path
ret = self.PEND
with salt.utils.fopen(w_path, 'w+b') as fp_:
fp_.write(self.serial.dumps(keydata))
return ret
def _get_key_str(self, minion_id, status):
'''
Return the key string in the form of:
pub: <pub>
verify: <verify>
'''
path = os.path.join(self.opts['pki_dir'], status, minion_id)
with salt.utils.fopen(path, 'r') as fp_:
keydata = self.serial.loads(fp_.read())
return 'pub: {0}\nverify: {1}'.format(
keydata['pub'],
keydata['verify'])
def _get_key_finger(self, path):
'''
        Return a sha256 fingerprint for the key
'''
with salt.utils.fopen(path, 'r') as fp_:
keydata = self.serial.loads(fp_.read())
key = 'pub: {0}\nverify: {1}'.format(
keydata['pub'],
keydata['verify'])
return hashlib.sha256(key).hexdigest()
def key_str(self, match):
'''
Return the specified public key or keys based on a glob
'''
ret = {}
for status, keys in six.iteritems(self.name_match(match)):
ret[status] = {}
for key in salt.utils.isorted(keys):
ret[status][key] = self._get_key_str(key, status)
return ret
def key_str_all(self):
'''
Return all managed key strings
'''
ret = {}
for status, keys in six.iteritems(self.list_keys()):
ret[status] = {}
for key in salt.utils.isorted(keys):
ret[status][key] = self._get_key_str(key, status)
return ret
def accept(self, match=None, match_dict=None, include_rejected=False):
'''
Accept public keys. If "match" is passed, it is evaluated as a glob.
Pre-gathered matches can also be passed via "match_dict".
'''
if match is not None:
matches = self.name_match(match)
elif match_dict is not None and isinstance(match_dict, dict):
matches = match_dict
else:
matches = {}
keydirs = [self.PEND]
if include_rejected:
keydirs.append(self.REJ)
for keydir in keydirs:
for key in matches.get(keydir, []):
try:
shutil.move(
os.path.join(
self.opts['pki_dir'],
keydir,
key),
os.path.join(
self.opts['pki_dir'],
self.ACC,
key)
)
except (IOError, OSError):
pass
return (
self.name_match(match) if match is not None
else self.dict_match(matches)
)
def accept_all(self):
'''
Accept all keys in pre
'''
keys = self.list_keys()
for key in keys[self.PEND]:
try:
shutil.move(
os.path.join(
self.opts['pki_dir'],
self.PEND,
key),
os.path.join(
self.opts['pki_dir'],
self.ACC,
key)
)
except (IOError, OSError):
pass
return self.list_keys()
def delete_key(self, match=None, match_dict=None, preserve_minions=False):
'''
Delete public keys. If "match" is passed, it is evaluated as a glob.
Pre-gathered matches can also be passed via "match_dict".
'''
if match is not None:
matches = self.name_match(match)
elif match_dict is not None and isinstance(match_dict, dict):
matches = match_dict
else:
matches = {}
for status, keys in six.iteritems(matches):
for key in keys:
try:
os.remove(os.path.join(self.opts['pki_dir'], status, key))
except (OSError, IOError):
pass
self.check_minion_cache(preserve_minions=matches.get('minions', []))
return (
self.name_match(match) if match is not None
else self.dict_match(matches)
)
def delete_all(self):
'''
Delete all keys
'''
for status, keys in six.iteritems(self.list_keys()):
for key in keys:
try:
os.remove(os.path.join(self.opts['pki_dir'], status, key))
except (OSError, IOError):
pass
self.check_minion_cache()
return self.list_keys()
def reject(self, match=None, match_dict=None, include_accepted=False):
'''
Reject public keys. If "match" is passed, it is evaluated as a glob.
Pre-gathered matches can also be passed via "match_dict".
'''
if match is not None:
matches = self.name_match(match)
elif match_dict is not None and isinstance(match_dict, dict):
matches = match_dict
else:
matches = {}
keydirs = [self.PEND]
if include_accepted:
keydirs.append(self.ACC)
for keydir in keydirs:
for key in matches.get(keydir, []):
try:
shutil.move(
os.path.join(
self.opts['pki_dir'],
keydir,
key),
os.path.join(
self.opts['pki_dir'],
self.REJ,
key)
)
except (IOError, OSError):
pass
self.check_minion_cache()
return (
self.name_match(match) if match is not None
else self.dict_match(matches)
)
def reject_all(self):
'''
Reject all keys in pre
'''
keys = self.list_keys()
for key in keys[self.PEND]:
try:
shutil.move(
os.path.join(
self.opts['pki_dir'],
self.PEND,
key),
os.path.join(
self.opts['pki_dir'],
self.REJ,
key)
)
except (IOError, OSError):
pass
self.check_minion_cache()
return self.list_keys()
def finger(self, match):
'''
Return the fingerprint for a specified key
'''
matches = self.name_match(match, True)
ret = {}
for status, keys in six.iteritems(matches):
ret[status] = {}
for key in keys:
if status == 'local':
path = os.path.join(self.opts['pki_dir'], key)
else:
path = os.path.join(self.opts['pki_dir'], status, key)
ret[status][key] = self._get_key_finger(path)
return ret
def finger_all(self):
'''
Return fingerprints for all keys
'''
ret = {}
for status, keys in six.iteritems(self.list_keys()):
ret[status] = {}
for key in keys:
if status == 'local':
path = os.path.join(self.opts['pki_dir'], key)
else:
path = os.path.join(self.opts['pki_dir'], status, key)
ret[status][key] = self._get_key_finger(path)
return ret
def read_all_remote(self):
'''
Return a dict of all remote key data
'''
data = {}
for status, mids in six.iteritems(self.list_keys()):
for mid in mids:
keydata = self.read_remote(mid, status)
if keydata:
keydata['acceptance'] = status
data[mid] = keydata
return data
def read_remote(self, minion_id, status=ACC):
'''
Read in a remote key of status
'''
path = os.path.join(self.opts['pki_dir'], status, minion_id)
if not os.path.isfile(path):
return {}
with salt.utils.fopen(path, 'rb') as fp_:
return self.serial.loads(fp_.read())
def read_local(self):
'''
        Read in the local private keys, return an empty dict if the keys do not
exist
'''
path = os.path.join(self.opts['pki_dir'], 'local.key')
if not os.path.isfile(path):
return {}
with salt.utils.fopen(path, 'rb') as fp_:
return self.serial.loads(fp_.read())
def write_local(self, priv, sign):
'''
Write the private key and the signing key to a file on disk
'''
keydata = {'priv': priv,
'sign': sign}
path = os.path.join(self.opts['pki_dir'], 'local.key')
c_umask = os.umask(191)
if os.path.exists(path):
#mode = os.stat(path).st_mode
os.chmod(path, stat.S_IWUSR | stat.S_IRUSR)
with salt.utils.fopen(path, 'w+') as fp_:
fp_.write(self.serial.dumps(keydata))
os.chmod(path, stat.S_IRUSR)
os.umask(c_umask)
def delete_local(self):
'''
Delete the local private key file
'''
path = os.path.join(self.opts['pki_dir'], 'local.key')
if os.path.isfile(path):
os.remove(path)
def delete_pki_dir(self):
'''
Delete the private key directory
'''
path = self.opts['pki_dir']
if os.path.exists(path):
shutil.rmtree(path)
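# --- Editor's illustrative sketch (not part of upstream salt) ---
# Logging the acceptance status of every remote known to the raet backend;
# ``opts`` is assumed to be a master configuration dict using the 'raet'
# transport.
def _example_log_raet_remotes(opts):
    raet_key = RaetKey(opts)
    for minion_id, keydata in six.iteritems(raet_key.read_all_remote()):
        log.info('%s: %s', minion_id, keydata.get('acceptance'))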
| smallyear/linuxLearn | salt/salt/key.py | Python | apache-2.0 | 48,988 |
"""
WSGI config for brp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from dj_static import Cling
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "brp.settings")
os.environ.setdefault('DJANGO_CONFIGURATION', 'Dev')
application = Cling(get_wsgi_application())
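# --- Editor's note (illustrative) ---
# With DJANGO_SETTINGS_MODULE configured as above, this callable can be
# served, for example, with gunicorn:
#   gunicorn brp.wsgi:application --bind 0.0.0.0:8000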
| chop-dbhi/biorepo-portal | brp/wsgi.py | Python | bsd-2-clause | 471 |
"""
recursely
"""
__version__ = "0.1"
__description__ = "Recursive importer for Python submodules"
__author__ = "Karol Kuczmarski"
__license__ = "Simplified BSD"
import sys
from recursely._compat import IS_PY3
from recursely.importer import RecursiveImporter
from recursely.utils import SentinelList
__all__ = ['install']
def install(retroactive=True):
"""Install the recursive import hook in ``sys.meta_path``,
enabling the use of ``__recursive__`` directive.
:param retroactive: Whether the hook should be retroactively applied
                        to modules that have been imported before
it was installed.
"""
if RecursiveImporter.is_installed():
return
importer = RecursiveImporter()
# because the hook is a catch-all one, we ensure that it's always
# at the very end of ``sys.meta_path``, so that it's tried only if
# no other (more specific) hook has been chosen by Python
if IS_PY3:
for i in reversed(range(len(sys.meta_path))):
ih_module = getattr(sys.meta_path[i], '__module__', '')
is_builtin = ih_module == '_frozen_importlib'
if not is_builtin:
break
sys.meta_path = SentinelList(
sys.meta_path[:i],
sentinels=[importer] + sys.meta_path[i:])
else:
sys.meta_path = SentinelList(sys.meta_path, sentinel=importer)
# look through already imported packages and recursively import
# their submodules, if they contain the ``__recursive__`` directive
if retroactive:
for module in list(sys.modules.values()):
importer.recurse(module)
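# --- Editor's note (illustrative) ---
# Typical usage is to install the hook once at start-up; packages that set
# ``__recursive__ = True`` in their __init__.py are then imported recursively:
#
#     import recursely
#     recursely.install()
#     import mypackage  # hypothetical package using the directive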
| Xion/recursely | recursely/__init__.py | Python | bsd-2-clause | 1,663 |
# coding: utf-8
import sys
from setuptools import setup, find_packages
NAME = "pollster"
VERSION = "2.0.2"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = ["urllib3 >= 1.15", "six >= 1.10", "certifi", "python-dateutil", "pandas >= 0.19.1"]
setup(
name=NAME,
version=VERSION,
description="Pollster API",
author_email="Adam Hooper <[email protected]>",
url="https://github.com/huffpostdata/python-pollster",
keywords=["Pollster API"],
install_requires=REQUIRES,
packages=find_packages(),
include_package_data=True,
long_description="""Download election-related polling data from Pollster."""
)
| huffpostdata/python-pollster | setup.py | Python | bsd-2-clause | 756 |
import requests
import logging
import redis
from requests.packages.urllib3.exceptions import ConnectionError
from core.serialisers import json
from dss import localsettings
# TODO([email protected]): refactor these out to
# classes to avoid duplicating constants below
HEADERS = {
'content-type': 'application/json'
}
logger = logging.getLogger('spa')
def post_notification(session_id, image, message):
try:
payload = {
'sessionid': session_id,
'image': image,
'message': message
}
data = json.dumps(payload)
r = requests.post(
localsettings.REALTIME_HOST + 'notification',
data=data,
headers=HEADERS
)
if r.status_code == 200:
return ""
else:
return r.text
except ConnectionError:
        # should probably implement some sort of retry in here; for now log
        # the failure instead of silently dropping the notification
        logger.warning('could not reach realtime host', exc_info=True) | fergalmoran/dss | core/realtime/notification.py | Python | bsd-2-clause | 928 |
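A hedged usage sketch of post_notification(); the session id, image URL and message are invented, and an empty return value means the realtime host accepted the notification:
from core.realtime.notification import post_notification
error = post_notification(
    session_id='abc123',                     # hypothetical session id
    image='https://example.com/avatar.png',  # hypothetical image URL
    message='Your upload finished processing')
if error:
    print('realtime notification failed: %s' % error)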
# Copyright (c) 2021, DjaoDjin Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__version__ = '0.6.4-dev'
| djaodjin/djaodjin-deployutils | deployutils/__init__.py | Python | bsd-2-clause | 1,370 |
# Copyright 2014 Dev in Cachu authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from django.conf import settings
from django.conf.urls import include, patterns, url
from django.views.decorators import csrf
from django.views.generic import base
from django.contrib import admin
admin.autodiscover()
from devincachu.destaques import views as dviews
from devincachu.inscricao import views as iviews
from devincachu.palestras import views as pviews
p = patterns
urlpatterns = p("",
url(r"^admin/", include(admin.site.urls)),
url(r"^palestrantes/$",
pviews.PalestrantesView.as_view(),
name="palestrantes"),
url(r"^programacao/$",
pviews.ProgramacaoView.as_view(),
name="programacao"),
url(r"^programacao/(?P<palestrantes>.*)/(?P<slug>[\w-]+)/$",
pviews.PalestraView.as_view(),
name="palestra"),
url(r"^inscricao/$",
iviews.Inscricao.as_view(),
name="inscricao"),
url(r"^notificacao/$",
csrf.csrf_exempt(iviews.Notificacao.as_view()),
name="notificacao"),
url(r"^certificado/validar/$",
iviews.ValidacaoCertificado.as_view(),
name="validacao_certificado"),
url(r"^certificado/$",
iviews.BuscarCertificado.as_view(),
name="busca_certificado"),
url(r"^certificado/(?P<slug>[0-9a-f]+)/$",
iviews.Certificado.as_view(),
name="certificado"),
url(r"^sobre/$",
base.TemplateView.as_view(
template_name="sobre.html",
),
name="sobre-o-evento"),
url(r"^quando-e-onde/$",
base.TemplateView.as_view(
template_name="quando-e-onde.html",
),
name="quando-e-onde"),
url(r"^$", dviews.IndexView.as_view(), name="index"),
)
if settings.DEBUG:
urlpatterns += patterns("",
url(r"^media/(?P<path>.*)$",
"django.views.static.serve",
{"document_root": settings.MEDIA_ROOT}),
)
| devincachu/devincachu-2014 | devincachu/urls.py | Python | bsd-2-clause | 2,555 |
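Because every route above is named, other code can build URLs with reverse() instead of hard-coding paths; a sketch for the certificate route using the Django 1.x import path that matches the patterns() style above (the slug value is made up):
from django.core.urlresolvers import reverse
url = reverse("certificado", kwargs={"slug": "0a1b2c"})  # -> /certificado/0a1b2c/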
#!/usr/bin/env python
#*********************************************************************
# Software License Agreement (BSD License)
#
# Copyright (c) 2011 andrewtron3000
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Willow Garage nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#********************************************************************/
import roslib; roslib.load_manifest('face_detection')
import rospy
import sys
import cv
from cv_bridge import CvBridge
from sensor_msgs.msg import Image
from geometry_msgs.msg import Point
from geometry_msgs.msg import PointStamped
#
# Instantiate a new opencv to ROS bridge adaptor
#
cv_bridge = CvBridge()
#
# Define the callback that will be called when a new image is received.
#
def callback(publisher, coord_publisher, cascade, imagemsg):
#
# Convert the ROS imagemsg to an opencv image.
#
image = cv_bridge.imgmsg_to_cv(imagemsg, 'mono8')
#
# Blur the image.
#
cv.Smooth(image, image, cv.CV_GAUSSIAN)
#
# Allocate some storage for the haar detect operation.
#
storage = cv.CreateMemStorage(0)
#
# Call the face detector function.
#
faces = cv.HaarDetectObjects(image, cascade, storage, 1.2, 2,
cv.CV_HAAR_DO_CANNY_PRUNING, (100,100))
#
# If faces are detected, compute the centroid of all the faces
# combined.
#
face_centroid_x = 0.0
face_centroid_y = 0.0
if len(faces) > 0:
#
# For each face, draw a rectangle around it in the image,
# and also add the position of the face to the centroid
# of all faces combined.
#
for (i, n) in faces:
x = int(i[0])
y = int(i[1])
width = int(i[2])
height = int(i[3])
cv.Rectangle(image,
(x, y),
(x + width, y + height),
cv.CV_RGB(0,255,0), 3, 8, 0)
face_centroid_x += float(x) + (float(width) / 2.0)
face_centroid_y += float(y) + (float(height) / 2.0)
#
# Finish computing the face_centroid by dividing by the
# number of faces found above.
#
face_centroid_x /= float(len(faces))
face_centroid_y /= float(len(faces))
#
# Lastly, if faces were detected, publish a PointStamped
# message that contains the centroid values.
#
pt = Point(x = face_centroid_x, y = face_centroid_y, z = 0.0)
pt_stamped = PointStamped(point = pt)
coord_publisher.publish(pt_stamped)
#
# Convert the opencv image back to a ROS image using the
# cv_bridge.
#
newmsg = cv_bridge.cv_to_imgmsg(image, 'mono8')
#
# Republish the image. Note this image has boxes around
# faces if faces were found.
#
publisher.publish(newmsg)
def listener(publisher, coord_publisher):
rospy.init_node('face_detector', anonymous=True)
#
# Load the haar cascade. Note we get the
# filename from the "classifier" parameter
# that is configured in the launch script.
#
cascadeFileName = rospy.get_param("~classifier")
cascade = cv.Load(cascadeFileName)
rospy.Subscriber("/stereo/left/image_rect",
Image,
lambda image: callback(publisher, coord_publisher, cascade, image))
rospy.spin()
# This is called first.
if __name__ == '__main__':
publisher = rospy.Publisher('face_view', Image)
coord_publisher = rospy.Publisher('face_coords', PointStamped)
listener(publisher, coord_publisher)
| andrewtron3000/hacdc-ros-pkg | face_detection/src/detector.py | Python | bsd-2-clause | 5,077 |
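Other nodes can consume the centroid that callback() publishes on the face_coords topic; a minimal subscriber sketch (node and handler names are made up):
#!/usr/bin/env python
import roslib; roslib.load_manifest('face_detection')
import rospy
from geometry_msgs.msg import PointStamped
def on_face(msg):
    # msg.point carries the pixel-space centroid computed by the detector.
    rospy.loginfo("face centroid at (%.1f, %.1f)", msg.point.x, msg.point.y)
if __name__ == '__main__':
    rospy.init_node('face_coords_listener', anonymous=True)
    rospy.Subscriber('face_coords', PointStamped, on_face)
    rospy.spin()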
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import urllib
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
from jp.ac.kyoto_su.aokilab.dragon.mvc.model import OpenGLModel
from jp.ac.kyoto_su.aokilab.dragon.mvc.view import *
from jp.ac.kyoto_su.aokilab.dragon.opengl.triangle import OpenGLTriangle
from jp.ac.kyoto_su.aokilab.dragon.opengl.polygon import OpenGLPolygon
TRACE = True
DEBUG = False
class DragonModel(OpenGLModel):
"""ドラゴンのモデル。"""
def __init__(self):
"""ドラゴンのモデルのコンストラクタ。"""
if TRACE: print(__name__), self.__init__.__doc__
super(DragonModel, self).__init__()
self._eye_point = [-5.5852450791872 , 3.07847342734 , 15.794105252496]
self._sight_point = [0.27455347776413 , 0.20096999406815 , -0.11261999607086]
self._up_vector = [0.1018574904194 , 0.98480906061847 , -0.14062775604137]
self._fovy = self._default_fovy = 12.642721790235
filename = os.path.join(os.getcwd(), 'dragon.txt')
if os.path.exists(filename) and os.path.isfile(filename):
pass
else:
url = 'http://www.cc.kyoto-su.ac.jp/~atsushi/Programs/Dragon/dragon.txt'
urllib.urlretrieve(url, filename)
with open(filename, "rU") as a_file:
while True:
a_string = a_file.readline()
if len(a_string) == 0: break
a_list = a_string.split()
if len(a_list) == 0: continue
first_string = a_list[0]
if first_string == "number_of_vertexes":
number_of_vertexes = int(a_list[1])
if first_string == "number_of_triangles":
number_of_triangles = int(a_list[1])
if first_string == "end_header":
get_tokens = (lambda file: file.readline().split())
collection_of_vertexes = []
for n_th in range(number_of_vertexes):
a_list = get_tokens(a_file)
a_vertex = map(float, a_list[0:3])
collection_of_vertexes.append(a_vertex)
index_to_vertex = (lambda index: collection_of_vertexes[index-1])
for n_th in range(number_of_triangles):
a_list = get_tokens(a_file)
indexes = map(int, a_list[0:3])
vertexes = map(index_to_vertex, indexes)
a_tringle = OpenGLTriangle(*vertexes)
self._display_object.append(a_tringle)
return
def default_window_title(self):
"""ドラゴンのウィンドウのタイトル(ラベル)を応答する。"""
if TRACE: print(__name__), self.default_window_title.__doc__
return "Dragon"
class WaspModel(OpenGLModel):
"""スズメバチのモデル。"""
def __init__(self):
"""スズメバチのモデルのコンストラクタ。"""
if TRACE: print(__name__), self.__init__.__doc__
super(WaspModel, self).__init__()
self._eye_point = [-5.5852450791872 , 3.07847342734 , 15.794105252496]
self._sight_point = [0.19825005531311 , 1.8530999422073 , -0.63795006275177]
self._up_vector = [0.070077999093727 , 0.99630606032682 , -0.049631725731267]
self._fovy = self._default_fovy = 41.480099231656
filename = os.path.join(os.getcwd(), 'wasp.txt')
if os.path.exists(filename) and os.path.isfile(filename):
pass
else:
url = 'http://www.cc.kyoto-su.ac.jp/~atsushi/Programs/Wasp/wasp.txt'
urllib.urlretrieve(url, filename)
with open(filename, "rU") as a_file:
while True:
a_string = a_file.readline()
if len(a_string) == 0: break
a_list = a_string.split()
if len(a_list) == 0: continue
first_string = a_list[0]
if first_string == "number_of_vertexes":
number_of_vertexes = int(a_list[1])
if first_string == "number_of_polygons":
number_of_polygons = int(a_list[1])
if first_string == "end_header":
get_tokens = (lambda file: file.readline().split())
collection_of_vertexes = []
for n_th in range(number_of_vertexes):
a_list = get_tokens(a_file)
a_vertex = map(float, a_list[0:3])
collection_of_vertexes.append(a_vertex)
index_to_vertex = (lambda index: collection_of_vertexes[index-1])
for n_th in range(number_of_polygons):
a_list = get_tokens(a_file)
number_of_indexes = int(a_list[0])
index = number_of_indexes + 1
indexes = map(int, a_list[1:index])
vertexes = map(index_to_vertex, indexes)
rgb_color = map(float, a_list[index:index+3])
a_polygon = OpenGLPolygon(vertexes, rgb_color)
self._display_object.append(a_polygon)
return
def default_view_class(self):
"""スズメバチのモデルを表示するデフォルトのビューのクラスを応答する。"""
if TRACE: print(__name__), self.default_view_class.__doc__
return WaspView
def default_window_title(self):
"""スズメバチのウィンドウのタイトル(ラベル)を応答する。"""
if TRACE: print(__name__), self.default_window_title.__doc__
return "Wasp"
class BunnyModel(OpenGLModel):
"""うさぎのモデル。"""
def __init__(self):
"""うさぎのモデルのコンストラクタ。"""
if TRACE: print(__name__), self.__init__.__doc__
super(BunnyModel, self).__init__()
filename = os.path.join(os.getcwd(), 'bunny.ply')
if os.path.exists(filename) and os.path.isfile(filename):
pass
else:
url = 'http://www.cc.kyoto-su.ac.jp/~atsushi/Programs/Bunny/bunny.ply'
urllib.urlretrieve(url, filename)
with open(filename, "rU") as a_file:
while True:
a_string = a_file.readline()
if len(a_string) == 0: break
a_list = a_string.split()
if len(a_list) == 0: continue
first_string = a_list[0]
if first_string == "element":
second_string = a_list[1]
if second_string == "vertex":
number_of_vertexes = int(a_list[2])
if second_string == "face":
number_of_faces = int(a_list[2])
if first_string == "end_header":
get_tokens = (lambda file: file.readline().split())
collection_of_vertexes = []
for n_th in range(number_of_vertexes):
a_list = get_tokens(a_file)
a_vertex = map(float, a_list[0:3])
collection_of_vertexes.append(a_vertex)
index_to_vertex = (lambda index: collection_of_vertexes[index])
for n_th in range(number_of_faces):
a_list = get_tokens(a_file)
indexes = map(int, a_list[1:4])
vertexes = map(index_to_vertex, indexes)
a_tringle = OpenGLTriangle(*vertexes)
self._display_object.append(a_tringle)
if first_string == "comment":
second_string = a_list[1]
if second_string == "eye_point_xyz":
self._eye_point = map(float, a_list[2:5])
if second_string == "sight_point_xyz":
self._sight_point = map(float, a_list[2:5])
if second_string == "up_vector_xyz":
self._up_vector = map(float, a_list[2:5])
if second_string == "zoom_height" and a_list[3] == "fovy":
self._fovy = self._default_fovy = float(a_list[4])
return
def default_view_class(self):
"""うさぎのモデルを表示するデフォルトのビューのクラスを応答する。"""
if TRACE: print(__name__), self.default_view_class.__doc__
return BunnyView
def default_window_title(self):
"""うさぎのウィンドウのタイトル(ラベル)を応答する。"""
if TRACE: print(__name__), self.default_window_title.__doc__
return "Stanford Bunny"
# end of file | frederica07/Dragon_Programming_Process | jp/ac/kyoto_su/aokilab/dragon/example_model/example.py | Python | bsd-2-clause | 7,196 |
# Copyright 2015-2017 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
from lino.core.roles import UserRole
class SimpleContactsUser(UserRole):
pass
class ContactsUser(SimpleContactsUser):
pass
class ContactsStaff(ContactsUser):
pass
| khchine5/xl | lino_xl/lib/contacts/roles.py | Python | bsd-2-clause | 269 |
from django.db.models import Transform
from django.db.models import DateTimeField, TimeField
from django.utils.functional import cached_property
class TimeValue(Transform):
lookup_name = 'time'
function = 'time'
def as_sql(self, compiler, connection):
lhs, params = compiler.compile(self.lhs)
return 'TIME({})'.format(lhs), params
@cached_property
def output_field(self):
return TimeField()
DateTimeField.register_lookup(TimeValue) | mivanov-utwente/t4proj | t4proj/apps/stats/models.py | Python | bsd-2-clause | 481 |
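Registering the transform on DateTimeField exposes a __time lookup in querysets, and because output_field is a TimeField further lookups can be chained onto it; a sketch against a hypothetical Event model:
import datetime
from django.db import models
class Event(models.Model):
    created = models.DateTimeField()
# Events created between 09:00 and 17:00, regardless of date.
working_hours = Event.objects.filter(
    created__time__gte=datetime.time(9, 0),
    created__time__lte=datetime.time(17, 0))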
#!/usr/bin/env python
__author__ = 'Adam R. Smith, Michael Meisinger, Dave Foster <[email protected]>'
import threading
import traceback
import gevent
from gevent import greenlet, Timeout
from gevent.event import Event, AsyncResult
from gevent.queue import Queue
from pyon.core import MSG_HEADER_ACTOR
from pyon.core.bootstrap import CFG
from pyon.core.exception import IonException, ContainerError
from pyon.core.exception import Timeout as IonTimeout
from pyon.core.thread import PyonThreadManager, PyonThread, ThreadManager, PyonThreadTraceback, PyonHeartbeatError
from pyon.datastore.postgresql.pg_util import init_db_stats, get_db_stats, clear_db_stats
from pyon.ion.service import BaseService
from pyon.util.containers import get_ion_ts, get_ion_ts_millis
from pyon.util.log import log
STAT_INTERVAL_LENGTH = 60000 # Interval time for process saturation stats collection
stats_callback = None
class OperationInterruptedException(BaseException):
"""
Interrupted exception. Used by external items timing out execution in the
IonProcessThread's control thread.
Derived from BaseException to specifically avoid try/except Exception blocks,
such as in Publisher's publish_event.
"""
pass
class IonProcessError(StandardError):
pass
class IonProcessThread(PyonThread):
"""
The control part of an ION process.
"""
def __init__(self, target=None, listeners=None, name=None, service=None, cleanup_method=None,
heartbeat_secs=10, **kwargs):
"""
Constructs the control part of an ION process.
Used by the container's IonProcessThreadManager, as part of spawn_process.
@param target A callable to run in the PyonThread. If None (typical), will use the target method
defined in this class.
@param listeners A list of listening endpoints attached to this thread.
@param name The name of this ION process.
@param service An instance of the BaseService derived class which contains the business logic for
the ION process.
@param cleanup_method An optional callable to run when the process is stopping. Runs after all other
notify_stop calls have run. Should take one param, this instance.
@param heartbeat_secs Number of seconds to wait in between heartbeats.
"""
self._startup_listeners = listeners or []
self.listeners = []
self._listener_map = {}
self.name = name
self.service = service
self._cleanup_method = cleanup_method
self.thread_manager = ThreadManager(failure_notify_callback=self._child_failed) # bubbles up to main thread manager
self._dead_children = [] # save any dead children for forensics
self._ctrl_thread = None
self._ctrl_queue = Queue()
self._ready_control = Event()
self._errors = []
self._ctrl_current = None # set to the AR generated by _routing_call when in the context of a call
# processing vs idle time (ms)
self._start_time = None
self._proc_time = 0 # busy time since start
self._proc_time_prior = 0 # busy time at the beginning of the prior interval
self._proc_time_prior2 = 0 # busy time at the beginning of 2 interval's ago
self._proc_interval_num = 0 # interval num of last record
# for heartbeats, used to detect stuck processes
self._heartbeat_secs = heartbeat_secs # amount of time to wait between heartbeats
self._heartbeat_stack = None # stacktrace of last heartbeat
self._heartbeat_time = None # timestamp of heart beat last matching the current op
self._heartbeat_op = None # last operation (by AR)
self._heartbeat_count = 0 # number of times this operation has been seen consecutively
self._log_call_exception = CFG.get_safe("container.process.log_exceptions", False)
self._log_call_dbstats = CFG.get_safe("container.process.log_dbstats", False)
self._warn_call_dbstmt_threshold = CFG.get_safe("container.process.warn_dbstmt_threshold", 0)
PyonThread.__init__(self, target=target, **kwargs)
def heartbeat(self):
"""
Returns a 3-tuple indicating everything is ok.
Should only be called after the process has been started.
Checks the following:
- All attached endpoints are alive + listening (this means ready)
- The control flow greenlet is alive + listening or processing
@return 3-tuple indicating (listeners ok, ctrl thread ok, heartbeat status). Use all on it for a
boolean indication of success.
"""
listeners_ok = True
for l in self.listeners:
if not (l in self._listener_map and not self._listener_map[l].proc.dead and l.get_ready_event().is_set()):
listeners_ok = False
ctrl_thread_ok = self._ctrl_thread.running
# are we currently processing something?
heartbeat_ok = True
if self._ctrl_current is not None:
st = traceback.extract_stack(self._ctrl_thread.proc.gr_frame)
if self._ctrl_current == self._heartbeat_op:
if st == self._heartbeat_stack:
self._heartbeat_count += 1 # we've seen this before! increment count
# we've been in this for the last X ticks, or it's been X seconds, fail this part of the heartbeat
if self._heartbeat_count > CFG.get_safe('container.timeout.heartbeat_proc_count_threshold', 30) or \
get_ion_ts_millis() - int(self._heartbeat_time) >= CFG.get_safe('container.timeout.heartbeat_proc_time_threshold', 30) * 1000:
heartbeat_ok = False
else:
# it's made some progress
self._heartbeat_count = 1
self._heartbeat_stack = st
self._heartbeat_time = get_ion_ts()
else:
self._heartbeat_op = self._ctrl_current
self._heartbeat_count = 1
self._heartbeat_time = get_ion_ts()
self._heartbeat_stack = st
else:
self._heartbeat_op = None
self._heartbeat_count = 0
#log.debug("%s %s %s", listeners_ok, ctrl_thread_ok, heartbeat_ok)
return (listeners_ok, ctrl_thread_ok, heartbeat_ok)
@property
def time_stats(self):
"""
Returns a 5-tuple of (total time, idle time, processing time, time since prior interval start,
busy since prior interval start), all in ms (int).
"""
now = get_ion_ts_millis()
running_time = now - self._start_time
idle_time = running_time - self._proc_time
cur_interval = now / STAT_INTERVAL_LENGTH
now_since_prior = now - (cur_interval - 1) * STAT_INTERVAL_LENGTH
if cur_interval == self._proc_interval_num:
proc_time_since_prior = self._proc_time-self._proc_time_prior2
elif cur_interval-1 == self._proc_interval_num:
proc_time_since_prior = self._proc_time-self._proc_time_prior
else:
proc_time_since_prior = 0
return (running_time, idle_time, self._proc_time, now_since_prior, proc_time_since_prior)
def _child_failed(self, child):
"""
        Callback from gevent as set in the ThreadManager, when a child greenlet fails.
Kills the ION process main greenlet. This propagates the error up to the process supervisor.
"""
# remove the child from the list of children (so we can shut down cleanly)
for x in self.thread_manager.children:
if x.proc == child:
self.thread_manager.children.remove(x)
break
self._dead_children.append(child)
# kill this process's main greenlet. This should be noticed by the container's proc manager
self.proc.kill(child.exception)
def add_endpoint(self, listener, activate=True):
"""
Adds a listening endpoint to be managed by this ION process.
Spawns the listen loop and sets the routing call to synchronize incoming messages
here. If this process hasn't been started yet, adds it to the list of listeners
to start on startup.
@param activate If True (default), start consuming from listener
"""
if self.proc:
listener.routing_call = self._routing_call
if self.name:
svc_name = "unnamed-service"
if self.service is not None and hasattr(self.service, 'name'):
svc_name = self.service.name
listen_thread_name = "%s-%s-listen-%s" % (svc_name, self.name, len(self.listeners)+1)
else:
listen_thread_name = "unknown-listener-%s" % (len(self.listeners)+1)
listen_thread = self.thread_manager.spawn(listener.listen, thread_name=listen_thread_name, activate=activate)
listen_thread.proc._glname = "ION Proc listener %s" % listen_thread_name
self._listener_map[listener] = listen_thread
self.listeners.append(listener)
else:
self._startup_listeners.append(listener)
def remove_endpoint(self, listener):
"""
Removes a listening endpoint from management by this ION process.
If the endpoint is unknown to this ION process, raises an error.
@return The PyonThread running the listen loop, if it exists. You are
responsible for closing it when appropriate.
"""
if listener in self.listeners:
self.listeners.remove(listener)
return self._listener_map.pop(listener)
elif listener in self._startup_listeners:
self._startup_listeners.remove(listener)
return None
else:
raise IonProcessError("Cannot remove unrecognized listener: %s" % listener)
def target(self, *args, **kwargs):
"""
Entry point for the main process greenlet.
        Sets up the base properties for this process (mainly the control thread).
"""
if self.name:
threading.current_thread().name = "%s-target" % self.name
# start time
self._start_time = get_ion_ts_millis()
self._proc_interval_num = self._start_time / STAT_INTERVAL_LENGTH
# spawn control flow loop
self._ctrl_thread = self.thread_manager.spawn(self._control_flow)
self._ctrl_thread.proc._glname = "ION Proc CL %s" % self.name
# wait on control flow loop, heartbeating as appropriate
while not self._ctrl_thread.ev_exit.wait(timeout=self._heartbeat_secs):
hbst = self.heartbeat()
if not all(hbst):
log.warn("Heartbeat status for process %s returned %s", self, hbst)
if self._heartbeat_stack is not None:
stack_out = "".join(traceback.format_list(self._heartbeat_stack))
else:
stack_out = "N/A"
#raise PyonHeartbeatError("Heartbeat failed: %s, stacktrace:\n%s" % (hbst, stack_out))
log.warn("Heartbeat failed: %s, stacktrace:\n%s", hbst, stack_out)
# this is almost a no-op as we don't fall out of the above loop without
# exiting the ctrl_thread, but having this line here makes testing much easier.
self._ctrl_thread.join()
def _routing_call(self, call, context, *callargs, **callkwargs):
"""
Endpoints call into here to synchronize across the entire IonProcess.
Returns immediately with an AsyncResult that can be waited on. Calls
are made by the loop in _control_flow. We pass in the calling greenlet so
exceptions are raised in the correct context.
@param call The call to be made within this ION processes' calling greenlet.
@param callargs The keyword args to pass to the call.
@param context Optional process-context (usually the headers of the incoming call) to be
set. Process-context is greenlet-local, and since we're crossing greenlet
boundaries, we must set it again in the ION process' calling greenlet.
"""
ar = AsyncResult()
if len(callargs) == 0 and len(callkwargs) == 0:
log.trace("_routing_call got no arguments for the call %s, check your call's parameters", call)
self._ctrl_queue.put((greenlet.getcurrent(), ar, call, callargs, callkwargs, context))
return ar
def has_pending_call(self, ar):
"""
Returns true if the call (keyed by the AsyncResult returned by _routing_call) is still pending.
"""
for _, qar, _, _, _, _ in self._ctrl_queue.queue:
if qar == ar:
return True
return False
def _cancel_pending_call(self, ar):
"""
        Cancels a pending call (keyed by the AsyncResult returned by _routing_call).
@return True if the call was truly pending.
"""
if self.has_pending_call(ar):
ar.set(False)
return True
return False
def _interrupt_control_thread(self):
"""
Signal the control flow thread that it needs to abort processing, likely due to a timeout.
"""
self._ctrl_thread.proc.kill(exception=OperationInterruptedException, block=False)
def cancel_or_abort_call(self, ar):
"""
Either cancels a future pending call, or aborts the current processing if the given AR is unset.
The pending call is keyed by the AsyncResult returned by _routing_call.
"""
if not self._cancel_pending_call(ar) and not ar.ready():
self._interrupt_control_thread()
def _control_flow(self):
"""
Entry point for process control thread of execution.
This method is run by the control greenlet for each ION process. Listeners attached
to the process, either RPC Servers or Subscribers, synchronize calls to the process
by placing call requests into the queue by calling _routing_call.
This method blocks until there are calls to be made in the synchronized queue, and
then calls from within this greenlet. Any exception raised is caught and re-raised
in the greenlet that originally scheduled the call. If successful, the AsyncResult
created at scheduling time is set with the result of the call.
"""
svc_name = getattr(self.service, "name", "unnamed-service") if self.service else "unnamed-service"
proc_id = getattr(self.service, "id", "unknown-pid") if self.service else "unknown-pid"
if self.name:
threading.current_thread().name = "%s-%s" % (svc_name, self.name)
thread_base_name = threading.current_thread().name
self._ready_control.set()
for calltuple in self._ctrl_queue:
calling_gl, ar, call, callargs, callkwargs, context = calltuple
request_id = (context or {}).get("request-id", None)
if request_id:
threading.current_thread().name = thread_base_name + "-" + str(request_id)
#log.debug("control_flow making call: %s %s %s (has context: %s)", call, callargs, callkwargs, context is not None)
res = None
start_proc_time = get_ion_ts_millis()
self._record_proc_time(start_proc_time)
# check context for expiration
if context is not None and 'reply-by' in context:
if start_proc_time >= int(context['reply-by']):
log.info("control_flow: attempting to process message already exceeding reply-by, ignore")
# raise a timeout in the calling thread to allow endpoints to continue processing
e = IonTimeout("Reply-by time has already occurred (reply-by: %s, op start time: %s)" % (context['reply-by'], start_proc_time))
calling_gl.kill(exception=e, block=False)
continue
# If ar is set, means it is cancelled
if ar.ready():
log.info("control_flow: attempting to process message that has been cancelled, ignore")
continue
init_db_stats()
try:
# ******************************************************************
# ****** THIS IS WHERE THE RPC OPERATION/SERVICE CALL IS MADE ******
with self.service.push_context(context), \
self.service.container.context.push_context(context):
self._ctrl_current = ar
res = call(*callargs, **callkwargs)
# ****** END CALL, EXCEPTION HANDLING FOLLOWS ******
# ******************************************************************
except OperationInterruptedException:
# endpoint layer takes care of response as it's the one that caused this
log.debug("Operation interrupted")
pass
except Exception as e:
if self._log_call_exception:
log.exception("PROCESS exception: %s" % e.message)
# Raise the exception in the calling greenlet.
# Try decorating the args of the exception with the true traceback -
# this should be reported by ThreadManager._child_failed
exc = PyonThreadTraceback("IonProcessThread _control_flow caught an exception "
"(call: %s, *args %s, **kwargs %s, context %s)\n"
"True traceback captured by IonProcessThread' _control_flow:\n\n%s" % (
call, callargs, callkwargs, context, traceback.format_exc()))
e.args = e.args + (exc,)
if isinstance(e, (TypeError, IonException)):
# Pass through known process exceptions, in particular IonException
calling_gl.kill(exception=e, block=False)
else:
# Otherwise, wrap unknown, forward and hopefully we can continue on our way
self._errors.append((call, callargs, callkwargs, context, e, exc))
log.warn(exc)
log.warn("Attempting to continue...")
# Note: Too large exception string will crash the container (when passed on as msg header).
exception_str = str(exc)
if len(exception_str) > 10000:
exception_str = (
"Exception string representation too large. "
"Begin and end of the exception:\n"
+ exception_str[:2000] + "\n...\n" + exception_str[-2000:]
)
calling_gl.kill(exception=ContainerError(exception_str), block=False)
finally:
try:
# Compute statistics
self._compute_proc_stats(start_proc_time)
db_stats = get_db_stats()
if db_stats:
if self._warn_call_dbstmt_threshold > 0 and db_stats.get("count.all", 0) >= self._warn_call_dbstmt_threshold:
stats_str = ", ".join("{}={}".format(k, db_stats[k]) for k in sorted(db_stats.keys()))
log.warn("PROC_OP '%s.%s' EXCEEDED DB THRESHOLD. stats=%s", svc_name, call.__name__, stats_str)
elif self._log_call_dbstats:
stats_str = ", ".join("{}={}".format(k, db_stats[k]) for k in sorted(db_stats.keys()))
log.info("PROC_OP '%s.%s' DB STATS: %s", svc_name, call.__name__, stats_str)
clear_db_stats()
if stats_callback:
stats_callback(proc_id=proc_id, proc_name=self.name, svc=svc_name, op=call.__name__,
request_id=request_id, context=context,
db_stats=db_stats, proc_stats=self.time_stats, result=res, exc=None)
except Exception:
log.exception("Error computing process call stats")
self._ctrl_current = None
threading.current_thread().name = thread_base_name
# Set response in AsyncEvent of caller (endpoint greenlet)
ar.set(res)
def _record_proc_time(self, cur_time):
""" Keep the _proc_time of the prior and prior-prior intervals for stats computation
"""
cur_interval = cur_time / STAT_INTERVAL_LENGTH
if cur_interval == self._proc_interval_num:
# We're still in the same interval - no update
pass
elif cur_interval-1 == self._proc_interval_num:
# Record the stats from the prior interval
self._proc_interval_num = cur_interval
self._proc_time_prior2 = self._proc_time_prior
self._proc_time_prior = self._proc_time
elif cur_interval-1 > self._proc_interval_num:
# We skipped an entire interval - everything is prior2
self._proc_interval_num = cur_interval
self._proc_time_prior2 = self._proc_time
self._proc_time_prior = self._proc_time
def _compute_proc_stats(self, start_proc_time):
cur_time = get_ion_ts_millis()
self._record_proc_time(cur_time)
proc_time = cur_time - start_proc_time
self._proc_time += proc_time
def start_listeners(self):
"""
Starts all listeners in managed greenlets.
Usually called by the ProcManager, unless using IonProcess manually.
"""
try:
# disable normal error reporting, this method should only be called from startup
self.thread_manager._failure_notify_callback = None
# spawn all listeners in startup listeners (from initializer, or added later)
for listener in self._startup_listeners:
self.add_endpoint(listener)
with Timeout(seconds=CFG.get_safe('container.messaging.timeout.start_listener', 30)):
gevent.wait([x.get_ready_event() for x in self.listeners])
except Timeout:
# remove failed endpoints before reporting failure above
for listener, proc in self._listener_map.iteritems():
if proc.proc.dead:
log.info("removed dead listener: %s", listener)
self.listeners.remove(listener)
self.thread_manager.children.remove(proc)
raise IonProcessError("start_listeners did not complete in expected time")
finally:
self.thread_manager._failure_notify_callback = self._child_failed
def _notify_stop(self):
"""
Called when the process is about to be shut down.
Instructs all listeners to close, puts a StopIteration into the synchronized queue,
and waits for the listeners to close and for the control queue to exit.
"""
for listener in self.listeners:
try:
listener.close()
except Exception as ex:
tb = traceback.format_exc()
log.warn("Could not close listener, attempting to ignore: %s\nTraceback:\n%s", ex, tb)
self._ctrl_queue.put(StopIteration)
# wait_children will join them and then get() them, which may raise an exception if any of them
# died with an exception.
self.thread_manager.wait_children(30)
PyonThread._notify_stop(self)
# run the cleanup method if we have one
if self._cleanup_method is not None:
try:
self._cleanup_method(self)
except Exception as ex:
log.warn("Cleanup method error, attempting to ignore: %s\nTraceback: %s", ex, traceback.format_exc())
def get_ready_event(self):
"""
Returns an Event that is set when the control greenlet is up and running.
"""
return self._ready_control
class IonProcessThreadManager(PyonThreadManager):
def _create_thread(self, target=None, **kwargs):
return IonProcessThread(target=target, heartbeat_secs=self.heartbeat_secs, **kwargs)
# ---------------------------------------------------------------------------------------------------
# Process type variants
class StandaloneProcess(BaseService):
"""
A process is an ION process of type "standalone" that has an incoming messaging
attachment for the process and operations as defined in a service YML.
"""
process_type = "standalone"
class SimpleProcess(BaseService):
"""
A simple process is an ION process of type "simple" that has no incoming messaging
attachment.
"""
process_type = "simple"
class ImmediateProcess(BaseService):
"""
An immediate process is an ION process of type "immediate" that does its action in
    the on_init and on_start hooks, and is terminated immediately after completion.
Has no messaging attachment.
"""
process_type = "immediate"
class StreamProcess(BaseService):
"""
Base class for a stream process.
Such a process handles a sequence of otherwise unconstrained messages, resulting from a
subscription. There are no operations.
"""
process_type = "stream_process"
def call_process(self, message, stream_route, stream_id):
"""
Handles pre-processing of packet and process work
"""
self.process(message)
def process(self, message):
"""
Process a message as arriving based on a subscription.
"""
pass
# ---------------------------------------------------------------------------------------------------
# Process helpers
def get_ion_actor_id(process):
"""Given an ION process, return the ion-actor-id from the context, if set and present"""
ion_actor_id = None
if process:
ctx = process.get_context()
ion_actor_id = ctx.get(MSG_HEADER_ACTOR, None) if ctx else None
return ion_actor_id
def set_process_stats_callback(stats_cb):
""" Sets a callback function (hook) to push stats after a process operation call. """
global stats_callback
if stats_cb is None:
pass
elif stats_callback:
log.warn("Stats callback already defined")
stats_callback = stats_cb
| scionrep/scioncc | src/pyon/ion/process.py | Python | bsd-2-clause | 27,034 |
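The heart of IonProcessThread is the _routing_call()/_control_flow() handshake: callers enqueue a (call, args, AsyncResult) tuple and block on the AsyncResult while a single control greenlet executes the calls one at a time. A stripped-down sketch of that pattern in plain gevent (names are illustrative, not Pyon APIs):
import gevent
from gevent.event import AsyncResult
from gevent.queue import Queue
ctrl_queue = Queue()
def routing_call(call, *args, **kwargs):
    # Hand the caller an AsyncResult to wait on; the call itself runs elsewhere.
    ar = AsyncResult()
    ctrl_queue.put((ar, call, args, kwargs))
    return ar
def control_flow():
    # Single greenlet consumer: calls are serialized, so no locks are needed.
    for ar, call, args, kwargs in ctrl_queue:
        try:
            ar.set(call(*args, **kwargs))
        except Exception as exc:
            ar.set_exception(exc)
ctrl = gevent.spawn(control_flow)
print(routing_call(lambda a, b: a + b, 1, 2).get(timeout=5))  # -> 3
ctrl_queue.put(StopIteration)  # same shutdown trick as _notify_stop()
ctrl.join()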
# Author: Nick Raptis <[email protected]>
"""
Module for listing commands and help.
"""
from basemodule import BaseModule, BaseCommandContext
from alternatives import _
class HelpContext(BaseCommandContext):
def cmd_list(self, argument):
"""List commands"""
arg = argument.lower()
index = self.bot.help_index
public = "public commands -- %s" % " ".join(index['public'])
private = "private commands -- %s" % " ".join(index['private'])
if 'all' in arg or 'both' in arg:
output = "\n".join((public, private))
elif 'pub' in arg or self.target.startswith('#'):
output = public
elif 'priv' in arg or not self.target.startswith('#'):
output = private
else:
# we shouldn't be here
self.logger.error("cmd_list")
return
self.send(self.target, output)
def cmd_modules(self, argument):
"""List active modules"""
index = self.bot.help_index
output = "active modules -- %s" % " ".join(index['modules'].keys())
self.send(self.target, output)
def cmd_help(self, argument):
"""Get help on a command or module"""
arg = argument.lower()
index = self.bot.help_index
target = self.target
args = arg.split()
if not args:
s = "usage: help <command> [public|private] / help module <module>"
self.send(target, s)
elif args[0] == 'module':
args.pop(0)
if not args:
self.send(target, "usage: help module <module>")
else:
help_item = index['modules'].get(args[0])
if help_item:
self.send(target, help_item['summary'])
else:
self.send(target, _("No help for %s"), args[0])
else:
args.append("")
cmd = args.pop(0)
cmd_type = args.pop(0)
if 'pu' in cmd_type or self.target.startswith('#'):
cmd_type = 'public'
elif 'pr' in cmd_type or not self.target.startswith('#'):
cmd_type = 'private'
else:
# we shouldn't be here
self.logger.error("cmd_list")
return
help_item = index[cmd_type].get(cmd)
if help_item:
self.send(target, index[cmd_type][cmd]['summary'])
else:
self.send(target, _("No help for %s"), cmd)
class HelpModule(BaseModule):
context_class = HelpContext
module = HelpModule
| nickraptis/fidibot | src/modules/help.py | Python | bsd-2-clause | 2,615 |
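cmd_list() and cmd_help() read everything from bot.help_index; a sketch of the index shape those commands assume (the contents are illustrative):
help_index = {
    'public':  {'help': {'summary': 'help <command> -- get help on a command'}},
    'private': {'list': {'summary': 'list [public|private|all] -- list commands'}},
    'modules': {'help': {'summary': 'Module for listing commands and help.'}},
}
print("public commands -- %s" % " ".join(help_index['public']))   # cmd_list output
print(help_index['modules']['help']['summary'])                   # "help module help"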
#!/usr/bin/env python
#Copyright (c) <2015>, <Jaakko Leppakangas>
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
#1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
#ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
#(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
#ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#The views and conclusions contained in the software and documentation are those
#of the authors and should not be interpreted as representing official policies,
#either expressed or implied, of the FreeBSD Project.
'''
Created on Dec 16, 2014
@author: Jaakko Leppakangas
'''
import sys
from PyQt4 import QtGui
from ui.preprocessDialog import PreprocessDialog
def main():
app = QtGui.QApplication(sys.argv)
window=PreprocessDialog()
window.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| jaeilepp/eggie | eggie.py | Python | bsd-2-clause | 1,903 |
# -*- coding: utf-8 -*-
"""
Pygments HTML formatter tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2006-2009 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import re
import unittest
import StringIO
import tempfile
from os.path import join, dirname, isfile, abspath
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter, NullFormatter
from pygments.formatters.html import escape_html
import support
TESTFILE, TESTDIR = support.location(__file__)
tokensource = list(PythonLexer(encoding='utf-8').get_tokens(open(TESTFILE).read()))
class HtmlFormatterTest(unittest.TestCase):
def test_correct_output(self):
hfmt = HtmlFormatter(nowrap=True)
houtfile = StringIO.StringIO()
hfmt.format(tokensource, houtfile)
nfmt = NullFormatter()
noutfile = StringIO.StringIO()
nfmt.format(tokensource, noutfile)
stripped_html = re.sub('<.*?>', '', houtfile.getvalue())
escaped_text = escape_html(noutfile.getvalue())
self.assertEquals(stripped_html, escaped_text)
def test_external_css(self):
# test correct behavior
# CSS should be in /tmp directory
fmt1 = HtmlFormatter(full=True, cssfile='fmt1.css', outencoding='utf-8')
# CSS should be in TESTDIR (TESTDIR is absolute)
fmt2 = HtmlFormatter(full=True, cssfile=join(TESTDIR, 'fmt2.css'),
outencoding='utf-8')
tfile = tempfile.NamedTemporaryFile(suffix='.html')
fmt1.format(tokensource, tfile)
try:
fmt2.format(tokensource, tfile)
self.assert_(isfile(join(TESTDIR, 'fmt2.css')))
except IOError:
# test directory not writable
pass
tfile.close()
self.assert_(isfile(join(dirname(tfile.name), 'fmt1.css')))
os.unlink(join(dirname(tfile.name), 'fmt1.css'))
try:
os.unlink(join(TESTDIR, 'fmt2.css'))
except OSError:
pass
def test_all_options(self):
for optdict in [dict(nowrap=True),
dict(linenos=True),
dict(linenos=True, full=True),
dict(linenos=True, full=True, noclasses=True)]:
outfile = StringIO.StringIO()
fmt = HtmlFormatter(**optdict)
fmt.format(tokensource, outfile)
def test_valid_output(self):
# test all available wrappers
fmt = HtmlFormatter(full=True, linenos=True, noclasses=True,
outencoding='utf-8')
handle, pathname = tempfile.mkstemp('.html')
tfile = os.fdopen(handle, 'w+b')
fmt.format(tokensource, tfile)
tfile.close()
catname = os.path.join(TESTDIR, 'dtds', 'HTML4.soc')
try:
try:
import subprocess
ret = subprocess.Popen(['nsgmls', '-s', '-c', catname, pathname],
stdout=subprocess.PIPE).wait()
except ImportError:
# Python 2.3 - no subprocess module
ret = os.popen('nsgmls -s -c "%s" "%s"' % (catname, pathname)).close()
if ret == 32512: raise OSError # not found
except OSError:
# nsgmls not available
pass
else:
self.failIf(ret, 'nsgmls run reported errors')
os.unlink(pathname)
def test_get_style_defs(self):
fmt = HtmlFormatter()
sd = fmt.get_style_defs()
self.assert_(sd.startswith('.'))
fmt = HtmlFormatter(cssclass='foo')
sd = fmt.get_style_defs()
self.assert_(sd.startswith('.foo'))
sd = fmt.get_style_defs('.bar')
self.assert_(sd.startswith('.bar'))
sd = fmt.get_style_defs(['.bar', '.baz'])
fl = sd.splitlines()[0]
self.assert_('.bar' in fl and '.baz' in fl)
def test_unicode_options(self):
fmt = HtmlFormatter(title=u'Föö',
cssclass=u'bär',
cssstyles=u'div:before { content: \'bäz\' }',
encoding='utf-8')
handle, pathname = tempfile.mkstemp('.html')
tfile = os.fdopen(handle, 'w+b')
fmt.format(tokensource, tfile)
tfile.close()
| erickt/pygments | tests/test_html_formatter.py | Python | bsd-2-clause | 4,348 |
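Outside the test harness, the same formatter is normally driven through pygments.highlight; a minimal equivalent of what test_correct_output exercises:
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter
html = highlight('print "hello"', PythonLexer(), HtmlFormatter(full=True, linenos=True))
print(html[:80])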
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import itertools
import numba
import numpy as np
import os
import pandas as pd
import pyarrow.parquet as pq
import random
import string
import unittest
from numba import types
import sdc
from sdc import hiframes
from sdc.str_arr_ext import StringArray
from sdc.tests.test_base import TestCase
from sdc.tests.test_utils import (count_array_OneDs,
count_array_REPs,
count_parfor_OneDs,
count_parfor_REPs,
dist_IR_contains,
get_start_end,
skip_numba_jit,
skip_sdc_jit)
class TestHiFrames(TestCase):
@skip_numba_jit
def test_column_list_select2(self):
# make sure SDC copies the columns like Pandas does
def test_impl(df):
df2 = df[['A']]
df2['A'] += 10
return df2.A, df.A
hpat_func = self.jit(test_impl)
n = 11
df = pd.DataFrame(
{'A': np.arange(n), 'B': np.ones(n), 'C': np.random.ranf(n)})
np.testing.assert_array_equal(hpat_func(df.copy())[1], test_impl(df)[1])
@skip_numba_jit
def test_pd_DataFrame_from_series_par(self):
def test_impl(n):
S1 = pd.Series(np.ones(n))
S2 = pd.Series(np.random.ranf(n))
df = pd.DataFrame({'A': S1, 'B': S2})
return df.A.sum()
hpat_func = self.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
self.assertEqual(count_parfor_OneDs(), 1)
@skip_numba_jit
def test_getitem_bool_series(self):
def test_impl(df):
return df['A'][df['B']].values
hpat_func = self.jit(test_impl)
df = pd.DataFrame({'A': [1, 2, 3], 'B': [True, False, True]})
np.testing.assert_array_equal(test_impl(df), hpat_func(df))
@skip_numba_jit
def test_fillna(self):
def test_impl():
A = np.array([1., 2., 3.])
A[0] = np.nan
df = pd.DataFrame({'A': A})
B = df.A.fillna(5.0)
return B.sum()
hpat_func = self.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
@skip_numba_jit
def test_fillna_inplace(self):
def test_impl():
A = np.array([1., 2., 3.])
A[0] = np.nan
df = pd.DataFrame({'A': A})
df.A.fillna(5.0, inplace=True)
return df.A.sum()
hpat_func = self.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
@skip_numba_jit
def test_column_mean(self):
def test_impl():
A = np.array([1., 2., 3.])
A[0] = np.nan
df = pd.DataFrame({'A': A})
return df.A.mean()
hpat_func = self.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
@skip_numba_jit
def test_column_var(self):
def test_impl():
A = np.array([1., 2., 3.])
A[0] = 4.0
df = pd.DataFrame({'A': A})
return df.A.var()
hpat_func = self.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(), test_impl())
@skip_numba_jit
def test_column_std(self):
def test_impl():
A = np.array([1., 2., 3.])
A[0] = 4.0
df = pd.DataFrame({'A': A})
return df.A.std()
hpat_func = self.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(), test_impl())
@skip_numba_jit
def test_column_map(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n)})
df['B'] = df.A.map(lambda a: 2 * a)
return df.B.sum()
n = 121
hpat_func = self.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
@skip_numba_jit
def test_column_map_arg(self):
def test_impl(df):
df['B'] = df.A.map(lambda a: 2 * a)
return
n = 121
df1 = pd.DataFrame({'A': np.arange(n)})
df2 = pd.DataFrame({'A': np.arange(n)})
hpat_func = self.jit(test_impl)
hpat_func(df1)
self.assertTrue(hasattr(df1, 'B'))
test_impl(df2)
np.testing.assert_equal(df1.B.values, df2.B.values)
@skip_numba_jit
@skip_sdc_jit('Not implemented in sequential transport layer')
def test_cumsum(self):
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.random.ranf(n)})
Ac = df.A.cumsum()
return Ac.sum()
hpat_func = self.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_array_OneDs(), 2)
self.assertEqual(count_parfor_REPs(), 0)
self.assertEqual(count_parfor_OneDs(), 2)
self.assertTrue(dist_IR_contains('dist_cumsum'))
@skip_numba_jit
@skip_sdc_jit('Not implemented in sequential transport layer')
def test_column_distribution(self):
# make sure all column calls are distributed
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.random.ranf(n)})
df.A.fillna(5.0, inplace=True)
DF = df.A.fillna(5.0)
s = DF.sum()
m = df.A.mean()
v = df.A.var()
t = df.A.std()
Ac = df.A.cumsum()
return Ac.sum() + s + m + v + t
hpat_func = self.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
self.assertTrue(dist_IR_contains('dist_cumsum'))
@skip_numba_jit
@skip_sdc_jit('Not implemented in sequential transport layer')
def test_quantile_parallel(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(0, n, 1, np.float64)})
return df.A.quantile(.25)
hpat_func = self.jit(test_impl)
n = 1001
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@unittest.skip('Error - fix needed\n'
'NUMA_PES=3 build')
def test_quantile_parallel_float_nan(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(0, n, 1, np.float32)})
df.A[0:100] = np.nan
df.A[200:331] = np.nan
return df.A.quantile(.25)
hpat_func = self.jit(test_impl)
n = 1001
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@unittest.skip('Error - fix needed\n'
'NUMA_PES=3 build')
def test_quantile_parallel_int(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(0, n, 1, np.int32)})
return df.A.quantile(.25)
hpat_func = self.jit(test_impl)
n = 1001
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@unittest.skip('Error - fix needed\n'
'NUMA_PES=3 build')
def test_quantile_sequential(self):
def test_impl(A):
df = pd.DataFrame({'A': A})
return df.A.quantile(.25)
hpat_func = self.jit(test_impl)
n = 1001
A = np.arange(0, n, 1, np.float64)
np.testing.assert_almost_equal(hpat_func(A), test_impl(A))
@skip_numba_jit
def test_nunique(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n)})
df.A[2] = 0
return df.A.nunique()
hpat_func = self.jit(test_impl)
n = 1001
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
# test compile again for overload related issues
hpat_func = self.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
@skip_numba_jit
def test_nunique_parallel(self):
# TODO: test without file
def test_impl():
df = pq.read_table('example.parquet').to_pandas()
return df.four.nunique()
hpat_func = self.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
self.assertEqual(count_array_REPs(), 0)
# test compile again for overload related issues
hpat_func = self.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
self.assertEqual(count_array_REPs(), 0)
@skip_numba_jit
def test_nunique_str(self):
def test_impl(n):
df = pd.DataFrame({'A': ['aa', 'bb', 'aa', 'cc', 'cc']})
return df.A.nunique()
hpat_func = self.jit(test_impl)
n = 1001
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
# test compile again for overload related issues
hpat_func = self.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
@unittest.skip('AssertionError - fix needed\n'
'5 != 3\n')
def test_nunique_str_parallel(self):
# TODO: test without file
def test_impl():
df = pq.read_table('example.parquet').to_pandas()
return df.two.nunique()
hpat_func = self.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
self.assertEqual(count_array_REPs(), 0)
# test compile again for overload related issues
hpat_func = self.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
self.assertEqual(count_array_REPs(), 0)
@skip_numba_jit
def test_unique_parallel(self):
# TODO: test without file
def test_impl():
df = pq.read_table('example.parquet').to_pandas()
return (df.four.unique() == 3.0).sum()
hpat_func = self.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
self.assertEqual(count_array_REPs(), 0)
@unittest.skip('AssertionError - fix needed\n'
'2 != 1\n')
def test_unique_str_parallel(self):
# TODO: test without file
def test_impl():
df = pq.read_table('example.parquet').to_pandas()
return (df.two.unique() == 'foo').sum()
hpat_func = self.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
self.assertEqual(count_array_REPs(), 0)
@skip_numba_jit
@skip_sdc_jit('Not implemented in sequential transport layer')
def test_describe(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(0, n, 1, np.float64)})
return df.A.describe()
hpat_func = self.jit(test_impl)
n = 1001
hpat_func(n)
# XXX: test actual output
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
def test_str_contains_regex(self):
def test_impl():
A = StringArray(['ABC', 'BB', 'ADEF'])
df = pd.DataFrame({'A': A})
B = df.A.str.contains('AB*', regex=True)
return B.sum()
hpat_func = self.jit(test_impl)
self.assertEqual(hpat_func(), 2)
@skip_numba_jit
def test_str_contains_noregex(self):
def test_impl():
A = StringArray(['ABC', 'BB', 'ADEF'])
df = pd.DataFrame({'A': A})
B = df.A.str.contains('BB', regex=False)
return B.sum()
hpat_func = self.jit(test_impl)
self.assertEqual(hpat_func(), 1)
@skip_numba_jit
def test_str_replace_regex(self):
def test_impl(df):
return df.A.str.replace('AB*', 'EE', regex=True)
df = pd.DataFrame({'A': ['ABCC', 'CABBD']})
hpat_func = self.jit(test_impl)
pd.testing.assert_series_equal(
hpat_func(df), test_impl(df), check_names=False)
@skip_numba_jit
def test_str_replace_noregex(self):
def test_impl(df):
return df.A.str.replace('AB', 'EE', regex=False)
df = pd.DataFrame({'A': ['ABCC', 'CABBD']})
hpat_func = self.jit(test_impl)
pd.testing.assert_series_equal(
hpat_func(df), test_impl(df), check_names=False)
@skip_numba_jit
def test_str_replace_regex_parallel(self):
def test_impl(df):
B = df.A.str.replace('AB*', 'EE', regex=True)
return B
n = 5
A = ['ABCC', 'CABBD', 'CCD', 'CCDAABB', 'ED']
start, end = get_start_end(n)
df = pd.DataFrame({'A': A[start:end]})
hpat_func = self.jit(distributed={'df', 'B'})(test_impl)
pd.testing.assert_series_equal(
hpat_func(df), test_impl(df), check_names=False)
self.assertEqual(count_array_REPs(), 3)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
def test_str_split(self):
def test_impl(df):
return df.A.str.split(',')
df = pd.DataFrame({'A': ['AB,CC', 'C,ABB,D', 'G', '', 'g,f']})
hpat_func = self.jit(test_impl)
pd.testing.assert_series_equal(
hpat_func(df), test_impl(df), check_names=False)
@skip_numba_jit
def test_str_split_default(self):
def test_impl(df):
return df.A.str.split()
df = pd.DataFrame({'A': ['AB CC', 'C ABB D', 'G ', ' ', 'g\t f']})
hpat_func = self.jit(test_impl)
pd.testing.assert_series_equal(
hpat_func(df), test_impl(df), check_names=False)
@skip_numba_jit
def test_str_split_filter(self):
def test_impl(df):
B = df.A.str.split(',')
df2 = pd.DataFrame({'B': B})
return df2[df2.B.str.len() > 1]
df = pd.DataFrame({'A': ['AB,CC', 'C,ABB,D', 'G', '', 'g,f']})
hpat_func = self.jit(test_impl)
pd.testing.assert_frame_equal(
hpat_func(df), test_impl(df).reset_index(drop=True))
@skip_numba_jit
def test_str_split_box_df(self):
def test_impl(df):
return pd.DataFrame({'B': df.A.str.split(',')})
df = pd.DataFrame({'A': ['AB,CC', 'C,ABB,D']})
hpat_func = self.jit(test_impl)
pd.testing.assert_series_equal(
hpat_func(df).B, test_impl(df).B, check_names=False)
@skip_numba_jit
def test_str_split_unbox_df(self):
def test_impl(df):
return df.A.iloc[0]
df = pd.DataFrame({'A': ['AB,CC', 'C,ABB,D']})
df2 = pd.DataFrame({'A': df.A.str.split(',')})
hpat_func = self.jit(test_impl)
self.assertEqual(hpat_func(df2), test_impl(df2))
@unittest.skip('Getitem Series with list values not implement')
def test_str_split_bool_index(self):
def test_impl(df):
C = df.A.str.split(',')
return C[df.B == 'aa']
df = pd.DataFrame({'A': ['AB,CC', 'C,ABB,D'], 'B': ['aa', 'bb']})
hpat_func = self.jit(test_impl)
pd.testing.assert_series_equal(
hpat_func(df), test_impl(df), check_names=False)
@skip_numba_jit
def test_str_split_parallel(self):
def test_impl(df):
B = df.A.str.split(',')
return B
n = 5
start, end = get_start_end(n)
A = ['AB,CC', 'C,ABB,D', 'CAD', 'CA,D', 'AA,,D']
df = pd.DataFrame({'A': A[start:end]})
hpat_func = self.jit(distributed={'df', 'B'})(test_impl)
pd.testing.assert_series_equal(
hpat_func(df), test_impl(df), check_names=False)
self.assertEqual(count_array_REPs(), 3)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
def test_str_get(self):
def test_impl(df):
B = df.A.str.split(',')
return B.str.get(1)
df = pd.DataFrame({'A': ['AB,CC', 'C,ABB,D']})
hpat_func = self.jit(test_impl)
pd.testing.assert_series_equal(
hpat_func(df), test_impl(df), check_names=False)
@skip_numba_jit
    def test_str_split_simple(self):
def test_impl(df):
return df.A.str.split(',')
df = pd.DataFrame({'A': ['AB,CC', 'C,ABB,D']})
hpat_func = self.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(df), test_impl(df), check_names=False)
@skip_numba_jit
def test_str_get_parallel(self):
def test_impl(df):
A = df.A.str.split(',')
B = A.str.get(1)
return B
n = 5
start, end = get_start_end(n)
A = ['AB,CC', 'C,ABB,D', 'CAD,F', 'CA,D', 'AA,,D']
df = pd.DataFrame({'A': A[start:end]})
hpat_func = self.jit(distributed={'df', 'B'})(test_impl)
pd.testing.assert_series_equal(
hpat_func(df), test_impl(df), check_names=False)
self.assertEqual(count_array_REPs(), 3)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
def test_str_get_to_numeric(self):
def test_impl(df):
B = df.A.str.split(',')
C = pd.to_numeric(B.str.get(1), errors='coerce')
return C
df = pd.DataFrame({'A': ['AB,12', 'C,321,D']})
hpat_func = self.jit(locals={'C': types.int64[:]})(test_impl)
pd.testing.assert_series_equal(
hpat_func(df), test_impl(df), check_names=False)
@skip_numba_jit
def test_str_flatten(self):
def test_impl(df):
A = df.A.str.split(',')
return pd.Series(list(itertools.chain(*A)))
df = pd.DataFrame({'A': ['AB,CC', 'C,ABB,D']})
hpat_func = self.jit(test_impl)
pd.testing.assert_series_equal(
hpat_func(df), test_impl(df), check_names=False)
@skip_numba_jit
def test_str_flatten_parallel(self):
def test_impl(df):
A = df.A.str.split(',')
B = pd.Series(list(itertools.chain(*A)))
return B
n = 5
start, end = get_start_end(n)
A = ['AB,CC', 'C,ABB,D', 'CAD', 'CA,D', 'AA,,D']
df = pd.DataFrame({'A': A[start:end]})
hpat_func = self.jit(distributed={'df', 'B'})(test_impl)
pd.testing.assert_series_equal(
hpat_func(df), test_impl(df), check_names=False)
self.assertEqual(count_array_REPs(), 3)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
def test_to_numeric(self):
def test_impl(df):
B = pd.to_numeric(df.A, errors='coerce')
return B
df = pd.DataFrame({'A': ['123.1', '331.2']})
hpat_func = self.jit(locals={'B': types.float64[:]})(test_impl)
pd.testing.assert_series_equal(
hpat_func(df), test_impl(df), check_names=False)
@skip_numba_jit
def test_1D_Var_len(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n) + 1.0})
df1 = df[df.A > 5]
return len(df1.B)
hpat_func = self.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
def test_rolling1(self):
# size 3 without unroll
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n), 'B': np.random.ranf(n)})
Ac = df.A.rolling(3).sum()
return Ac.sum()
hpat_func = self.jit(test_impl)
n = 121
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
# size 7 with unroll
def test_impl_2(n):
df = pd.DataFrame({'A': np.arange(n) + 1.0, 'B': np.random.ranf(n)})
Ac = df.A.rolling(7).sum()
return Ac.sum()
        hpat_func = self.jit(test_impl_2)
        n = 121
        self.assertEqual(hpat_func(n), test_impl_2(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
def test_rolling2(self):
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.random.ranf(n)})
df['moving average'] = df.A.rolling(window=5, center=True).mean()
return df['moving average'].sum()
hpat_func = self.jit(test_impl)
n = 121
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
def test_rolling3(self):
def test_impl(n):
df = pd.DataFrame({'A': np.ones(n), 'B': np.random.ranf(n)})
Ac = df.A.rolling(3, center=True).apply(lambda a: a[0] + 2 * a[1] + a[2])
return Ac.sum()
hpat_func = self.jit(test_impl)
n = 121
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@unittest.skip('Error - fix needed\n'
'NUMA_PES=3 build')
def test_shift1(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + 1.0, 'B': np.random.ranf(n)})
Ac = df.A.shift(1)
return Ac.sum()
hpat_func = self.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@unittest.skip('Error - fix needed\n'
'NUMA_PES=3 build')
def test_shift2(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n) + 1.0, 'B': np.random.ranf(n)})
Ac = df.A.pct_change(1)
return Ac.sum()
hpat_func = self.jit(test_impl)
n = 11
np.testing.assert_almost_equal(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
def test_df_input(self):
def test_impl(df):
return df.B.sum()
n = 121
df = pd.DataFrame({'A': np.ones(n), 'B': np.random.ranf(n)})
hpat_func = self.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(df), test_impl(df))
@skip_numba_jit
def test_df_input2(self):
def test_impl(df):
C = df.B == 'two'
return C.sum()
n = 11
df = pd.DataFrame({'A': np.random.ranf(3 * n), 'B': ['one', 'two', 'three'] * n})
hpat_func = self.jit(test_impl)
np.testing.assert_almost_equal(hpat_func(df), test_impl(df))
@skip_numba_jit
def test_df_input_dist1(self):
def test_impl(df):
return df.B.sum()
n = 121
A = [3, 4, 5, 6, 1]
B = [5, 6, 2, 1, 3]
n = 5
start, end = get_start_end(n)
df = pd.DataFrame({'A': A, 'B': B})
df_h = pd.DataFrame({'A': A[start:end], 'B': B[start:end]})
hpat_func = self.jit(distributed={'df'})(test_impl)
np.testing.assert_almost_equal(hpat_func(df_h), test_impl(df))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
def test_concat(self):
def test_impl(n):
df1 = pd.DataFrame({'key1': np.arange(n), 'A': np.arange(n) + 1.0})
df2 = pd.DataFrame({'key2': n - np.arange(n), 'A': n + np.arange(n) + 1.0})
df3 = pd.concat([df1, df2])
return df3.A.sum() + df3.key2.sum()
hpat_func = self.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
n = 11111
self.assertEqual(hpat_func(n), test_impl(n))
@skip_numba_jit
def test_concat_str(self):
def test_impl():
df1 = pq.read_table('example.parquet').to_pandas()
df2 = pq.read_table('example.parquet').to_pandas()
A3 = pd.concat([df1, df2])
return (A3.two == 'foo').sum()
hpat_func = self.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
def test_concat_series(self):
def test_impl(n):
df1 = pd.DataFrame({'key1': np.arange(n), 'A': np.arange(n) + 1.0})
df2 = pd.DataFrame({'key2': n - np.arange(n), 'A': n + np.arange(n) + 1.0})
A3 = pd.concat([df1.A, df2.A])
return A3.sum()
hpat_func = self.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
n = 11111
self.assertEqual(hpat_func(n), test_impl(n))
@skip_numba_jit
def test_concat_series_str(self):
def test_impl():
df1 = pq.read_table('example.parquet').to_pandas()
df2 = pq.read_table('example.parquet').to_pandas()
A3 = pd.concat([df1.two, df2.two])
return (A3 == 'foo').sum()
hpat_func = self.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@skip_numba_jit
@unittest.skipIf(int(os.getenv('SDC_NP_MPI', '0')) > 1, 'Test hangs on NP=2 and NP=3 on all platforms')
def test_intraday(self):
def test_impl(nsyms):
max_num_days = 100
all_res = 0.0
for i in sdc.prange(nsyms):
s_open = 20 * np.ones(max_num_days)
s_low = 28 * np.ones(max_num_days)
s_close = 19 * np.ones(max_num_days)
df = pd.DataFrame({'Open': s_open, 'Low': s_low, 'Close': s_close})
df['Stdev'] = df['Close'].rolling(window=90).std()
df['Moving Average'] = df['Close'].rolling(window=20).mean()
df['Criteria1'] = (df['Open'] - df['Low'].shift(1)) < -df['Stdev']
df['Criteria2'] = df['Open'] > df['Moving Average']
df['BUY'] = df['Criteria1'] & df['Criteria2']
df['Pct Change'] = (df['Close'] - df['Open']) / df['Open']
df['Rets'] = df['Pct Change'][df['BUY']]
all_res += df['Rets'].mean()
return all_res
hpat_func = self.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
self.assertEqual(count_array_OneDs(), 0)
self.assertEqual(count_parfor_OneDs(), 1)
@skip_numba_jit
def test_var_dist1(self):
def test_impl(A, B):
df = pd.DataFrame({'A': A, 'B': B})
df2 = df.groupby('A', as_index=False)['B'].sum()
# TODO: fix handling of df setitem to force match of array dists
# probably with a new node that is appended to the end of basic block
# df2['C'] = np.full(len(df2.B), 3, np.int8)
# TODO: full_like for Series
df2['C'] = np.full_like(df2.B.values, 3, np.int8)
return df2
A = np.array([1, 1, 2, 3])
B = np.array([3, 4, 5, 6])
hpat_func = self.jit(locals={'A:input': 'distributed',
'B:input': 'distributed', 'df2:return': 'distributed'})(test_impl)
start, end = get_start_end(len(A))
df2 = hpat_func(A[start:end], B[start:end])
# TODO:
# pd.testing.assert_frame_equal(
# hpat_func(A[start:end], B[start:end]), test_impl(A, B))
if __name__ == "__main__":
unittest.main()
| IntelLabs/hpat | sdc/tests/test_hiframes.py | Python | bsd-2-clause | 29,355 |
import pytest
import urllib.error
from urlpathlib import UrlPath
def test_scheme():
# does not raise NotImplementedError
UrlPath('/dev/null').touch()
def test_scheme_not_supported():
with pytest.raises(NotImplementedError):
UrlPath('http:///tmp/test').touch()
def test_scheme_not_listed():
with pytest.raises(NotImplementedError):
UrlPath('test:///tmp/test').touch()
def test_file_additional():
assert UrlPath('.').resolve() == UrlPath.cwd()
def test_scheme_alias():
# does not raise NotImplementedError
with pytest.raises(urllib.error.URLError):
UrlPath('https://localhost/test').exists()
| morgan-del/urlpathlib | tests/test_dispatch.py | Python | bsd-2-clause | 657 |
# coding: utf8
from __future__ import unicode_literals
from flask import abort, make_response, request
from flask_api.decorators import set_renderers
from flask_api import exceptions, renderers, status, FlaskAPI
import json
import unittest
app = FlaskAPI(__name__)
app.config['TESTING'] = True
class JSONVersion1(renderers.JSONRenderer):
media_type = 'application/json; api-version="1.0"'
class JSONVersion2(renderers.JSONRenderer):
media_type = 'application/json; api-version="2.0"'
@app.route('/set_status_and_headers/')
def set_status_and_headers():
headers = {'Location': 'http://example.com/456'}
return {'example': 'content'}, status.HTTP_201_CREATED, headers
@app.route('/set_headers/')
def set_headers():
headers = {'Location': 'http://example.com/456'}
return {'example': 'content'}, headers
@app.route('/make_response_view/')
def make_response_view():
response = make_response({'example': 'content'})
response.headers['Location'] = 'http://example.com/456'
return response
@app.route('/api_exception/')
def api_exception():
raise exceptions.PermissionDenied()
@app.route('/abort_view/')
def abort_view():
abort(status.HTTP_403_FORBIDDEN)
@app.route('/options/')
def options_view():
return {}
@app.route('/accepted_media_type/')
@set_renderers([JSONVersion2, JSONVersion1])
def accepted_media_type():
return {'accepted_media_type': str(request.accepted_media_type)}
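# Note: with the set_renderers decorator the first renderer in the list acts as the
# default when the client accepts '*/*'; the ordering above therefore makes
# JSONVersion2 (api-version "2.0") the default, as exercised by
# test_accepted_media_type_property below.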
class AppTests(unittest.TestCase):
def test_set_status_and_headers(self):
with app.test_client() as client:
response = client.get('/set_status_and_headers/')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.headers['Location'], 'http://example.com/456')
self.assertEqual(response.content_type, 'application/json')
expected = '{"example": "content"}'
self.assertEqual(response.get_data().decode('utf8'), expected)
def test_set_headers(self):
with app.test_client() as client:
response = client.get('/set_headers/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.headers['Location'], 'http://example.com/456')
self.assertEqual(response.content_type, 'application/json')
expected = '{"example": "content"}'
self.assertEqual(response.get_data().decode('utf8'), expected)
def test_make_response(self):
with app.test_client() as client:
response = client.get('/make_response_view/')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.headers['Location'], 'http://example.com/456')
self.assertEqual(response.content_type, 'application/json')
expected = '{"example": "content"}'
self.assertEqual(response.get_data().decode('utf8'), expected)
def test_api_exception(self):
with app.test_client() as client:
response = client.get('/api_exception/')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(response.content_type, 'application/json')
expected = '{"message": "You do not have permission to perform this action."}'
self.assertEqual(response.get_data().decode('utf8'), expected)
def test_abort_view(self):
with app.test_client() as client:
response = client.get('/abort_view/')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_options_view(self):
with app.test_client() as client:
response = client.options('/options/')
# Errors if `response.response` is `None`
response.get_data()
def test_accepted_media_type_property(self):
with app.test_client() as client:
# Explicitly request the "api-version 1.0" renderer.
headers = {'Accept': 'application/json; api-version="1.0"'}
response = client.get('/accepted_media_type/', headers=headers)
data = json.loads(response.get_data().decode('utf8'))
expected = {'accepted_media_type': 'application/json; api-version="1.0"'}
self.assertEqual(data, expected)
# Request the default renderer, which is "api-version 2.0".
headers = {'Accept': '*/*'}
response = client.get('/accepted_media_type/', headers=headers)
data = json.loads(response.get_data().decode('utf8'))
expected = {'accepted_media_type': 'application/json; api-version="2.0"'}
self.assertEqual(data, expected)
| theskumar-archive/flask-api | flask_api/tests/test_app.py | Python | bsd-2-clause | 4,692 |
from celery.task import Task
import requests
class StracksFlushTask(Task):
def run(self, url, data):
requests.post(url + "/", data=data)
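# Minimal usage sketch (hypothetical URL and payload; in practice the task is
# queued through Celery rather than run inline):
#   StracksFlushTask().delay('http://stracks.example.com/api', {'entries': '[]'})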
| Stracksapp/stracks_api | stracks_api/tasks.py | Python | bsd-2-clause | 152 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('freebasics', '0006_change_site_url_field_type'),
]
operations = [
migrations.AddField(
model_name='freebasicscontroller',
name='postgres_db_url',
field=models.TextField(null=True, blank=True),
),
]
| praekeltfoundation/mc2-freebasics | freebasics/migrations/0007_freebasicscontroller_postgres_db_url.py | Python | bsd-2-clause | 442 |
def excise(conn, qrelname, tid):
with conn.cursor() as cur:
# Assume 'id' column exists and print that for bookkeeping.
#
# TODO: Instead should find unique constraints and print
# those, or try to print all attributes that are not corrupt.
sql = 'DELETE FROM {0} WHERE ctid = %s RETURNING id'.format(qrelname)
params = (tid,)
cur.execute(sql, params)
row = cur.fetchone()
if row:
return row[0]
return None
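# Usage sketch (hypothetical connection and relation name); the tid argument is a
# ctid in PostgreSQL's "(block, offset)" text form:
#   conn = psycopg2.connect(dsn)
#   removed_id = excise(conn, '"public"."damaged_table"', '(0,2)')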
| fdr/pg_corrupt | pg_corrupt/excise.py | Python | bsd-2-clause | 506 |
import datetime
from django.utils import timezone
from django.test import TestCase
from django.urls import reverse
from .models import Question
class QuestionMethodTests(TestCase):
def test_was_published_recently_with_future_question(self):
"""
was_published_recently() should return False for questions whose
pub_date is in the future.
"""
time = timezone.now() + datetime.timedelta(days=30)
future_question = Question(pub_date=time)
self.assertIs(future_question.was_published_recently(), False)
def test_was_published_recently_with_old_question(self):
"""
was_published_recently() should return False for questions whose
pub_date is older than 1 day.
"""
time = timezone.now() - datetime.timedelta(days=30)
old_question = Question(pub_date=time)
self.assertIs(old_question.was_published_recently(), False)
def test_was_published_recently_with_recent_question(self):
"""
was_published_recently() should return True for questions whose
pub_date is within the last day.
"""
time = timezone.now() - datetime.timedelta(hours=1)
recent_question = Question(pub_date=time)
self.assertIs(recent_question.was_published_recently(), True)
def create_question(question_text, days):
"""
Creates a question with the given `question_text` and published the
given number of `days` offset to now (negative for questions published
in the past, positive for questions that have yet to be published).
"""
time = timezone.now() + datetime.timedelta(days=days)
return Question.objects.create(question_text=question_text, pub_date=time)
class QuestionViewTests(TestCase):
def test_index_view_with_no_questions(self):
"""
If no questions exist, an appropriate message should be displayed.
"""
response = self.client.get(reverse('polls:index'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, "No polls are available.")
self.assertQuerysetEqual(response.context['latest_question_list'], [])
def test_index_view_with_a_past_question(self):
"""
Questions with a pub_date in the past should be displayed on the
index page.
"""
create_question(question_text="Past question.", days=-30)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question.>']
)
def test_index_view_with_a_future_question(self):
"""
Questions with a pub_date in the future should not be displayed on
the index page.
"""
create_question(question_text="Future question.", days=30)
response = self.client.get(reverse('polls:index'))
self.assertContains(response, "No polls are available.")
self.assertQuerysetEqual(response.context['latest_question_list'], [])
def test_index_view_with_future_question_and_past_question(self):
"""
Even if both past and future questions exist, only past questions
should be displayed.
"""
create_question(question_text="Past question.", days=-30)
create_question(question_text="Future question.", days=30)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question.>']
)
def test_index_view_with_two_past_questions(self):
"""
The questions index page may display multiple questions.
"""
create_question(question_text="Past question 1.", days=-30)
create_question(question_text="Past question 2.", days=-5)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question 2.>', '<Question: Past question 1.>']
)
class QuestionIndexDetailTests(TestCase):
def test_detail_view_with_a_future_question(self):
"""
The detail view of a question with a pub_date in the future should
return a 404 not found.
"""
future_question = create_question(question_text='Future question.', days=5)
url = reverse('polls:detail', args=(future_question.id,))
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_detail_view_with_a_past_question(self):
"""
The detail view of a question with a pub_date in the past should
display the question's text.
"""
past_question = create_question(question_text='Past Question.', days=-5)
url = reverse('polls:detail', args=(past_question.id,))
response = self.client.get(url)
        self.assertContains(response, past_question.question_text)
| congpc/DjangoExample | mysite/polls/tests.py | Python | bsd-2-clause | 5,010 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""--- Day 3: Perfectly Spherical Houses in a Vacuum ---
Santa is delivering presents to an infinite two-dimensional grid of houses.
He begins by delivering a present to the house at his starting location, and
then an elf at the North Pole calls him via radio and tells him where to move
next. Moves are always exactly one house to the north (^), south (v), east (>),
or west (<). After each move, he delivers another present to the house at his
new location.
However, the elf back at the north pole has had a little too much eggnog, and
so his directions are a little off, and Santa ends up visiting some houses more
than once. How many houses receive at least one present?
For example:
- > delivers presents to 2 houses: one at the starting location, and one to the
east.
- ^>v< delivers presents to 4 houses in a square, including twice to the
house at his starting/ending location.
- ^v^v^v^v^v delivers a bunch of presents
to some very lucky children at only 2 houses.
--- Part Two ---
The next year, to speed up the process, Santa creates a robot version of
himself, Robo-Santa, to deliver presents with him.
Santa and Robo-Santa start at the same location (delivering two presents to the
same starting house), then take turns moving based on instructions from the
elf, who is eggnoggedly reading from the same script as the previous year.
This year, how many houses receive at least one present?
For example:
- ^v delivers presents to 3 houses, because Santa goes north, and then Robo-Santa
goes south.
- ^>v< now delivers presents to 3 houses, and Santa and Robo-Santa
end up back where they started.
- ^v^v^v^v^v now delivers presents to 11 houses,
with Santa going one direction and Robo-Santa going the other.
"""
import sys
import click
def update_point(move, point):
"""Returns new point representing position after move"""
moves = {
'^': (0, -1),
'<': (-1, 0),
'v': (0, 1),
'>': (1, 0),
}
return (point[0]+moves.get(move, (0, 0))[0],
point[1]+moves.get(move, (0, 0))[1])
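# For example, update_point('>', (0, 0)) returns (1, 0) and update_point('^', (0, 0))
# returns (0, -1): north is modelled as decreasing y, and characters other than
# '^', 'v', '<', '>' leave the point unchanged.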
def map_single_delivery(text):
point = (0, 0)
points = set({point})
for move in text:
point = update_point(move, point)
points.add(point)
return points
def number_of_houses_covered(text, robo_santa=False):
return len(map_single_delivery(text)) if not robo_santa else \
len(map_multiple_deliveries(text))
def split_directions(directions):
lists = ('', '')
try:
lists = directions[0::2], directions[1::2]
except IndexError:
pass
return lists
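# For example, split_directions('^>v<') returns ('^v', '><'): even-indexed moves are
# followed by Santa and odd-indexed moves by Robo-Santa, matching the turn-taking
# described in the module docstring.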
def map_multiple_deliveries(text):
directions = split_directions(text)
points = map_single_delivery(directions[0])
return points.union(map_single_delivery(directions[1]))
def calculate_solution_1(text):
return number_of_houses_covered(text)
def calculate_solution_2(text):
return number_of_houses_covered(text, robo_santa=True)
@click.command()
@click.option('--source_file', default='data/03.txt',
help='source data file for problem')
def main(source_file):
"""Simple solution to adventofcode problem 3."""
data = ''
with open(source_file) as source:
data = source.read()
print('Santa gave at least one present to {} houses.'.format(
number_of_houses_covered(data)))
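    # Part Two (see module docstring): Santa and Robo-Santa take turns following
    # the same instructions.
    print('Santa and Robo-Santa gave at least one present to {} houses.'.format(
        number_of_houses_covered(data, robo_santa=True)))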
if __name__ == "__main__":
sys.exit(main())
| MattJDavidson/python-adventofcode | advent/problem_03.py | Python | bsd-2-clause | 3,424 |
# pylint:disable=line-too-long
import logging
from ...sim_type import SimTypeFunction, SimTypeShort, SimTypeInt, SimTypeLong, SimTypeLongLong, SimTypeDouble, SimTypeFloat, SimTypePointer, SimTypeChar, SimStruct, SimTypeFixedSizeArray, SimTypeBottom, SimUnion, SimTypeBool
from ...calling_conventions import SimCCStdcall, SimCCMicrosoftAMD64
from .. import SIM_PROCEDURES as P
from . import SimLibrary
_l = logging.getLogger(name=__name__)
lib = SimLibrary()
lib.set_default_cc('X86', SimCCStdcall)
lib.set_default_cc('AMD64', SimCCMicrosoftAMD64)
lib.set_library_names("aclui.dll")
prototypes = \
{
#
'CreateSecurityPage': SimTypeFunction([SimTypeBottom(label="ISecurityInformation")], SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), arg_names=["psi"]),
#
'EditSecurity': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypeBottom(label="ISecurityInformation")], SimTypeInt(signed=True, label="Int32"), arg_names=["hwndOwner", "psi"]),
#
'EditSecurityAdvanced': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypeBottom(label="ISecurityInformation"), SimTypeInt(signed=False, label="SI_PAGE_TYPE")], SimTypeInt(signed=True, label="Int32"), arg_names=["hwndOwner", "psi", "uSIPage"]),
}
lib.set_prototypes(prototypes)
| angr/angr | angr/procedures/definitions/win32_aclui.py | Python | bsd-2-clause | 1,451 |
# -*- coding: utf-8 -*-
"""
femagtools.plot
~~~~~~~~~~~~~~~
Creating plots
"""
import numpy as np
import scipy.interpolate as ip
import logging
try:
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from mpl_toolkits.mplot3d import Axes3D
matplotlibversion = matplotlib.__version__
except ImportError: # ModuleNotFoundError:
matplotlibversion = 0
logger = logging.getLogger("femagtools.plot")
def _create_3d_axis():
"""creates a subplot with 3d projection if one does not already exist"""
from matplotlib.projections import get_projection_class
from matplotlib import _pylab_helpers
create_axis = True
if _pylab_helpers.Gcf.get_active() is not None:
if isinstance(plt.gca(), get_projection_class('3d')):
create_axis = False
if create_axis:
plt.figure()
plt.subplot(111, projection='3d')
def _plot_surface(ax, x, y, z, labels, azim=None):
"""helper function for surface plots"""
# ax.tick_params(axis='both', which='major', pad=-3)
assert np.size(x) > 1 and np.size(y) > 1 and np.size(z) > 1
if azim is not None:
ax.azim = azim
X, Y = np.meshgrid(x, y)
Z = np.ma.masked_invalid(z)
ax.plot_surface(X, Y, Z,
rstride=1, cstride=1,
cmap=cm.viridis, alpha=0.85,
vmin=np.nanmin(z), vmax=np.nanmax(z),
linewidth=0, antialiased=True)
# edgecolor=(0, 0, 0, 0))
# ax.set_xticks(xticks)
# ax.set_yticks(yticks)
# ax.set_zticks(zticks)
ax.set_xlabel(labels[0])
ax.set_ylabel(labels[1])
ax.set_title(labels[2])
# plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0)
def __phasor_plot(ax, up, idq, uxdq):
uref = max(up, uxdq[0])
uxd = uxdq[0]/uref
uxq = uxdq[1]/uref
u1d, u1q = (uxd, 1+uxq)
u1 = np.sqrt(u1d**2 + u1q**2)*uref
i1 = np.linalg.norm(idq)
i1d, i1q = (idq[0]/i1, idq[1]/i1)
qhw = 6 # width arrow head
qhl = 15 # length arrow head
qlw = 2 # line width
qts = 10 # textsize
# Length of the Current adjust to Ud: Initally 0.9, Maier(Oswald) = 0.5
curfac = max(0.9, 1.5*i1q/up)
def label_line(ax, X, Y, U, V, label, color='k', size=8):
"""Add a label to a line, at the proper angle.
Arguments
---------
line : matplotlib.lines.Line2D object,
label : str
x : float
x-position to place center of text (in data coordinated
y : float
y-position to place center of text (in data coordinates)
color : str
size : float
"""
x1, x2 = X, X + U
y1, y2 = Y, Y + V
if y2 == 0:
y2 = y1
if x2 == 0:
x2 = x1
x = (x1 + x2) / 2
y = (y1 + y2) / 2
slope_degrees = np.rad2deg(np.angle(U + V * 1j))
if slope_degrees < 0:
slope_degrees += 180
if 90 < slope_degrees <= 270:
slope_degrees += 180
x_offset = np.sin(np.deg2rad(slope_degrees))
y_offset = np.cos(np.deg2rad(slope_degrees))
bbox_props = dict(boxstyle="Round4, pad=0.1", fc="white", lw=0)
text = ax.annotate(label, xy=(x, y), xytext=(x_offset * 10, y_offset * 8),
textcoords='offset points',
size=size, color=color,
horizontalalignment='center',
verticalalignment='center',
fontfamily='monospace', fontweight='bold', bbox=bbox_props)
text.set_rotation(slope_degrees)
return text
if ax == 0:
ax = plt.gca()
ax.axes.xaxis.set_ticklabels([])
ax.axes.yaxis.set_ticklabels([])
# ax.set_aspect('equal')
ax.set_title(
r'$U_1$={0} V, $I_1$={1} A, $U_p$={2} V'.format(
round(u1, 1), round(i1, 1), round(up, 1)), fontsize=14)
up /= uref
ax.quiver(0, 0, 0, up, angles='xy', scale_units='xy', scale=1, units='dots',
headwidth=qhw/2, headlength=qhl/2, headaxislength=qhl/2, width=qlw*2, color='k')
label_line(ax, 0, 0, 0, up, '$U_p$', 'k', qts)
ax.quiver(0, 0, u1d, u1q, angles='xy', scale_units='xy', scale=1, units='dots',
headwidth=qhw, headlength=qhl, headaxislength=qhl, width=qlw, color='r')
label_line(ax, 0, 0, u1d, u1q, '$U_1$', 'r', qts)
ax.quiver(0, 1, uxd, 0, angles='xy', scale_units='xy', scale=1, units='dots',
headwidth=qhw, headlength=qhl, headaxislength=qhl, width=qlw, color='g')
label_line(ax, 0, 1, uxd, 0, '$U_d$', 'g', qts)
ax.quiver(uxd, 1, 0, uxq, angles='xy', scale_units='xy', scale=1, units='dots',
headwidth=qhw, headlength=qhl, headaxislength=qhl, width=qlw, color='g')
label_line(ax, uxd, 1, 0, uxq, '$U_q$', 'g', qts)
ax.quiver(0, 0, curfac*i1d, curfac*i1q, angles='xy', scale_units='xy', scale=1,
units='dots', headwidth=qhw, headlength=qhl, headaxislength=qhl, width=qlw, color='b')
label_line(ax, 0, 0, curfac*i1d, curfac*i1q, '$I_1$', 'b', qts)
xmin, xmax = (min(0, uxd, i1d), max(0, i1d, uxd))
ymin, ymax = (min(0, i1q, 1-uxq), max(1, i1q, 1+uxq))
ax.set_xlim([xmin-0.1, xmax+0.1])
ax.set_ylim([ymin-0.1, ymax+0.1])
ax.grid(True)
def i1beta_phasor(up, i1, beta, r1, xd, xq, ax=0):
"""creates a phasor plot
up: internal voltage
i1: current
beta: angle i1 vs up [deg]
r1: resistance
xd: reactance in direct axis
xq: reactance in quadrature axis"""
i1d, i1q = (i1*np.sin(beta/180*np.pi), i1*np.cos(beta/180*np.pi))
uxdq = ((r1*i1d - xq*i1q), (r1*i1q + xd*i1d))
__phasor_plot(ax, up, (i1d, i1q), uxdq)
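# Illustrative call for i1beta_phasor (all numbers are made-up example values,
# not reference machine data):
#   fig, ax = plt.subplots()
#   i1beta_phasor(up=230.0, i1=50.0, beta=-25.0, r1=0.05, xd=1.2, xq=2.0, ax=ax)
#   plt.show()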
def iqd_phasor(up, iqd, uqd, ax=0):
"""creates a phasor plot
up: internal voltage
iqd: current
uqd: terminal voltage"""
uxdq = (uqd[1]/np.sqrt(2), (uqd[0]/np.sqrt(2)-up))
__phasor_plot(ax, up, (iqd[1]/np.sqrt(2), iqd[0]/np.sqrt(2)), uxdq)
def phasor(bch, ax=0):
"""create phasor plot from bch"""
f1 = bch.machine['p']*bch.dqPar['speed']
w1 = 2*np.pi*f1
xd = w1*bch.dqPar['ld'][-1]
xq = w1*bch.dqPar['lq'][-1]
r1 = bch.machine['r1']
i1beta_phasor(bch.dqPar['up'][-1],
bch.dqPar['i1'][-1], bch.dqPar['beta'][-1],
r1, xd, xq, ax)
def airgap(airgap, ax=0):
"""creates plot of flux density in airgap"""
if ax == 0:
ax = plt.gca()
ax.set_title('Airgap Flux Density [T]')
ax.plot(airgap['pos'], airgap['B'],
label='Max {:4.2f} T'.format(max(airgap['B'])))
ax.plot(airgap['pos'], airgap['B_fft'],
label='Base Ampl {:4.2f} T'.format(airgap['Bamp']))
ax.set_xlabel('Position/°')
ax.legend()
ax.grid(True)
def airgap_fft(airgap, bmin=1e-2, ax=0):
"""plot airgap harmonics"""
unit = 'T'
if ax == 0:
ax = plt.gca()
ax.set_title('Airgap Flux Density Harmonics / {}'.format(unit))
ax.grid(True)
order, fluxdens = np.array([(n, b) for n, b in zip(airgap['nue'],
airgap['B_nue']) if b > bmin]).T
try:
markerline1, stemlines1, _ = ax.stem(order, fluxdens, '-.', basefmt=" ",
use_line_collection=True)
ax.set_xticks(order)
except ValueError: # empty sequence
pass
def torque(pos, torque, ax=0):
"""creates plot from torque vs position"""
k = 20
alpha = np.linspace(pos[0], pos[-1],
k*len(torque))
f = ip.interp1d(pos, torque, kind='quadratic')
unit = 'Nm'
scale = 1
if np.min(torque) < -9.9e3 or np.max(torque) > 9.9e3:
scale = 1e-3
unit = 'kNm'
if ax == 0:
ax = plt.gca()
ax.set_title('Torque / {}'.format(unit))
ax.grid(True)
ax.plot(pos, [scale*t for t in torque], 'go')
ax.plot(alpha, scale*f(alpha))
if np.min(torque) > 0 and np.max(torque) > 0:
ax.set_ylim(bottom=0)
elif np.min(torque) < 0 and np.max(torque) < 0:
ax.set_ylim(top=0)
def torque_fft(order, torque, ax=0):
"""plot torque harmonics"""
unit = 'Nm'
scale = 1
if np.min(torque) < -9.9e3 or np.max(torque) > 9.9e3:
scale = 1e-3
unit = 'kNm'
if ax == 0:
ax = plt.gca()
ax.set_title('Torque Harmonics / {}'.format(unit))
ax.grid(True)
try:
bw = 2.5E-2*max(order)
ax.bar(order, [scale*t for t in torque], width=bw, align='center')
ax.set_xlim(left=-bw/2)
except ValueError: # empty sequence
pass
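# Both torque() and torque_fft() draw on the current axes when ax is omitted; an
# illustrative call pattern with data from a BCH result object (as used further
# below in pmrelsim) would be:
#   fig, axs = plt.subplots(1, 2)
#   torque(bch.torque[-1]['angle'], bch.torque[-1]['torque'], ax=axs[0])
#   torque_fft(bch.torque_fft[-1]['order'], bch.torque_fft[-1]['torque'], ax=axs[1])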
def force(title, pos, force, xlabel='', ax=0):
"""plot force vs position"""
unit = 'N'
scale = 1
if min(force) < -9.9e3 or max(force) > 9.9e3:
scale = 1e-3
unit = 'kN'
if ax == 0:
ax = plt.gca()
ax.set_title('{} / {}'.format(title, unit))
ax.grid(True)
ax.plot(pos, [scale*f for f in force])
if xlabel:
ax.set_xlabel(xlabel)
if min(force) > 0:
ax.set_ylim(bottom=0)
def force_fft(order, force, ax=0):
"""plot force harmonics"""
unit = 'N'
scale = 1
if min(force) < -9.9e3 or max(force) > 9.9e3:
scale = 1e-3
unit = 'kN'
if ax == 0:
ax = plt.gca()
ax.set_title('Force Harmonics / {}'.format(unit))
ax.grid(True)
try:
bw = 2.5E-2*max(order)
ax.bar(order, [scale*t for t in force], width=bw, align='center')
ax.set_xlim(left=-bw/2)
except ValueError: # empty sequence
pass
def forcedens(title, pos, fdens, ax=0):
"""plot force densities"""
if ax == 0:
ax = plt.gca()
ax.set_title(title)
ax.grid(True)
ax.plot(pos, [1e-3*ft for ft in fdens[0]], label='F tang')
ax.plot(pos, [1e-3*fn for fn in fdens[1]], label='F norm')
ax.legend()
ax.set_xlabel('Pos / deg')
ax.set_ylabel('Force Density / kN/m²')
def forcedens_surface(fdens, ax=0):
if ax == 0:
_create_3d_axis()
ax = plt.gca()
xpos = [p for p in fdens.positions[0]['X']]
ypos = [p['position'] for p in fdens.positions]
z = 1e-3*np.array([p['FN']
for p in fdens.positions])
_plot_surface(ax, xpos, ypos, z,
(u'Rotor pos/°', u'Pos/°', u'F N / kN/m²'))
def forcedens_fft(title, fdens, ax=0):
"""plot force densities FFT
Args:
title: plot title
fdens: force density object
"""
if ax == 0:
ax = plt.axes(projection="3d")
F = 1e-3*fdens.fft()
fmin = 0.2
num_bars = F.shape[0] + 1
_xx, _yy = np.meshgrid(np.arange(1, num_bars),
np.arange(1, num_bars))
z_size = F[F > fmin]
x_pos, y_pos = _xx[F > fmin], _yy[F > fmin]
z_pos = np.zeros_like(z_size)
x_size = 2
y_size = 2
ax.bar3d(x_pos, y_pos, z_pos, x_size, y_size, z_size)
ax.view_init(azim=120)
ax.set_xlim(0, num_bars+1)
ax.set_ylim(0, num_bars+1)
ax.set_title(title)
ax.set_xlabel('M')
ax.set_ylabel('N')
ax.set_zlabel('kN/m²')
def winding_flux(pos, flux, ax=0):
"""plot flux vs position"""
if ax == 0:
ax = plt.gca()
ax.set_title('Winding Flux / Vs')
ax.grid(True)
for p, f in zip(pos, flux):
ax.plot(p, f)
def winding_current(pos, current, ax=0):
"""plot winding currents"""
if ax == 0:
ax = plt.gca()
ax.set_title('Winding Currents / A')
ax.grid(True)
for p, i in zip(pos, current):
ax.plot(p, i)
def voltage(title, pos, voltage, ax=0):
"""plot voltage vs. position"""
if ax == 0:
ax = plt.gca()
ax.set_title('{} / V'.format(title))
ax.grid(True)
ax.plot(pos, voltage)
def voltage_fft(title, order, voltage, ax=0):
"""plot FFT harmonics of voltage"""
if ax == 0:
ax = plt.gca()
ax.set_title('{} / V'.format(title))
ax.grid(True)
if max(order) < 5:
order += [5]
voltage += [0]
try:
bw = 2.5E-2*max(order)
ax.bar(order, voltage, width=bw, align='center')
except ValueError: # empty sequence
pass
def mcv_hbj(mcv, log=True, ax=0):
"""plot H, B, J of mcv dict"""
import femagtools.mcv
MUE0 = 4e-7*np.pi
ji = []
csiz = len(mcv['curve'])
if ax == 0:
ax = plt.gca()
ax.set_title(mcv['name'])
for k, c in enumerate(mcv['curve']):
bh = [(bi, hi*1e-3)
for bi, hi in zip(c['bi'],
c['hi'])]
try:
if csiz == 1 and mcv['ctype'] in (femagtools.mcv.MAGCRV,
femagtools.mcv.ORIENT_CRV):
ji = [b-MUE0*h*1e3 for b, h in bh]
except Exception:
pass
bi, hi = zip(*bh)
label = 'Flux Density'
if csiz > 1:
label = 'Flux Density ({0}°)'.format(mcv.mc1_angle[k])
if log:
ax.semilogx(hi, bi, label=label)
if ji:
ax.semilogx(hi, ji, label='Polarisation')
else:
ax.plot(hi, bi, label=label)
if ji:
ax.plot(hi, ji, label='Polarisation')
ax.set_xlabel('H / kA/m')
ax.set_ylabel('T')
if ji or csiz > 1:
ax.legend(loc='lower right')
ax.grid()
def mcv_muer(mcv, ax=0):
"""plot rel. permeability vs. B of mcv dict"""
MUE0 = 4e-7*np.pi
bi, ur = zip(*[(bx, bx/hx/MUE0)
for bx, hx in zip(mcv['curve'][0]['bi'],
mcv['curve'][0]['hi']) if not hx == 0])
if ax == 0:
ax = plt.gca()
ax.plot(bi, ur)
ax.set_xlabel('B / T')
ax.set_title('rel. Permeability')
ax.grid()
def mtpa(pmrel, i1max, title='', projection='', ax=0):
"""create a line or surface plot with torque and mtpa curve"""
nsamples = 10
i1 = np.linspace(0, i1max, nsamples)
iopt = np.array([pmrel.mtpa(x) for x in i1]).T
iqmax, idmax = pmrel.iqdmax(i1max)
iqmin, idmin = pmrel.iqdmin(i1max)
if projection == '3d':
nsamples = 50
else:
if iqmin == 0:
iqmin = 0.1*iqmax
id = np.linspace(idmin, idmax, nsamples)
iq = np.linspace(iqmin, iqmax, nsamples)
torque_iqd = np.array(
[[pmrel.torque_iqd(x, y)
for y in id] for x in iq])
if projection == '3d':
ax = idq_torque(id, iq, torque_iqd, ax)
ax.plot(iopt[1], iopt[0], iopt[2],
color='red', linewidth=2, label='MTPA: {0:5.0f} Nm'.format(
np.max(iopt[2][-1])))
else:
if ax == 0:
ax = plt.gca()
ax.set_aspect('equal')
x, y = np.meshgrid(id, iq)
CS = ax.contour(x, y, torque_iqd, 6, colors='k')
ax.clabel(CS, fmt='%d', inline=1)
ax.set_xlabel('Id/A')
ax.set_ylabel('Iq/A')
ax.plot(iopt[1], iopt[0],
color='red', linewidth=2, label='MTPA: {0:5.0f} Nm'.format(
np.max(iopt[2][-1])))
ax.grid()
if title:
ax.set_title(title)
ax.legend()
def mtpv(pmrel, u1max, i1max, title='', projection='', ax=0):
"""create a line or surface plot with voltage and mtpv curve"""
w1 = pmrel.w2_imax_umax(i1max, u1max)
nsamples = 20
if projection == '3d':
nsamples = 50
iqmax, idmax = pmrel.iqdmax(i1max)
iqmin, idmin = pmrel.iqdmin(i1max)
id = np.linspace(idmin, idmax, nsamples)
iq = np.linspace(iqmin, iqmax, nsamples)
u1_iqd = np.array(
[[np.linalg.norm(pmrel.uqd(w1, iqx, idx))/np.sqrt(2)
for idx in id] for iqx in iq])
u1 = np.mean(u1_iqd)
imtpv = np.array([pmrel.mtpv(wx, u1, i1max)
for wx in np.linspace(w1, 20*w1, nsamples)]).T
if projection == '3d':
torque_iqd = np.array(
[[pmrel.torque_iqd(x, y)
for y in id] for x in iq])
ax = idq_torque(id, iq, torque_iqd, ax)
ax.plot(imtpv[1], imtpv[0], imtpv[2],
color='red', linewidth=2)
else:
if ax == 0:
ax = plt.gca()
ax.set_aspect('equal')
x, y = np.meshgrid(id, iq)
CS = ax.contour(x, y, u1_iqd, 4, colors='b') # linestyles='dashed')
ax.clabel(CS, fmt='%d', inline=1)
ax.plot(imtpv[1], imtpv[0],
color='red', linewidth=2,
label='MTPV: {0:5.0f} Nm'.format(np.max(imtpv[2])))
# beta = np.arctan2(imtpv[1][0], imtpv[0][0])
# b = np.linspace(beta, 0)
# ax.plot(np.sqrt(2)*i1max*np.sin(b), np.sqrt(2)*i1max*np.cos(b), 'r-')
ax.grid()
ax.legend()
ax.set_xlabel('Id/A')
ax.set_ylabel('Iq/A')
if title:
ax.set_title(title)
def __get_linearForce_title_keys(lf):
if 'force_r' in lf:
return ['Force r', 'Force z'], ['force_r', 'force_z']
return ['Force x', 'Force y'], ['force_x', 'force_y']
def pmrelsim(bch, title=''):
"""creates a plot of a PM/Rel motor simulation"""
cols = 2
rows = 4
if len(bch.flux['1']) > 1:
rows += 1
htitle = 1.5 if title else 0
fig, ax = plt.subplots(nrows=rows, ncols=cols,
figsize=(10, 3*rows + htitle))
if title:
fig.suptitle(title, fontsize=16)
row = 1
plt.subplot(rows, cols, row)
if bch.torque:
torque(bch.torque[-1]['angle'], bch.torque[-1]['torque'])
plt.subplot(rows, cols, row+1)
tq = list(bch.torque_fft[-1]['torque'])
order = list(bch.torque_fft[-1]['order'])
if order and max(order) < 5:
order += [15]
tq += [0]
torque_fft(order, tq)
plt.subplot(rows, cols, row+2)
force('Force Fx',
bch.torque[-1]['angle'], bch.torque[-1]['force_x'])
plt.subplot(rows, cols, row+3)
force('Force Fy',
bch.torque[-1]['angle'], bch.torque[-1]['force_y'])
row += 3
elif bch.linearForce:
title, keys = __get_linearForce_title_keys(bch.linearForce[-1])
force(title[0], bch.linearForce[-1]['displ'],
bch.linearForce[-1][keys[0]], 'Displt. / mm')
plt.subplot(rows, cols, row+1)
force_fft(bch.linearForce_fft[-2]['order'],
bch.linearForce_fft[-2]['force'])
plt.subplot(rows, cols, row+2)
force(title[1], bch.linearForce[-1]['displ'],
bch.linearForce[-1][keys[1]], 'Displt. / mm')
plt.subplot(rows, cols, row+3)
force_fft(bch.linearForce_fft[-1]['order'],
bch.linearForce_fft[-1]['force'])
row += 3
plt.subplot(rows, cols, row+1)
flux = [bch.flux[k][-1] for k in bch.flux]
pos = [f['displ'] for f in flux]
winding_flux(pos,
[f['flux_k'] for f in flux])
plt.subplot(rows, cols, row+2)
winding_current(pos,
[f['current_k'] for f in flux])
plt.subplot(rows, cols, row+3)
voltage('Internal Voltage',
bch.flux['1'][-1]['displ'],
bch.flux['1'][-1]['voltage_dpsi'])
plt.subplot(rows, cols, row+4)
try:
voltage_fft('Internal Voltage Harmonics',
bch.flux_fft['1'][-1]['order'],
bch.flux_fft['1'][-1]['voltage'])
    except Exception:
pass
if len(bch.flux['1']) > 1:
plt.subplot(rows, cols, row+5)
voltage('No Load Voltage',
bch.flux['1'][0]['displ'],
bch.flux['1'][0]['voltage_dpsi'])
plt.subplot(rows, cols, row+6)
try:
voltage_fft('No Load Voltage Harmonics',
bch.flux_fft['1'][0]['order'],
bch.flux_fft['1'][0]['voltage'])
        except Exception:
pass
fig.tight_layout(h_pad=3.5)
if title:
fig.subplots_adjust(top=0.92)
def multcal(bch, title=''):
"""creates a plot of a MULT CAL simulation"""
cols = 2
rows = 4
htitle = 1.5 if title else 0
fig, ax = plt.subplots(nrows=rows, ncols=cols,
figsize=(10, 3*rows + htitle))
if title:
fig.suptitle(title, fontsize=16)
row = 1
plt.subplot(rows, cols, row)
if bch.torque:
torque(bch.torque[-1]['angle'], bch.torque[-1]['torque'])
plt.subplot(rows, cols, row+1)
tq = list(bch.torque_fft[-1]['torque'])
order = list(bch.torque_fft[-1]['order'])
if order and max(order) < 5:
order += [15]
tq += [0]
torque_fft(order, tq)
plt.subplot(rows, cols, row+2)
force('Force Fx',
bch.torque[-1]['angle'], bch.torque[-1]['force_x'])
plt.subplot(rows, cols, row+3)
force('Force Fy',
bch.torque[-1]['angle'], bch.torque[-1]['force_y'])
row += 3
elif bch.linearForce:
title, keys = __get_linearForce_title_keys(bch.linearForce[-1])
force(title[0], bch.linearForce[-1]['displ'],
bch.linearForce[-1][keys[0]], 'Displt. / mm')
plt.subplot(rows, cols, row+1)
force_fft(bch.linearForce_fft[-2]['order'],
bch.linearForce_fft[-2]['force'])
plt.subplot(rows, cols, row+2)
force(title[1], bch.linearForce[-1]['displ'],
bch.linearForce[-1][keys[1]], 'Displt. / mm')
plt.subplot(rows, cols, row+3)
force_fft(bch.linearForce_fft[-1]['order'],
bch.linearForce_fft[-1]['force'])
row += 3
plt.subplot(rows, cols, row+1)
flux = [bch.flux[k][-1] for k in bch.flux]
pos = [f['displ'] for f in flux]
winding_flux(pos,
[f['flux_k'] for f in flux])
plt.subplot(rows, cols, row+2)
winding_current(pos,
[f['current_k'] for f in flux])
plt.subplot(rows, cols, row+3)
voltage('Internal Voltage',
bch.flux['1'][-1]['displ'],
bch.flux['1'][-1]['voltage_dpsi'])
plt.subplot(rows, cols, row+4)
try:
voltage_fft('Internal Voltage Harmonics',
bch.flux_fft['1'][-1]['order'],
bch.flux_fft['1'][-1]['voltage'])
    except Exception:
pass
if len(bch.flux['1']) > 1:
plt.subplot(rows, cols, row+5)
voltage('No Load Voltage',
bch.flux['1'][0]['displ'],
bch.flux['1'][0]['voltage_dpsi'])
plt.subplot(rows, cols, row+6)
try:
voltage_fft('No Load Voltage Harmonics',
bch.flux_fft['1'][0]['order'],
bch.flux_fft['1'][0]['voltage'])
        except Exception:
pass
fig.tight_layout(h_pad=3.5)
if title:
fig.subplots_adjust(top=0.92)
def fasttorque(bch, title=''):
"""creates a plot of a Fast Torque simulation"""
cols = 2
rows = 4
if len(bch.flux['1']) > 1:
rows += 1
htitle = 1.5 if title else 0
fig, ax = plt.subplots(nrows=rows, ncols=cols,
figsize=(10, 3*rows + htitle))
if title:
fig.suptitle(title, fontsize=16)
row = 1
plt.subplot(rows, cols, row)
if bch.torque:
torque(bch.torque[-1]['angle'], bch.torque[-1]['torque'])
plt.subplot(rows, cols, row+1)
torque_fft(bch.torque_fft[-1]['order'], bch.torque_fft[-1]['torque'])
plt.subplot(rows, cols, row+2)
force('Force Fx',
bch.torque[-1]['angle'], bch.torque[-1]['force_x'])
plt.subplot(rows, cols, row+3)
force('Force Fy',
bch.torque[-1]['angle'], bch.torque[-1]['force_y'])
row += 3
elif bch.linearForce:
title, keys = __get_linearForce_title_keys(bch.linearForce[-1])
force(title[0], bch.linearForce[-1]['displ'],
bch.linearForce[-1][keys[0]], 'Displt. / mm')
plt.subplot(rows, cols, row+1)
force_fft(bch.linearForce_fft[-2]['order'],
bch.linearForce_fft[-2]['force'])
plt.subplot(rows, cols, row+2)
force(title[1], bch.linearForce[-1]['displ'],
bch.linearForce[-1][keys[1]], 'Displt. / mm')
plt.subplot(rows, cols, row+3)
force_fft(bch.linearForce_fft[-1]['order'],
bch.linearForce_fft[-1]['force'])
row += 3
plt.subplot(rows, cols, row+1)
flux = [bch.flux[k][-1] for k in bch.flux]
pos = [f['displ'] for f in flux]
winding_flux(pos, [f['flux_k'] for f in flux])
plt.subplot(rows, cols, row+2)
winding_current(pos, [f['current_k'] for f in flux])
plt.subplot(rows, cols, row+3)
voltage('Internal Voltage',
bch.flux['1'][-1]['displ'],
bch.flux['1'][-1]['voltage_dpsi'])
plt.subplot(rows, cols, row+4)
try:
voltage_fft('Internal Voltage Harmonics',
bch.flux_fft['1'][-1]['order'],
bch.flux_fft['1'][-1]['voltage'])
    except Exception:
pass
if len(bch.flux['1']) > 1:
plt.subplot(rows, cols, row+5)
voltage('No Load Voltage',
bch.flux['1'][0]['displ'],
bch.flux['1'][0]['voltage_dpsi'])
plt.subplot(rows, cols, row+6)
try:
voltage_fft('No Load Voltage Harmonics',
bch.flux_fft['1'][0]['order'],
bch.flux_fft['1'][0]['voltage'])
        except Exception:
pass
fig.tight_layout(h_pad=3.5)
if title:
fig.subplots_adjust(top=0.92)
def cogging(bch, title=''):
"""creates a cogging plot"""
cols = 2
rows = 3
htitle = 1.5 if title else 0
fig, ax = plt.subplots(nrows=rows, ncols=cols,
figsize=(10, 3*rows + htitle))
if title:
fig.suptitle(title, fontsize=16)
row = 1
plt.subplot(rows, cols, row)
if bch.torque:
torque(bch.torque[0]['angle'], bch.torque[0]['torque'])
plt.subplot(rows, cols, row+1)
if bch.torque_fft:
torque_fft(bch.torque_fft[0]['order'], bch.torque_fft[0]['torque'])
plt.subplot(rows, cols, row+2)
force('Force Fx',
bch.torque[0]['angle'], bch.torque[0]['force_x'])
plt.subplot(rows, cols, row+3)
force('Force Fy',
bch.torque[0]['angle'], bch.torque[0]['force_y'])
row += 3
elif bch.linearForce:
title, keys = __get_linearForce_title_keys(bch.linearForce[-1])
force(title[0], bch.linearForce[-1]['displ'],
bch.linearForce[-1][keys[0]], 'Displt. / mm')
plt.subplot(rows, cols, row+1)
force_fft(bch.linearForce_fft[-2]['order'],
bch.linearForce_fft[-2]['force'])
plt.subplot(rows, cols, row+2)
force(title[1], bch.linearForce[-1]['displ'],
bch.linearForce[-1][keys[1]], 'Displt. / mm')
plt.subplot(rows, cols, row+3)
force_fft(bch.linearForce_fft[-1]['order'],
bch.linearForce_fft[-1]['force'])
row += 3
plt.subplot(rows, cols, row+1)
voltage('Voltage',
bch.flux['1'][0]['displ'],
bch.flux['1'][0]['voltage_dpsi'])
plt.subplot(rows, cols, row+2)
voltage_fft('Voltage Harmonics',
bch.flux_fft['1'][0]['order'],
bch.flux_fft['1'][0]['voltage'])
fig.tight_layout(h_pad=2)
if title:
fig.subplots_adjust(top=0.92)
def transientsc(bch, title=''):
"""creates a transient short circuit plot"""
cols = 1
rows = 2
htitle = 1.5 if title else 0
fig, ax = plt.subplots(nrows=rows, ncols=cols,
figsize=(10, 3*rows + htitle))
if title:
fig.suptitle(title, fontsize=16)
row = 1
plt.subplot(rows, cols, row)
ax = plt.gca()
ax.set_title('Currents / A')
ax.grid(True)
for i in ('ia', 'ib', 'ic'):
ax.plot(bch.scData['time'], bch.scData[i], label=i)
ax.set_xlabel('Time / s')
ax.legend()
row = 2
plt.subplot(rows, cols, row)
ax = plt.gca()
ax.set_title('Torque / Nm')
ax.grid(True)
ax.plot(bch.scData['time'], bch.scData['torque'])
ax.set_xlabel('Time / s')
fig.tight_layout(h_pad=2)
if title:
fig.subplots_adjust(top=0.92)
def i1beta_torque(i1, beta, torque, title='', ax=0):
"""creates a surface plot of torque vs i1, beta"""
if ax == 0:
_create_3d_axis()
ax = plt.gca()
azim = 210
if 0 < np.mean(beta) or -90 > np.mean(beta):
azim = -60
unit = 'Nm'
scale = 1
if np.min(torque) < -9.9e3 or np.max(torque) > 9.9e3:
scale = 1e-3
unit = 'kNm'
if title:
_plot_surface(ax, i1, beta, scale*np.asarray(torque),
(u'I1/A', u'Beta/°', title),
azim=azim)
else:
_plot_surface(ax, i1, beta, scale*np.asarray(torque),
(u'I1/A', u'Beta/°', u'Torque/{}'.format(unit)),
azim=azim)
def i1beta_ld(i1, beta, ld, ax=0):
"""creates a surface plot of ld vs i1, beta"""
if ax == 0:
_create_3d_axis()
ax = plt.gca()
_plot_surface(ax, i1, beta, np.asarray(ld)*1e3,
(u'I1/A', u'Beta/°', u'Ld/mH'),
azim=60)
def i1beta_lq(i1, beta, lq, ax=0):
"""creates a surface plot of ld vs i1, beta"""
if ax == 0:
_create_3d_axis()
ax = plt.gca()
azim = 60
if 0 < np.mean(beta) or -90 > np.mean(beta):
azim = -120
_plot_surface(ax, i1, beta, np.asarray(lq)*1e3,
(u'I1/A', u'Beta/°', u'Lq/mH'),
azim=azim)
def i1beta_psim(i1, beta, psim, ax=0):
"""creates a surface plot of psim vs i1, beta"""
if ax == 0:
_create_3d_axis()
ax = plt.gca()
_plot_surface(ax, i1, beta, psim,
(u'I1/A', u'Beta/°', u'Psi m/Vs'),
azim=60)
def i1beta_up(i1, beta, up, ax=0):
"""creates a surface plot of up vs i1, beta"""
if ax == 0:
_create_3d_axis()
ax = plt.gca()
_plot_surface(ax, i1, beta, up,
(u'I1/A', u'Beta/°', u'Up/V'),
azim=60)
def i1beta_psid(i1, beta, psid, ax=0):
"""creates a surface plot of psid vs i1, beta"""
if ax == 0:
_create_3d_axis()
ax = plt.gca()
azim = -60
if 0 < np.mean(beta) or -90 > np.mean(beta):
azim = 60
_plot_surface(ax, i1, beta, psid,
(u'I1/A', u'Beta/°', u'Psi d/Vs'),
azim=azim)
def i1beta_psiq(i1, beta, psiq, ax=0):
"""creates a surface plot of psiq vs i1, beta"""
if ax == 0:
_create_3d_axis()
ax = plt.gca()
azim = 210
if 0 < np.mean(beta) or -90 > np.mean(beta):
azim = -60
_plot_surface(ax, i1, beta, psiq,
(u'I1/A', u'Beta/°', u'Psi q/Vs'),
azim=azim)
def idq_torque(id, iq, torque, ax=0):
"""creates a surface plot of torque vs id, iq"""
if ax == 0:
_create_3d_axis()
ax = plt.gca()
unit = 'Nm'
scale = 1
if np.min(torque) < -9.9e3 or np.max(torque) > 9.9e3:
scale = 1e-3
unit = 'kNm'
_plot_surface(ax, id, iq, scale*np.asarray(torque),
(u'Id/A', u'Iq/A', u'Torque/{}'.format(unit)),
azim=-60)
return ax
def idq_psid(id, iq, psid, ax=0):
"""creates a surface plot of psid vs id, iq"""
if ax == 0:
_create_3d_axis()
ax = plt.gca()
_plot_surface(ax, id, iq, psid,
(u'Id/A', u'Iq/A', u'Psi d/Vs'),
azim=210)
def idq_psiq(id, iq, psiq, ax=0):
"""creates a surface plot of psiq vs id, iq"""
if ax == 0:
_create_3d_axis()
ax = plt.gca()
_plot_surface(ax, id, iq, psiq,
(u'Id/A', u'Iq/A', u'Psi q/Vs'),
azim=210)
def idq_psim(id, iq, psim, ax=0):
"""creates a surface plot of psim vs. id, iq"""
if ax == 0:
_create_3d_axis()
ax = plt.gca()
_plot_surface(ax, id, iq, psim,
(u'Id/A', u'Iq/A', u'Psi m [Vs]'),
azim=120)
def idq_ld(id, iq, ld, ax=0):
"""creates a surface plot of ld vs. id, iq"""
if ax == 0:
_create_3d_axis()
ax = plt.gca()
_plot_surface(ax, id, iq, np.asarray(ld)*1e3,
(u'Id/A', u'Iq/A', u'L d/mH'),
azim=120)
def idq_lq(id, iq, lq, ax=0):
"""creates a surface plot of lq vs. id, iq"""
if ax == 0:
_create_3d_axis()
ax = plt.gca()
_plot_surface(ax, id, iq, np.asarray(lq)*1e3,
(u'Id/A', u'Iq/A', u'L q/mH'),
azim=120)
def ldlq(bch):
"""creates the surface plots of a BCH reader object
with a ld-lq identification"""
beta = bch.ldq['beta']
i1 = bch.ldq['i1']
torque = bch.ldq['torque']
ld = np.array(bch.ldq['ld'])
lq = np.array(bch.ldq['lq'])
psid = bch.ldq['psid']
psiq = bch.ldq['psiq']
rows = 3
fig = plt.figure(figsize=(10, 4*rows))
fig.suptitle('Ld-Lq Identification {}'.format(bch.filename), fontsize=16)
fig.add_subplot(rows, 2, 1, projection='3d')
i1beta_torque(i1, beta, torque)
fig.add_subplot(rows, 2, 2, projection='3d')
i1beta_psid(i1, beta, psid)
fig.add_subplot(rows, 2, 3, projection='3d')
i1beta_psiq(i1, beta, psiq)
fig.add_subplot(rows, 2, 4, projection='3d')
try:
i1beta_psim(i1, beta, bch.ldq['psim'])
    except Exception:
i1beta_up(i1, beta, bch.ldq['up'])
fig.add_subplot(rows, 2, 5, projection='3d')
i1beta_ld(i1, beta, ld)
fig.add_subplot(rows, 2, 6, projection='3d')
i1beta_lq(i1, beta, lq)
def psidq(bch):
"""creates the surface plots of a BCH reader object
with a psid-psiq identification"""
id = bch.psidq['id']
iq = bch.psidq['iq']
torque = bch.psidq['torque']
ld = np.array(bch.psidq_ldq['ld'])
lq = np.array(bch.psidq_ldq['lq'])
psim = bch.psidq_ldq['psim']
psid = bch.psidq['psid']
psiq = bch.psidq['psiq']
rows = 3
fig = plt.figure(figsize=(10, 4*rows))
fig.suptitle('Psid-Psiq Identification {}'.format(
bch.filename), fontsize=16)
fig.add_subplot(rows, 2, 1, projection='3d')
idq_torque(id, iq, torque)
fig.add_subplot(rows, 2, 2, projection='3d')
idq_psid(id, iq, psid)
fig.add_subplot(rows, 2, 3, projection='3d')
idq_psiq(id, iq, psiq)
fig.add_subplot(rows, 2, 4, projection='3d')
idq_psim(id, iq, psim)
fig.add_subplot(rows, 2, 5, projection='3d')
idq_ld(id, iq, ld)
fig.add_subplot(rows, 2, 6, projection='3d')
idq_lq(id, iq, lq)
def felosses(losses, coeffs, title='', log=True, ax=0):
"""plot iron losses with steinmetz or jordan approximation
Args:
losses: dict with f, B, pfe values
coeffs: list with steinmetz (cw, alpha, beta) or
jordan (cw, alpha, ch, beta, gamma) coeffs
title: title string
log: log scale for x and y axes if True
"""
import femagtools.losscoeffs as lc
if ax == 0:
ax = plt.gca()
fo = losses['fo']
Bo = losses['Bo']
    B = np.linspace(0.9*np.min(losses['B']),
                    1.1*0.9*np.max(losses['B']))
for i, f in enumerate(losses['f']):
pfe = [p for p in np.array(losses['pfe'])[i] if p]
if f > 0:
if len(coeffs) == 5:
ax.plot(B, lc.pfe_jordan(f, B, *coeffs, fo=fo, Bo=Bo))
elif len(coeffs) == 3:
ax.plot(B, lc.pfe_steinmetz(f, B, *coeffs, fo=fo, Bo=Bo))
plt.plot(losses['B'][:len(pfe)], pfe,
marker='o', label="{} Hz".format(f))
ax.set_title("Fe Losses/(W/kg) " + title)
if log:
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_xlabel("Flux Density [T]")
# plt.ylabel("Pfe [W/kg]")
ax.legend()
ax.grid(True)
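# The losses dict passed to felosses() is expected to carry the keys read above:
# 'fo'/'Bo' (base frequency and flux density), 'f' (frequencies), 'B' (flux
# densities) and 'pfe' (loss values per frequency). A sketch with made-up numbers
# and Steinmetz coefficients (cw, alpha, beta):
#   losses = {'fo': 50., 'Bo': 1.5, 'f': [50., 100.], 'B': [0.5, 1.0, 1.5],
#             'pfe': [[1.0, 2.3, 4.5], [2.1, 5.0, 9.8]]}
#   felosses(losses, (1.5, 1.3, 2.0), title='example sheet', log=True)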
def spel(isa, with_axis=False, ax=0):
"""plot super elements of I7/ISA7 model
Args:
isa: Isa7 object
"""
from matplotlib.patches import Polygon
if ax == 0:
ax = plt.gca()
ax.set_aspect('equal')
for se in isa.superelements:
ax.add_patch(Polygon([n.xy
for nc in se.nodechains
for n in nc.nodes],
color=isa.color[se.color], lw=0))
ax.autoscale(enable=True)
if not with_axis:
ax.axis('off')
def mesh(isa, with_axis=False, ax=0):
"""plot mesh of I7/ISA7 model
Args:
isa: Isa7 object
"""
from matplotlib.lines import Line2D
if ax == 0:
ax = plt.gca()
ax.set_aspect('equal')
for el in isa.elements:
pts = [list(i) for i in zip(*[v.xy for v in el.vertices])]
ax.add_line(Line2D(pts[0], pts[1], color='b', ls='-', lw=0.25))
# for nc in isa.nodechains:
# pts = [list(i) for i in zip(*[(n.x, n.y) for n in nc.nodes])]
# ax.add_line(Line2D(pts[0], pts[1], color="b", ls="-", lw=0.25,
# marker=".", ms="2", mec="None"))
# for nc in isa.nodechains:
# if nc.nodemid is not None:
# plt.plot(*nc.nodemid.xy, "rx")
ax.autoscale(enable=True)
if not with_axis:
ax.axis('off')
def _contour(ax, title, elements, values, label='', isa=None):
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
if ax == 0:
ax = plt.gca()
ax.set_aspect('equal')
ax.set_title(title, fontsize=18)
if isa:
for se in isa.superelements:
ax.add_patch(Polygon([n.xy
for nc in se.nodechains
for n in nc.nodes],
color='gray', alpha=0.1, lw=0))
valid_values = np.logical_not(np.isnan(values))
patches = np.array([Polygon([v.xy for v in e.vertices])
for e in elements])[valid_values]
# , cmap=matplotlib.cm.jet, alpha=0.4)
p = PatchCollection(patches, alpha=1.0, match_original=False)
p.set_array(np.asarray(values)[valid_values])
ax.add_collection(p)
cb = plt.colorbar(p)
for patch in np.array([Polygon([v.xy for v in e.vertices],
fc='white', alpha=1.0)
for e in elements])[np.isnan(values)]:
ax.add_patch(patch)
if label:
cb.set_label(label=label, fontsize=18)
ax.autoscale(enable=True)
ax.axis('off')
def demag(isa, ax=0):
"""plot demag of NC/I7/ISA7 model
Args:
isa: Isa7/NC object
"""
emag = [e for e in isa.elements if e.is_magnet()]
demag = np.array([e.demagnetization(isa.MAGN_TEMPERATURE) for e in emag])
_contour(ax, f'Demagnetization at {isa.MAGN_TEMPERATURE} °C',
emag, demag, '-H / kA/m', isa)
logger.info("Max demagnetization %f", np.max(demag))
def demag_pos(isa, pos, icur=-1, ibeta=-1, ax=0):
"""plot demag of NC/I7/ISA7 model at rotor position
Args:
isa: Isa7/NC object
pos: rotor position in degree
        icur: current amplitude index or last index if -1
ibeta: beta angle index or last index if -1
"""
emag = [e for e in isa.elements if e.is_magnet()]
demag = np.array([isa.demagnetization(e, icur, ibeta)[1]
for e in emag])
for i, x in enumerate(isa.pos_el_fe_induction):
if x >= pos/180*np.pi:
break
hpol = demag[:, i]
hpol[hpol == 0] = np.nan
_contour(ax, f'Demagnetization at Pos. {round(x/np.pi*180)}° ({isa.MAGN_TEMPERATURE} °C)',
emag, hpol, '-H / kA/m', isa)
logger.info("Max demagnetization %f kA/m", np.nanmax(hpol))
def flux_density(isa, subreg=[], ax=0):
"""plot flux density of NC/I7/ISA7 model
Args:
isa: Isa7/NC object
"""
if subreg:
if isinstance(subreg, list):
sr = subreg
else:
sr = [subreg]
elements = [e for s in sr for se in isa.get_subregion(s).elements()
for e in se]
else:
elements = [e for e in isa.elements]
fluxd = np.array([np.linalg.norm(e.flux_density()) for e in elements])
    _contour(ax, 'Flux Density / T', elements, fluxd)
logger.info("Max flux dens %f", np.max(fluxd))
def loss_density(isa, subreg=[], ax=0):
"""plot loss density of NC/I7/ISA7 model
Args:
isa: Isa7/NC object
"""
if subreg:
if isinstance(subreg, list):
sr = subreg
else:
sr = [subreg]
elements = [e for s in sr for sre in isa.get_subregion(s).elements()
for e in sre]
else:
elements = [e for e in isa.elements]
lossd = np.array([e.loss_density*1e-3 for e in elements])
_contour(ax, 'Loss Density kW/m³', elements, lossd)
def mmf(f, title='', ax=0):
"""plot magnetomotive force (mmf) of winding"""
if ax == 0:
ax = plt.gca()
if title:
ax.set_title(title)
ax.plot(np.array(f['pos'])/np.pi*180, f['mmf'])
ax.plot(np.array(f['pos_fft'])/np.pi*180, f['mmf_fft'])
ax.set_xlabel('Position / Deg')
phi = [f['alfa0']/np.pi*180, f['alfa0']/np.pi*180]
y = [min(f['mmf_fft']), 1.1*max(f['mmf_fft'])]
ax.plot(phi, y, '--')
alfa0 = round(f['alfa0']/np.pi*180, 3)
ax.text(phi[0]/2, y[0]+0.05, f"{alfa0}°",
ha="center", va="bottom")
ax.annotate(f"", xy=(phi[0], y[0]),
xytext=(0, y[0]), arrowprops=dict(arrowstyle="->"))
ax.grid()
def mmf_fft(f, title='', mmfmin=1e-2, ax=0):
"""plot winding mmf harmonics"""
if ax == 0:
ax = plt.gca()
if title:
ax.set_title(title)
else:
ax.set_title('MMF Harmonics')
ax.grid(True)
order, mmf = np.array([(n, m) for n, m in zip(f['nue'],
f['mmf_nue']) if m > mmfmin]).T
try:
markerline1, stemlines1, _ = ax.stem(order, mmf, '-.', basefmt=" ",
use_line_collection=True)
ax.set_xticks(order)
except ValueError: # empty sequence
pass
def zoneplan(wdg, ax=0):
"""plot zone plan of winding wdg"""
from matplotlib.patches import Rectangle
upper, lower = wdg.zoneplan()
Qb = len([n for l in upper for n in l])
from femagtools.windings import coil_color
rh = 0.5
if lower:
yl = rh
ymax = 2*rh + 0.2
else:
yl = 0
ymax = rh + 0.2
if ax == 0:
ax = plt.gca()
ax.axis('off')
ax.set_xlim([-0.5, Qb-0.5])
ax.set_ylim([0, ymax])
ax.set_aspect(Qb/6+0.3)
for i, p in enumerate(upper):
for x in p:
ax.add_patch(Rectangle((abs(x)-1.5, yl), 1, rh,
facecolor=coil_color[i],
edgecolor='white', fill=True))
s = f'+{i+1}' if x > 0 else f'-{i+1}'
ax.text(abs(x)-1, yl+rh/2, s, color='black',
ha="center", va="center")
for i, p in enumerate(lower):
for x in p:
ax.add_patch(Rectangle((abs(x)-1.5, yl-rh), 1, rh,
facecolor=coil_color[i],
edgecolor='white', fill=True))
s = f'+{i+1}' if x > 0 else f'-{i+1}'
ax.text(abs(x)-1, yl-rh/2, s, color='black',
ha="center", va="center")
yu = yl+rh
step = 1 if Qb < 25 else 2
if lower:
yl -= rh
margin = 0.05
ax.text(-0.5, yu+margin, f'Q={wdg.Q}, p={wdg.p}, q={round(wdg.q,4)}',
ha='left', va='bottom', size=15)
for i in range(0, Qb, step):
ax.text(i, yl-margin, f'{i+1}', ha="center", va="top")
def winding_factors(wdg, n=8, ax=0):
"""plot winding factors"""
    if ax == 0:
        ax = plt.gca()
ax.set_title(f'Winding factors Q={wdg.Q}, p={wdg.p}, q={round(wdg.q,4)}')
ax.grid(True)
order, kwp, kwd, kw = np.array([(n, k1, k2, k3)
for n, k1, k2, k3 in zip(wdg.kw_order(n),
wdg.kwp(n),
wdg.kwd(n),
wdg.kw(n))]).T
try:
markerline1, stemlines1, _ = ax.stem(order-1, kwp, 'C1:', basefmt=" ",
markerfmt='C1.',
use_line_collection=True, label='Pitch')
markerline2, stemlines2, _ = ax.stem(order+1, kwd, 'C2:', basefmt=" ",
markerfmt='C2.',
use_line_collection=True, label='Distribution')
markerline3, stemlines3, _ = ax.stem(order, kw, 'C0-', basefmt=" ",
markerfmt='C0o',
use_line_collection=True, label='Total')
ax.set_xticks(order)
ax.legend()
except ValueError: # empty sequence
pass
def winding(wdg, ax=0):
"""plot coils of windings wdg"""
from matplotlib.patches import Rectangle
from matplotlib.lines import Line2D
from femagtools.windings import coil_color
coil_len = 25
coil_height = 4
dslot = 8
arrow_head_length = 2
arrow_head_width = 2
if ax == 0:
ax = plt.gca()
z = wdg.zoneplan()
xoff = 0
if z[-1]:
xoff = 0.75
yd = dslot*wdg.yd
mh = 2*coil_height/yd
slots = sorted([abs(n) for m in z[0] for n in m])
smax = slots[-1]*dslot
for n in slots:
x = n*dslot
ax.add_patch(Rectangle((x + dslot/4, 1), dslot /
2, coil_len - 2, fc="lightblue"))
ax.text(x, coil_len / 2,
str(n),
horizontalalignment="center",
verticalalignment="center",
backgroundcolor="white",
bbox=dict(boxstyle='circle,pad=0', fc="white", lw=0))
line_thickness = [0.6, 1.2]
for i, layer in enumerate(z):
b = -xoff if i else xoff
lw = line_thickness[i]
for m, mslots in enumerate(layer):
for k in mslots:
x = abs(k) * dslot + b
xpoints = []
ypoints = []
if (i == 0 and (k > 0 or (k < 0 and wdg.l > 1))):
# first layer, positive dir or neg. dir and 2-layers:
# from right bottom
if x + yd > smax+b:
dx = dslot if yd > dslot else yd/4
xpoints = [x + yd//2 + dx - xoff]
ypoints = [-coil_height + mh*dx]
xpoints += [x + yd//2 - xoff, x, x, x + yd//2-xoff]
ypoints += [-coil_height, 0, coil_len,
coil_len+coil_height]
if x + yd > smax+b:
xpoints += [x + yd//2 + dx - xoff]
ypoints += [coil_len+coil_height - mh*dx]
else:
# from left bottom
if x - yd < 0: # and x - yd/2 > -3*dslot:
dx = dslot if yd > dslot else yd/4
xpoints = [x - yd//2 - dx + xoff]
ypoints = [- coil_height + mh*dx]
                        xpoints += [x - yd//2+xoff, x, x, x - yd//2+xoff]
ypoints += [-coil_height, 0, coil_len,
coil_len+coil_height]
if x - yd < 0: # and x - yd > -3*dslot:
xpoints += [x - yd//2 - dx + xoff]
ypoints += [coil_len + coil_height - mh*dx]
ax.add_line(Line2D(xpoints, ypoints,
color=coil_color[m], lw=lw))
if k > 0:
h = arrow_head_length
y = coil_len * 0.8
else:
h = -arrow_head_length
y = coil_len * 0.2
ax.arrow(x, y, 0, h,
length_includes_head=True,
head_starts_at_zero=False,
head_length=arrow_head_length,
head_width=arrow_head_width,
fc=coil_color[m], lw=0)
if False: # TODO show winding connections
m = 0
for k in [n*wdg.Q/wdg.p/wdg.m + 1 for n in range(wdg.m)]:
if k < len(slots):
x = k * dslot + b + yd/2 - xoff
ax.add_line(Line2D([x, x],
[-2*coil_height, -coil_height],
color=coil_color[m], lw=lw))
ax.text(x, -2*coil_height+0.5, str(m+1), color=coil_color[m])
m += 1
ax.autoscale(enable=True)
ax.set_axis_off()
def main():
import io
import sys
import argparse
from .__init__ import __version__
from femagtools.bch import Reader
argparser = argparse.ArgumentParser(
description='Read BCH/BATCH/PLT file and create a plot')
argparser.add_argument('filename',
help='name of BCH/BATCH/PLT file')
argparser.add_argument(
"--version",
"-v",
action="version",
version="%(prog)s {}, Python {}".format(__version__, sys.version),
help="display version information",
)
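    # Example invocation (illustrative; the file name is hypothetical and the
    # module is assumed to be run with -m so the relative imports resolve):
    #
    #     python -m femagtools.plot TEST_001.BCH
    #
    # The file extension (MC*, PLT or BCH/BATCH) selects the plot set below.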
args = argparser.parse_args()
if not matplotlibversion:
sys.exit(0)
if not args.filename:
sys.exit(0)
ext = args.filename.split('.')[-1].upper()
if ext.startswith('MC'):
import femagtools.mcv
        mcv = femagtools.mcv.read(args.filename)
if mcv['mc1_type'] in (femagtools.mcv.MAGCRV, femagtools.mcv.ORIENT_CRV):
ncols = 2
else: # Permanent Magnet
ncols = 1
fig, ax = plt.subplots(nrows=1, ncols=ncols, figsize=(10, 6))
if ncols > 1:
plt.subplot(1, 2, 1)
mcv_hbj(mcv)
plt.subplot(1, 2, 2)
mcv_muer(mcv)
else:
mcv_hbj(mcv, log=False)
fig.tight_layout()
fig.subplots_adjust(top=0.94)
plt.show()
return
if ext.startswith('PLT'):
import femagtools.forcedens
fdens = femagtools.forcedens.read(args.filename)
cols = 1
rows = 2
fig, ax = plt.subplots(nrows=rows, ncols=cols,
figsize=(10, 10*rows))
title = '{}, Rotor position {}'.format(
fdens.title, fdens.positions[0]['position'])
pos = fdens.positions[0]['X']
FT_FN = (fdens.positions[0]['FT'],
fdens.positions[0]['FN'])
plt.subplot(rows, cols, 1)
forcedens(title, pos, FT_FN)
title = 'Force Density Harmonics'
plt.subplot(rows, cols, 2)
forcedens_fft(title, fdens)
# fig.tight_layout(h_pad=3.5)
# if title:
# fig.subplots_adjust(top=0.92)
plt.show()
return
bchresults = Reader()
with io.open(args.filename, encoding='latin1', errors='ignore') as f:
bchresults.read(f.readlines())
if (bchresults.type.lower().find(
'pm-synchronous-motor simulation') >= 0 or
bchresults.type.lower().find(
'permanet-magnet-synchronous-motor') >= 0 or
bchresults.type.lower().find(
'simulation pm/universal-motor') >= 0):
pmrelsim(bchresults, bchresults.filename)
elif bchresults.type.lower().find(
'multiple calculation of forces and flux') >= 0:
multcal(bchresults, bchresults.filename)
elif bchresults.type.lower().find('cogging calculation') >= 0:
cogging(bchresults, bchresults.filename)
elif bchresults.type.lower().find('ld-lq-identification') >= 0:
ldlq(bchresults)
elif bchresults.type.lower().find('psid-psiq-identification') >= 0:
psidq(bchresults)
elif bchresults.type.lower().find('fast_torque calculation') >= 0:
fasttorque(bchresults)
elif bchresults.type.lower().find('transient sc') >= 0:
transientsc(bchresults, bchresults.filename)
else:
raise ValueError("BCH type {} not yet supported".format(
bchresults.type))
plt.show()
def characteristics(char, title=''):
fig, axs = plt.subplots(2, 2, figsize=(10, 8), sharex=True)
if title:
fig.suptitle(title)
n = np.array(char['n'])*60
pmech = np.array(char['pmech'])*1e-3
axs[0, 0].plot(n, np.array(char['T']), 'C0-', label='Torque')
axs[0, 0].set_ylabel("Torque / Nm")
axs[0, 0].grid()
axs[0, 0].legend(loc='center left')
ax1 = axs[0, 0].twinx()
ax1.plot(n, pmech, 'C1-', label='P mech')
ax1.set_ylabel("Power / kW")
ax1.legend(loc='lower center')
axs[0, 1].plot(n[1:], np.array(char['u1'][1:]), 'C0-', label='Voltage')
axs[0, 1].set_ylabel("Voltage / V",)
axs[0, 1].grid()
axs[0, 1].legend(loc='center left')
ax2 = axs[0, 1].twinx()
ax2.plot(n[1:], char['cosphi'][1:], 'C1-', label='Cos Phi')
ax2.set_ylabel("Cos Phi")
ax2.legend(loc='lower right')
if 'id' in char:
axs[1, 0].plot(n, np.array(char['id']), label='Id')
if 'iq' in char:
axs[1, 0].plot(n, np.array(char['iq']), label='Iq')
axs[1, 0].plot(n, np.array(char['i1']), label='I1')
axs[1, 0].set_xlabel("Speed / rpm")
axs[1, 0].set_ylabel("Current / A")
axs[1, 0].legend(loc='center left')
if 'beta' in char:
ax3 = axs[1, 0].twinx()
ax3.plot(n, char['beta'], 'C3-', label='Beta')
ax3.set_ylabel("Beta / °")
ax3.legend(loc='center right')
axs[1, 0].grid()
plfe = np.array(char['plfe'])*1e-3
plcu = np.array(char['plcu'])*1e-3
pl = np.array(char['losses'])*1e-3
axs[1, 1].plot(n, plcu, 'C0-', label='Cu Losses')
axs[1, 1].plot(n, plfe, 'C1-', label='Fe Losses')
axs[1, 1].set_ylabel("Losses / kW")
axs[1, 1].legend(loc='center left')
axs[1, 1].grid()
axs[1, 1].set_xlabel("Speed / rpm")
ax4 = axs[1, 1].twinx()
ax4.plot(n[1:-1], char['eta'][1:-1], 'C3-', label="Eta")
ax4.legend(loc='upper center')
ax4.set_ylabel("Efficiency")
fig.tight_layout()
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(message)s')
main()
| SEMAFORInformatik/femagtools | femagtools/plot.py | Python | bsd-2-clause | 54,354 |
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
from . import _wrap_numbers, Symbol, Number, Matrix
def symbols(s):
""" mimics sympy.symbols """
tup = tuple(map(Symbol, s.replace(',', ' ').split()))
if len(tup) == 1:
return tup[0]
else:
return tup
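# Minimal usage sketch (mirrors sympy.symbols; assumes Symbol construction as
# defined in this package):
#
#     >>> x, y = symbols('x, y')   # several names -> tuple of Symbols
#     >>> z = symbols('z')         # single name -> one Symbol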
def symarray(prefix, shape):
import numpy as np
arr = np.empty(shape, dtype=object)
for index in np.ndindex(shape):
arr[index] = Symbol('%s_%s' % (
prefix, '_'.join(map(str, index))))
return arr
def lambdify(args, exprs):
"""
lambdify mimics sympy.lambdify
"""
try:
nargs = len(args)
except TypeError:
args = (args,)
nargs = 1
try:
nexprs = len(exprs)
except TypeError:
exprs = (exprs,)
nexprs = 1
@_wrap_numbers
def f(*inp):
if len(inp) != nargs:
raise TypeError("Incorrect number of arguments")
try:
len(inp)
except TypeError:
inp = (inp,)
subsd = dict(zip(args, inp))
return [expr.subs(subsd).evalf() for expr in exprs][
0 if nexprs == 1 else slice(None)]
return f
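# Usage sketch (illustrative; assumes Symbol arithmetic as provided by this
# package):
#
#     >>> x, y = symbols('x y')
#     >>> f = lambdify((x, y), x + 2*y)
#     >>> f(1, 2)   # evaluates (x + 2*y).subs({x: 1, y: 2}).evalf()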
class Lambdify(object):
"""
Lambdify mimics symengine.Lambdify
"""
def __init__(self, syms, exprs):
self.syms = syms
self.exprs = exprs
def __call__(self, inp, out=None):
inp = tuple(map(Number.make, inp))
subsd = dict(zip(self.syms, inp))
def _eval(expr_iter):
return [expr.subs(subsd).evalf() for expr in expr_iter]
exprs = self.exprs
if out is not None:
try:
out.flat = _eval(exprs.flatten())
except AttributeError:
out.flat = _eval(exprs)
elif isinstance(exprs, Matrix):
import numpy as np
nr, nc = exprs.nrows, exprs.ncols
out = np.empty((nr, nc))
for ri in range(nr):
for ci in range(nc):
out[ri, ci] = exprs._get_element(
ri*nc + ci).subs(subsd).evalf()
return out
# return Matrix(nr, nc, _eval(exprs._get_element(i) for
# i in range(nr*nc)))
elif hasattr(exprs, 'reshape'):
# NumPy like container:
container = exprs.__class__(exprs.shape, dtype=float, order='C')
container.flat = _eval(exprs.flatten())
return container
else:
return _eval(exprs)
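# Usage sketch mirroring symengine.Lambdify (illustrative):
#
#     >>> x, y = symbols('x y')
#     >>> lmb = Lambdify((x, y), [x + y, x*y])
#     >>> lmb([3, 2])   # substitutes x=3, y=2 and evalf()s each expression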
| bjodah/pysym | pysym/util.py | Python | bsd-2-clause | 2,569 |
# Add your own choices here!
fruit = ["apples", "oranges", "pears", "grapes", "blueberries"]
lunch = ["pho", "timmies", "thai", "burgers", "buffet!", "indian", "montanas"]
situations = {"fruit":fruit, "lunch":lunch}
| rbracken/internbot | plugins/pick/choices.py | Python | bsd-2-clause | 218 |
# Copyright (c) 2015-2018 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
"""
sim_det_noise.py implements the noise simulation operator, OpSimNoise.
"""
import numpy as np
from ..op import Operator
from ..ctoast import sim_noise_sim_noise_timestream as sim_noise_timestream
from .. import timing as timing
class OpSimNoise(Operator):
"""
Operator which generates noise timestreams.
This passes through each observation and every process generates data
for its assigned samples. The dictionary for each observation should
include a unique 'ID' used in the random number generation. The
observation dictionary can optionally include a 'global_offset' member
that might be useful if you are splitting observations and want to
enforce reproducibility of a given sample, even when using
different-sized observations.
Args:
out (str): accumulate data to the cache with name <out>_<detector>.
If the named cache objects do not exist, then they are created.
realization (int): if simulating multiple realizations, the realization
index.
component (int): the component index to use for this noise simulation.
noise (str): PSD key in the observation dictionary.
"""
def __init__(self, out='noise', realization=0, component=0, noise='noise',
rate=None, altFFT=False):
# We call the parent class constructor, which currently does nothing
super().__init__()
self._out = out
self._oversample = 2
self._realization = realization
self._component = component
self._noisekey = noise
self._rate = rate
self._altfft = altFFT
def exec(self, data):
"""
Generate noise timestreams.
This iterates over all observations and detectors and generates
the noise timestreams based on the noise object for the current
observation.
Args:
data (toast.Data): The distributed data.
Raises:
            KeyError: If an observation in data does not have a noise
                object defined under the given key.
RuntimeError: If observations are not split into chunks.
"""
autotimer = timing.auto_timer(type(self).__name__)
for obs in data.obs:
obsindx = 0
if 'id' in obs:
obsindx = obs['id']
else:
print("Warning: observation ID is not set, using zero!")
telescope = 0
if 'telescope' in obs:
telescope = obs['telescope_id']
global_offset = 0
if 'global_offset' in obs:
global_offset = obs['global_offset']
tod = obs['tod']
if self._noisekey in obs:
nse = obs[self._noisekey]
else:
raise KeyError('Observation does not contain noise under '
'"{}"'.format(self._noisekey))
if tod.local_chunks is None:
raise RuntimeError('noise simulation for uniform distributed '
'samples not implemented')
# eventually we'll redistribute, to allow long correlations...
if self._rate is None:
times = tod.local_times()
else:
times = None
# Iterate over each chunk.
chunk_first = tod.local_samples[0]
for curchunk in range(tod.local_chunks[1]):
chunk_first += self.simulate_chunk(
tod=tod, nse=nse,
curchunk=curchunk, chunk_first=chunk_first,
obsindx=obsindx, times=times,
telescope=telescope, global_offset=global_offset)
return
def simulate_chunk(self, *, tod, nse, curchunk, chunk_first,
obsindx, times, telescope, global_offset):
"""
Simulate one chunk of noise for all detectors.
Args:
tod (toast.tod.TOD): TOD object for the observation.
nse (toast.tod.Noise): Noise object for the observation.
curchunk (int): The local index of the chunk to simulate.
chunk_first (int): First global sample index of the chunk.
obsindx (int): Observation index for random number stream.
times (int): Timestamps for effective sample rate.
telescope (int): Telescope index for random number stream.
global_offset (int): Global offset for random number stream.
Returns:
chunk_samp (int): Number of simulated samples
"""
autotimer = timing.auto_timer(type(self).__name__)
chunk_samp = tod.total_chunks[tod.local_chunks[0] + curchunk]
local_offset = chunk_first - tod.local_samples[0]
if self._rate is None:
# compute effective sample rate
rate = 1 / np.median(np.diff(
times[local_offset : local_offset+chunk_samp]))
else:
rate = self._rate
for key in nse.keys:
# Check if noise matching this PSD key is needed
weight = 0.
for det in tod.local_dets:
weight += np.abs(nse.weight(det, key))
if weight == 0:
continue
# Simulate the noise matching this key
#nsedata = sim_noise_timestream(
# self._realization, telescope, self._component, obsindx,
# nse.index(key), rate, chunk_first+global_offset, chunk_samp,
# self._oversample, nse.freq(key), nse.psd(key),
# self._altfft)[0]
nsedata = sim_noise_timestream(
self._realization, telescope, self._component, obsindx,
nse.index(key), rate, chunk_first+global_offset, chunk_samp,
self._oversample, nse.freq(key), nse.psd(key))
# Add the noise to all detectors that have nonzero weights
for det in tod.local_dets:
weight = nse.weight(det, key)
if weight == 0:
continue
cachename = '{}_{}'.format(self._out, det)
if tod.cache.exists(cachename):
ref = tod.cache.reference(cachename)
else:
ref = tod.cache.create(cachename, np.float64,
(tod.local_samples[1], ))
ref[local_offset : local_offset+chunk_samp] += weight*nsedata
del ref
return chunk_samp
| tskisner/pytoast | src/python/tod/sim_det_noise.py | Python | bsd-2-clause | 6,740 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import zmq
import time
if __name__ == '__main__':
ctx = zmq.Context()
worker = ctx.socket(zmq.PULL)
worker.connect('tcp://localhost:5555')
sinker = ctx.socket(zmq.PUSH)
sinker.connect('tcp://localhost:6666')
print 'all workers are ready ...'
while True:
try:
msg = worker.recv()
print 'begin to work on task use `%s ms`' % msg
time.sleep(int(msg) * 0.001)
print '\tfinished this task'
sinker.send('finished task which used `%s ms`' % msg)
except KeyboardInterrupt:
break
sinker.close()
worker.close()
| ASMlover/study | zeroMQ/python/push-pull/worker.py | Python | bsd-2-clause | 1,961 |
from unittest import TestCase
import pkg_resources
from mock import patch
from click import UsageError
from click.testing import CliRunner
class TestCli(TestCase):
@patch('molo.core.cookiecutter.cookiecutter')
def test_scaffold(self, mock_cookiecutter):
from molo.core.scripts import cli
package = pkg_resources.get_distribution('molo.core')
runner = CliRunner()
runner.invoke(cli.scaffold, ['foo'])
[call] = mock_cookiecutter.call_args_list
args, kwargs = call
self.assertTrue(kwargs['extra_context'].pop('secret_key'))
self.assertEqual(kwargs, {
'no_input': True,
'extra_context': {
'app_name': 'foo',
'directory': 'foo',
'author': 'Praekelt Foundation',
'author_email': '[email protected]',
'url': None,
'license': 'BSD',
'molo_version': package.version,
'require': (),
'include': (),
}
})
@patch('molo.core.cookiecutter.cookiecutter')
def test_scaffold_with_custom_dir(self, mock_cookiecutter):
from molo.core.scripts import cli
package = pkg_resources.get_distribution('molo.core')
runner = CliRunner()
runner.invoke(cli.scaffold, ['foo', 'bar'])
[call] = mock_cookiecutter.call_args_list
args, kwargs = call
self.assertTrue(kwargs['extra_context'].pop('secret_key'))
self.assertEqual(kwargs, {
'no_input': True,
'extra_context': {
'app_name': 'foo',
'directory': 'bar',
'author': 'Praekelt Foundation',
'author_email': '[email protected]',
'url': None,
'license': 'BSD',
'molo_version': package.version,
'require': (),
'include': (),
}
})
@patch('molo.core.cookiecutter.cookiecutter')
def test_scaffold_with_requirements(self, mock_cookiecutter):
from molo.core.scripts import cli
package = pkg_resources.get_distribution('molo.core')
runner = CliRunner()
runner.invoke(cli.scaffold, ['foo', '--require', 'bar'])
[call] = mock_cookiecutter.call_args_list
args, kwargs = call
self.assertTrue(kwargs['extra_context'].pop('secret_key'))
self.assertEqual(kwargs, {
'no_input': True,
'extra_context': {
'app_name': 'foo',
'directory': 'foo',
'author': 'Praekelt Foundation',
'author_email': '[email protected]',
'url': None,
'license': 'BSD',
'molo_version': package.version,
'require': ('bar',),
'include': (),
}
})
@patch('molo.core.cookiecutter.cookiecutter')
def test_scaffold_with_includes(self, mock_cookiecutter):
from molo.core.scripts import cli
package = pkg_resources.get_distribution('molo.core')
runner = CliRunner()
runner.invoke(cli.scaffold, ['foo', '--include', 'bar', 'baz'])
[call] = mock_cookiecutter.call_args_list
args, kwargs = call
self.assertTrue(kwargs['extra_context'].pop('secret_key'))
self.assertEqual(kwargs, {
'no_input': True,
'extra_context': {
'app_name': 'foo',
'directory': 'foo',
'author': 'Praekelt Foundation',
'author_email': '[email protected]',
'url': None,
'license': 'BSD',
'molo_version': package.version,
'require': (),
'include': (('bar', 'baz'),),
}
})
@patch('molo.core.scripts.cli.get_package')
@patch('molo.core.scripts.cli.get_template_dirs')
@patch('shutil.copytree')
def test_unpack(self, mock_copytree, mock_get_template_dirs,
mock_get_package):
package = pkg_resources.get_distribution('molo.core')
mock_get_package.return_value = package
mock_get_template_dirs.return_value = ['foo']
mock_copytree.return_value = True
from molo.core.scripts import cli
runner = CliRunner()
runner.invoke(cli.unpack_templates, ['app1', 'app2'])
mock_copytree.assert_called_with(
pkg_resources.resource_filename('molo.core', 'templates/foo'),
pkg_resources.resource_filename('molo.core', 'templates/foo'))
def test_get_package(self):
from molo.core.scripts.cli import get_package
self.assertRaisesRegexp(
UsageError, 'molo.foo is not installed.', get_package, 'molo.foo')
| praekelt/molo | molo/core/scripts/tests/test_cli.py | Python | bsd-2-clause | 4,820 |
import numpy as np
from scipy.sparse import csr_matrix
class AliasArray(np.ndarray):
"""An ndarray with a mapping of values to user-friendly names -- see example
This ndarray subclass enables comparing sub_id and hop_id arrays directly with
their friendly string identifiers. The mapping parameter translates sublattice
or hopping names into their number IDs.
Only the `==` and `!=` operators are overloaded to handle the aliases.
Examples
--------
>>> a = AliasArray([0, 1, 0], mapping={"A": 0, "B": 1})
>>> list(a == 0)
[True, False, True]
>>> list(a == "A")
[True, False, True]
>>> list(a != "A")
[False, True, False]
>>> a = AliasArray([0, 1, 0, 2], mapping={"A|1": 0, "B": 1, "A|2": 2})
>>> list(a == "A")
[True, False, True, True]
>>> list(a != "A")
[False, True, False, False]
"""
def __new__(cls, array, mapping):
obj = np.asarray(array).view(cls)
obj.mapping = {SplitName(k): v for k, v in mapping.items()}
return obj
def __array_finalize__(self, obj):
if obj is None:
return
self.mapping = getattr(obj, "mapping", None)
def _mapped_eq(self, other):
if other in self.mapping:
return super().__eq__(self.mapping[other])
else:
result = np.zeros(len(self), dtype=np.bool)
for k, v in self.mapping.items():
if k == other:
result = np.logical_or(result, super().__eq__(v))
return result
def __eq__(self, other):
if isinstance(other, str):
return self._mapped_eq(other)
else:
return super().__eq__(other)
def __ne__(self, other):
if isinstance(other, str):
return np.logical_not(self._mapped_eq(other))
else:
return super().__ne__(other)
# noinspection PyAbstractClass
class AliasCSRMatrix(csr_matrix):
"""Same as :class:`AliasArray` but for a CSR matrix
Examples
--------
>>> from scipy.sparse import spdiags
>>> m = AliasCSRMatrix(spdiags([1, 2, 1], [0], 3, 3), mapping={'A': 1, 'B': 2})
>>> list(m.data == 'A')
[True, False, True]
>>> list(m.tocoo().data == 'A')
[True, False, True]
>>> list(m[:2].data == 'A')
[True, False]
"""
def __init__(self, *args, **kwargs):
mapping = kwargs.pop('mapping', {})
if not mapping:
mapping = getattr(args[0], 'mapping', {})
super().__init__(*args, **kwargs)
self.data = AliasArray(self.data, mapping)
@property
def format(self):
return 'csr'
@format.setter
def format(self, _):
pass
@property
def mapping(self):
return self.data.mapping
def tocoo(self, *args, **kwargs):
coo = super().tocoo(*args, **kwargs)
coo.data = AliasArray(coo.data, mapping=self.mapping)
return coo
def __getitem__(self, item):
result = super().__getitem__(item)
if getattr(result, 'format', '') == 'csr':
return AliasCSRMatrix(result, mapping=self.mapping)
else:
return result
class AliasIndex:
"""An all-or-nothing array index based on equality with a specific value
The `==` and `!=` operators are overloaded to return a lazy array which is either
all `True` or all `False`. See the examples below. This is useful for modifiers
where the each call gets arrays with the same sub_id/hop_id for all elements.
Instead of passing an `AliasArray` with `.size` identical element, `AliasIndex`
does the same all-or-nothing indexing.
Examples
--------
>>> l = np.array([1, 2, 3])
>>> ai = AliasIndex("A", len(l))
>>> list(l[ai == "A"])
[1, 2, 3]
>>> list(l[ai == "B"])
[]
>>> list(l[ai != "A"])
[]
>>> list(l[ai != "B"])
[1, 2, 3]
>>> np.logical_and([True, False, True], ai == "A")
array([ True, False, True], dtype=bool)
>>> np.logical_and([True, False, True], ai != "A")
array([False, False, False], dtype=bool)
>>> bool(ai == "A")
True
>>> bool(ai != "A")
False
>>> str(ai)
'A'
>>> hash(ai) == hash("A")
True
>>> int(ai.eye)
1
>>> np.allclose(AliasIndex("A", 1, (2, 2)).eye, np.eye(2))
True
"""
class LazyArray:
def __init__(self, value, shape):
self.value = value
self.shape = shape
def __bool__(self):
return bool(self.value)
def __array__(self):
return np.full(self.shape, self.value)
def __init__(self, name, shape, orbs=(1, 1)):
self.name = name
self.shape = shape
self.orbs = orbs
def __str__(self):
return self.name
def __eq__(self, other):
return self.LazyArray(self.name == other, self.shape)
def __ne__(self, other):
return self.LazyArray(self.name != other, self.shape)
def __hash__(self):
return hash(self.name)
@property
def eye(self):
return np.eye(*self.orbs)
class SplitName(str):
"""String subclass with special support for strings of the form "first|second"
Operators `==` and `!=` are overloaded to return `True` even if only the first part matches.
Examples
--------
>>> s = SplitName("first|second")
>>> s == "first|second"
True
>>> s != "first|second"
False
>>> s == "first"
True
>>> s != "first"
False
>>> s == "second"
False
>>> s != "second"
True
"""
@property
def first(self):
return self.split("|")[0]
def __eq__(self, other):
return super().__eq__(other) or self.first == other
def __ne__(self, other):
return super().__ne__(other) and self.first != other
def __hash__(self):
return super().__hash__()
| MAndelkovic/pybinding | pybinding/support/alias.py | Python | bsd-2-clause | 5,869 |
# The purpose of these tests is to ensure that calling ufuncs with quantities
# returns quantities with the right units, or raises exceptions.
import warnings
import pytest
import numpy as np
from numpy.testing.utils import assert_allclose
from ... import units as u
from ...tests.helper import raises
from ...extern.six.moves import zip
from ...utils.compat import NUMPY_LT_1_13
class TestUfuncCoverage(object):
"""Test that we cover all ufunc's"""
def test_coverage(self):
all_np_ufuncs = set([ufunc for ufunc in np.core.umath.__dict__.values()
if type(ufunc) == np.ufunc])
from .. import quantity_helper as qh
all_q_ufuncs = (qh.UNSUPPORTED_UFUNCS |
set(qh.UFUNC_HELPERS.keys()))
assert all_np_ufuncs - all_q_ufuncs == set([])
assert all_q_ufuncs - all_np_ufuncs == set([])
class TestQuantityTrigonometricFuncs(object):
"""
Test trigonometric functions
"""
def test_sin_scalar(self):
q = np.sin(30. * u.degree)
assert q.unit == u.dimensionless_unscaled
assert_allclose(q.value, 0.5)
def test_sin_array(self):
q = np.sin(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian)
assert q.unit == u.dimensionless_unscaled
assert_allclose(q.value,
np.array([0., 1. / np.sqrt(2.), 1.]), atol=1.e-15)
def test_arcsin_scalar(self):
q1 = 30. * u.degree
q2 = np.arcsin(np.sin(q1)).to(q1.unit)
assert_allclose(q1.value, q2.value)
def test_arcsin_array(self):
q1 = np.array([0., np.pi / 4., np.pi / 2.]) * u.radian
q2 = np.arcsin(np.sin(q1)).to(q1.unit)
assert_allclose(q1.value, q2.value)
def test_sin_invalid_units(self):
with pytest.raises(TypeError) as exc:
np.sin(3. * u.m)
assert exc.value.args[0] == ("Can only apply 'sin' function "
"to quantities with angle units")
def test_arcsin_invalid_units(self):
with pytest.raises(TypeError) as exc:
np.arcsin(3. * u.m)
assert exc.value.args[0] == ("Can only apply 'arcsin' function to "
"dimensionless quantities")
def test_arcsin_no_warning_on_unscaled_quantity(self):
a = 15 * u.kpc
b = 27 * u.pc
with warnings.catch_warnings():
warnings.filterwarnings('error')
np.arcsin(b/a)
def test_cos_scalar(self):
q = np.cos(np.pi / 3. * u.radian)
assert q.unit == u.dimensionless_unscaled
assert_allclose(q.value, 0.5)
def test_cos_array(self):
q = np.cos(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian)
assert q.unit == u.dimensionless_unscaled
assert_allclose(q.value,
np.array([1., 1. / np.sqrt(2.), 0.]), atol=1.e-15)
def test_arccos_scalar(self):
q1 = np.pi / 3. * u.radian
q2 = np.arccos(np.cos(q1)).to(q1.unit)
assert_allclose(q1.value, q2.value)
def test_arccos_array(self):
q1 = np.array([0., np.pi / 4., np.pi / 2.]) * u.radian
q2 = np.arccos(np.cos(q1)).to(q1.unit)
assert_allclose(q1.value, q2.value)
def test_cos_invalid_units(self):
with pytest.raises(TypeError) as exc:
np.cos(3. * u.s)
assert exc.value.args[0] == ("Can only apply 'cos' function "
"to quantities with angle units")
def test_arccos_invalid_units(self):
with pytest.raises(TypeError) as exc:
np.arccos(3. * u.s)
assert exc.value.args[0] == ("Can only apply 'arccos' function to "
"dimensionless quantities")
def test_tan_scalar(self):
q = np.tan(np.pi / 3. * u.radian)
assert q.unit == u.dimensionless_unscaled
assert_allclose(q.value, np.sqrt(3.))
def test_tan_array(self):
q = np.tan(np.array([0., 45., 135., 180.]) * u.degree)
assert q.unit == u.dimensionless_unscaled
assert_allclose(q.value,
np.array([0., 1., -1., 0.]), atol=1.e-15)
def test_arctan_scalar(self):
q = np.pi / 3. * u.radian
assert np.arctan(np.tan(q))
def test_arctan_array(self):
q = np.array([10., 30., 70., 80.]) * u.degree
assert_allclose(np.arctan(np.tan(q)).to_value(q.unit), q.value)
def test_tan_invalid_units(self):
with pytest.raises(TypeError) as exc:
np.tan(np.array([1, 2, 3]) * u.N)
assert exc.value.args[0] == ("Can only apply 'tan' function "
"to quantities with angle units")
def test_arctan_invalid_units(self):
with pytest.raises(TypeError) as exc:
np.arctan(np.array([1, 2, 3]) * u.N)
assert exc.value.args[0] == ("Can only apply 'arctan' function to "
"dimensionless quantities")
def test_arctan2_valid(self):
q1 = np.array([10., 30., 70., 80.]) * u.m
q2 = 2.0 * u.km
assert np.arctan2(q1, q2).unit == u.radian
assert_allclose(np.arctan2(q1, q2).value,
np.arctan2(q1.value, q2.to_value(q1.unit)))
q3 = q1 / q2
q4 = 1.
at2 = np.arctan2(q3, q4)
assert_allclose(at2.value, np.arctan2(q3.to_value(1), q4))
def test_arctan2_invalid(self):
with pytest.raises(u.UnitsError) as exc:
np.arctan2(np.array([1, 2, 3]) * u.N, 1. * u.s)
assert "compatible dimensions" in exc.value.args[0]
with pytest.raises(u.UnitsError) as exc:
np.arctan2(np.array([1, 2, 3]) * u.N, 1.)
assert "dimensionless quantities when other arg" in exc.value.args[0]
def test_radians(self):
q1 = np.deg2rad(180. * u.degree)
assert_allclose(q1.value, np.pi)
assert q1.unit == u.radian
q2 = np.radians(180. * u.degree)
assert_allclose(q2.value, np.pi)
assert q2.unit == u.radian
# the following doesn't make much sense in terms of the name of the
# routine, but we check it gives the correct result.
q3 = np.deg2rad(3. * u.radian)
assert_allclose(q3.value, 3.)
assert q3.unit == u.radian
q4 = np.radians(3. * u.radian)
assert_allclose(q4.value, 3.)
assert q4.unit == u.radian
with pytest.raises(TypeError):
np.deg2rad(3. * u.m)
with pytest.raises(TypeError):
np.radians(3. * u.m)
def test_degrees(self):
# the following doesn't make much sense in terms of the name of the
# routine, but we check it gives the correct result.
q1 = np.rad2deg(60. * u.degree)
assert_allclose(q1.value, 60.)
assert q1.unit == u.degree
q2 = np.degrees(60. * u.degree)
assert_allclose(q2.value, 60.)
assert q2.unit == u.degree
q3 = np.rad2deg(np.pi * u.radian)
assert_allclose(q3.value, 180.)
assert q3.unit == u.degree
q4 = np.degrees(np.pi * u.radian)
assert_allclose(q4.value, 180.)
assert q4.unit == u.degree
with pytest.raises(TypeError):
np.rad2deg(3. * u.m)
with pytest.raises(TypeError):
np.degrees(3. * u.m)
class TestQuantityMathFuncs(object):
"""
Test other mathematical functions
"""
def test_multiply_scalar(self):
assert np.multiply(4. * u.m, 2. / u.s) == 8. * u.m / u.s
assert np.multiply(4. * u.m, 2.) == 8. * u.m
assert np.multiply(4., 2. / u.s) == 8. / u.s
def test_multiply_array(self):
assert np.all(np.multiply(np.arange(3.) * u.m, 2. / u.s) ==
np.arange(0, 6., 2.) * u.m / u.s)
@pytest.mark.parametrize('function', (np.divide, np.true_divide))
def test_divide_scalar(self, function):
assert function(4. * u.m, 2. * u.s) == function(4., 2.) * u.m / u.s
assert function(4. * u.m, 2.) == function(4., 2.) * u.m
assert function(4., 2. * u.s) == function(4., 2.) / u.s
@pytest.mark.parametrize('function', (np.divide, np.true_divide))
def test_divide_array(self, function):
assert np.all(function(np.arange(3.) * u.m, 2. * u.s) ==
function(np.arange(3.), 2.) * u.m / u.s)
def test_floor_divide_remainder_and_divmod(self):
inch = u.Unit(0.0254 * u.m)
dividend = np.array([1., 2., 3.]) * u.m
divisor = np.array([3., 4., 5.]) * inch
quotient = dividend // divisor
remainder = dividend % divisor
assert_allclose(quotient.value, [13., 19., 23.])
assert quotient.unit == u.dimensionless_unscaled
assert_allclose(remainder.value, [0.0094, 0.0696, 0.079])
assert remainder.unit == dividend.unit
quotient2 = np.floor_divide(dividend, divisor)
remainder2 = np.remainder(dividend, divisor)
assert np.all(quotient2 == quotient)
assert np.all(remainder2 == remainder)
quotient3, remainder3 = divmod(dividend, divisor)
assert np.all(quotient3 == quotient)
assert np.all(remainder3 == remainder)
with pytest.raises(TypeError):
divmod(dividend, u.km)
with pytest.raises(TypeError):
dividend // u.km
with pytest.raises(TypeError):
dividend % u.km
if hasattr(np, 'divmod'): # not NUMPY_LT_1_13
quotient4, remainder4 = np.divmod(dividend, divisor)
assert np.all(quotient4 == quotient)
assert np.all(remainder4 == remainder)
with pytest.raises(TypeError):
np.divmod(dividend, u.km)
def test_sqrt_scalar(self):
assert np.sqrt(4. * u.m) == 2. * u.m ** 0.5
def test_sqrt_array(self):
assert np.all(np.sqrt(np.array([1., 4., 9.]) * u.m)
== np.array([1., 2., 3.]) * u.m ** 0.5)
def test_square_scalar(self):
assert np.square(4. * u.m) == 16. * u.m ** 2
def test_square_array(self):
assert np.all(np.square(np.array([1., 2., 3.]) * u.m)
== np.array([1., 4., 9.]) * u.m ** 2)
def test_reciprocal_scalar(self):
assert np.reciprocal(4. * u.m) == 0.25 / u.m
def test_reciprocal_array(self):
assert np.all(np.reciprocal(np.array([1., 2., 4.]) * u.m)
== np.array([1., 0.5, 0.25]) / u.m)
# cbrt only introduced in numpy 1.10
# heaviside only introduced in numpy 1.13
@pytest.mark.skipif("not hasattr(np, 'heaviside')")
def test_heaviside_scalar(self):
assert np.heaviside(0. * u.m, 0.5) == 0.5 * u.dimensionless_unscaled
assert np.heaviside(0. * u.s,
25 * u.percent) == 0.25 * u.dimensionless_unscaled
assert np.heaviside(2. * u.J, 0.25) == 1. * u.dimensionless_unscaled
@pytest.mark.skipif("not hasattr(np, 'heaviside')")
def test_heaviside_array(self):
values = np.array([-1., 0., 0., +1.])
halfway = np.array([0.75, 0.25, 0.75, 0.25]) * u.dimensionless_unscaled
assert np.all(np.heaviside(values * u.m,
halfway * u.dimensionless_unscaled) ==
[0, 0.25, 0.75, +1.] * u.dimensionless_unscaled)
@pytest.mark.skipif("not hasattr(np, 'cbrt')")
def test_cbrt_scalar(self):
assert np.cbrt(8. * u.m**3) == 2. * u.m
@pytest.mark.skipif("not hasattr(np, 'cbrt')")
def test_cbrt_array(self):
# Calculate cbrt on both sides since on Windows the cube root of 64
        # does not exactly equal 4. See #4388.
values = np.array([1., 8., 64.])
assert np.all(np.cbrt(values * u.m**3) ==
np.cbrt(values) * u.m)
def test_power_scalar(self):
assert np.power(4. * u.m, 2.) == 16. * u.m ** 2
assert np.power(4., 200. * u.cm / u.m) == \
u.Quantity(16., u.dimensionless_unscaled)
# regression check on #1696
assert np.power(4. * u.m, 0.) == 1. * u.dimensionless_unscaled
def test_power_array(self):
assert np.all(np.power(np.array([1., 2., 3.]) * u.m, 3.)
== np.array([1., 8., 27.]) * u.m ** 3)
# regression check on #1696
assert np.all(np.power(np.arange(4.) * u.m, 0.) ==
1. * u.dimensionless_unscaled)
# float_power only introduced in numpy 1.12
@pytest.mark.skipif("not hasattr(np, 'float_power')")
def test_float_power_array(self):
assert np.all(np.float_power(np.array([1., 2., 3.]) * u.m, 3.)
== np.array([1., 8., 27.]) * u.m ** 3)
# regression check on #1696
assert np.all(np.float_power(np.arange(4.) * u.m, 0.) ==
1. * u.dimensionless_unscaled)
@raises(ValueError)
def test_power_array_array(self):
np.power(4. * u.m, [2., 4.])
@raises(ValueError)
def test_power_array_array2(self):
np.power([2., 4.] * u.m, [2., 4.])
def test_power_array_array3(self):
# Identical unit fractions are converted automatically to dimensionless
# and should be allowed as base for np.power: #4764
q = [2., 4.] * u.m / u.m
powers = [2., 4.]
res = np.power(q, powers)
assert np.all(res.value == q.value ** powers)
assert res.unit == u.dimensionless_unscaled
# The same holds for unit fractions that are scaled dimensionless.
q2 = [2., 4.] * u.m / u.cm
# Test also against different types of exponent
for cls in (list, tuple, np.array, np.ma.array, u.Quantity):
res2 = np.power(q2, cls(powers))
assert np.all(res2.value == q2.to_value(1) ** powers)
assert res2.unit == u.dimensionless_unscaled
# Though for single powers, we keep the composite unit.
res3 = q2 ** 2
assert np.all(res3.value == q2.value ** 2)
assert res3.unit == q2.unit ** 2
assert np.all(res3 == q2 ** [2, 2])
def test_power_invalid(self):
with pytest.raises(TypeError) as exc:
np.power(3., 4. * u.m)
assert "raise something to a dimensionless" in exc.value.args[0]
def test_copysign_scalar(self):
assert np.copysign(3 * u.m, 1.) == 3. * u.m
assert np.copysign(3 * u.m, 1. * u.s) == 3. * u.m
assert np.copysign(3 * u.m, -1.) == -3. * u.m
assert np.copysign(3 * u.m, -1. * u.s) == -3. * u.m
def test_copysign_array(self):
assert np.all(np.copysign(np.array([1., 2., 3.]) * u.s, -1.) == -np.array([1., 2., 3.]) * u.s)
assert np.all(np.copysign(np.array([1., 2., 3.]) * u.s, -1. * u.m) == -np.array([1., 2., 3.]) * u.s)
assert np.all(np.copysign(np.array([1., 2., 3.]) * u.s, np.array([-2., 2., -4.]) * u.m) == np.array([-1., 2., -3.]) * u.s)
q = np.copysign(np.array([1., 2., 3.]), -3 * u.m)
assert np.all(q == np.array([-1., -2., -3.]))
assert not isinstance(q, u.Quantity)
def test_ldexp_scalar(self):
assert np.ldexp(4. * u.m, 2) == 16. * u.m
def test_ldexp_array(self):
assert np.all(np.ldexp(np.array([1., 2., 3.]) * u.m, [3, 2, 1])
== np.array([8., 8., 6.]) * u.m)
def test_ldexp_invalid(self):
with pytest.raises(TypeError):
np.ldexp(3. * u.m, 4.)
with pytest.raises(TypeError):
np.ldexp(3., u.Quantity(4, u.m, dtype=int))
@pytest.mark.parametrize('function', (np.exp, np.expm1, np.exp2,
np.log, np.log2, np.log10, np.log1p))
def test_exp_scalar(self, function):
q = function(3. * u.m / (6. * u.m))
assert q.unit == u.dimensionless_unscaled
assert q.value == function(0.5)
@pytest.mark.parametrize('function', (np.exp, np.expm1, np.exp2,
np.log, np.log2, np.log10, np.log1p))
def test_exp_array(self, function):
q = function(np.array([2., 3., 6.]) * u.m / (6. * u.m))
assert q.unit == u.dimensionless_unscaled
assert np.all(q.value
== function(np.array([1. / 3., 1. / 2., 1.])))
# should also work on quantities that can be made dimensionless
q2 = function(np.array([2., 3., 6.]) * u.m / (6. * u.cm))
assert q2.unit == u.dimensionless_unscaled
assert_allclose(q2.value,
function(np.array([100. / 3., 100. / 2., 100.])))
@pytest.mark.parametrize('function', (np.exp, np.expm1, np.exp2,
np.log, np.log2, np.log10, np.log1p))
def test_exp_invalid_units(self, function):
# Can't use exp() with non-dimensionless quantities
with pytest.raises(TypeError) as exc:
function(3. * u.m / u.s)
assert exc.value.args[0] == ("Can only apply '{0}' function to "
"dimensionless quantities"
.format(function.__name__))
def test_modf_scalar(self):
q = np.modf(9. * u.m / (600. * u.cm))
assert q == (0.5 * u.dimensionless_unscaled,
1. * u.dimensionless_unscaled)
def test_modf_array(self):
v = np.arange(10.) * u.m / (500. * u.cm)
q = np.modf(v)
n = np.modf(v.to_value(u.dimensionless_unscaled))
assert q[0].unit == u.dimensionless_unscaled
assert q[1].unit == u.dimensionless_unscaled
assert all(q[0].value == n[0])
assert all(q[1].value == n[1])
def test_frexp_scalar(self):
q = np.frexp(3. * u.m / (6. * u.m))
assert q == (np.array(0.5), np.array(0.0))
def test_frexp_array(self):
q = np.frexp(np.array([2., 3., 6.]) * u.m / (6. * u.m))
assert all((_q0, _q1) == np.frexp(_d) for _q0, _q1, _d
in zip(q[0], q[1], [1. / 3., 1. / 2., 1.]))
def test_frexp_invalid_units(self):
# Can't use prod() with non-dimensionless quantities
with pytest.raises(TypeError) as exc:
np.frexp(3. * u.m / u.s)
assert exc.value.args[0] == ("Can only apply 'frexp' function to "
"unscaled dimensionless quantities")
# also does not work on quantities that can be made dimensionless
with pytest.raises(TypeError) as exc:
np.frexp(np.array([2., 3., 6.]) * u.m / (6. * u.cm))
assert exc.value.args[0] == ("Can only apply 'frexp' function to "
"unscaled dimensionless quantities")
@pytest.mark.parametrize('function', (np.logaddexp, np.logaddexp2))
def test_dimensionless_twoarg_array(self, function):
q = function(np.array([2., 3., 6.]) * u.m / (6. * u.cm), 1.)
assert q.unit == u.dimensionless_unscaled
assert_allclose(q.value,
function(np.array([100. / 3., 100. / 2., 100.]), 1.))
@pytest.mark.parametrize('function', (np.logaddexp, np.logaddexp2))
def test_dimensionless_twoarg_invalid_units(self, function):
with pytest.raises(TypeError) as exc:
function(1. * u.km / u.s, 3. * u.m / u.s)
assert exc.value.args[0] == ("Can only apply '{0}' function to "
"dimensionless quantities"
.format(function.__name__))
class TestInvariantUfuncs(object):
# np.positive was only added in numpy 1.13.
@pytest.mark.parametrize(('ufunc'), [np.absolute, np.fabs,
np.conj, np.conjugate,
np.negative, np.spacing, np.rint,
np.floor, np.ceil] +
                                        ([np.positive] if hasattr(np, 'positive')
                                         else []))
def test_invariant_scalar(self, ufunc):
q_i = 4.7 * u.m
q_o = ufunc(q_i)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i.unit
assert q_o.value == ufunc(q_i.value)
@pytest.mark.parametrize(('ufunc'), [np.absolute, np.conjugate,
np.negative, np.rint,
np.floor, np.ceil])
def test_invariant_array(self, ufunc):
q_i = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_o = ufunc(q_i)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i.unit
assert np.all(q_o.value == ufunc(q_i.value))
@pytest.mark.parametrize(('ufunc'), [np.add, np.subtract, np.hypot,
np.maximum, np.minimum, np.nextafter,
np.remainder, np.mod, np.fmod])
def test_invariant_twoarg_scalar(self, ufunc):
q_i1 = 4.7 * u.m
q_i2 = 9.4 * u.km
q_o = ufunc(q_i1, q_i2)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i1.unit
assert_allclose(q_o.value, ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))
@pytest.mark.parametrize(('ufunc'), [np.add, np.subtract, np.hypot,
np.maximum, np.minimum, np.nextafter,
np.remainder, np.mod, np.fmod])
def test_invariant_twoarg_array(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_i2 = np.array([10., -5., 1.e6]) * u.g / u.us
q_o = ufunc(q_i1, q_i2)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i1.unit
assert_allclose(q_o.value, ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))
@pytest.mark.parametrize(('ufunc'), [np.add, np.subtract, np.hypot,
np.maximum, np.minimum, np.nextafter,
np.remainder, np.mod, np.fmod])
def test_invariant_twoarg_one_arbitrary(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
arbitrary_unit_value = np.array([0.])
q_o = ufunc(q_i1, arbitrary_unit_value)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i1.unit
assert_allclose(q_o.value, ufunc(q_i1.value, arbitrary_unit_value))
@pytest.mark.parametrize(('ufunc'), [np.add, np.subtract, np.hypot,
np.maximum, np.minimum, np.nextafter,
np.remainder, np.mod, np.fmod])
def test_invariant_twoarg_invalid_units(self, ufunc):
q_i1 = 4.7 * u.m
q_i2 = 9.4 * u.s
with pytest.raises(u.UnitsError) as exc:
ufunc(q_i1, q_i2)
assert "compatible dimensions" in exc.value.args[0]
class TestComparisonUfuncs(object):
@pytest.mark.parametrize(('ufunc'), [np.greater, np.greater_equal,
np.less, np.less_equal,
np.not_equal, np.equal])
def test_comparison_valid_units(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_i2 = np.array([10., -5., 1.e6]) * u.g / u.Ms
q_o = ufunc(q_i1, q_i2)
assert not isinstance(q_o, u.Quantity)
assert q_o.dtype == np.bool
assert np.all(q_o == ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))
q_o2 = ufunc(q_i1 / q_i2, 2.)
assert not isinstance(q_o2, u.Quantity)
assert q_o2.dtype == np.bool
assert np.all(q_o2 == ufunc((q_i1 / q_i2)
.to_value(u.dimensionless_unscaled), 2.))
# comparison with 0., inf, nan is OK even for dimensional quantities
for arbitrary_unit_value in (0., np.inf, np.nan):
ufunc(q_i1, arbitrary_unit_value)
ufunc(q_i1, arbitrary_unit_value*np.ones(len(q_i1)))
# and just for completeness
ufunc(q_i1, np.array([0., np.inf, np.nan]))
@pytest.mark.parametrize(('ufunc'), [np.greater, np.greater_equal,
np.less, np.less_equal,
np.not_equal, np.equal])
def test_comparison_invalid_units(self, ufunc):
q_i1 = 4.7 * u.m
q_i2 = 9.4 * u.s
with pytest.raises(u.UnitsError) as exc:
ufunc(q_i1, q_i2)
assert "compatible dimensions" in exc.value.args[0]
class TestInplaceUfuncs(object):
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_one_argument_ufunc_inplace(self, value):
# without scaling
s = value * u.rad
check = s
np.sin(s, out=s)
assert check is s
assert check.unit == u.dimensionless_unscaled
# with scaling
s2 = (value * u.rad).to(u.deg)
check2 = s2
np.sin(s2, out=s2)
assert check2 is s2
assert check2.unit == u.dimensionless_unscaled
assert_allclose(s.value, s2.value)
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_one_argument_ufunc_inplace_2(self, value):
"""Check inplace works with non-quantity input and quantity output"""
s = value * u.m
check = s
np.absolute(value, out=s)
assert check is s
assert np.all(check.value == np.absolute(value))
assert check.unit is u.dimensionless_unscaled
np.sqrt(value, out=s)
assert check is s
assert np.all(check.value == np.sqrt(value))
assert check.unit is u.dimensionless_unscaled
np.exp(value, out=s)
assert check is s
assert np.all(check.value == np.exp(value))
assert check.unit is u.dimensionless_unscaled
np.arcsin(value/10., out=s)
assert check is s
assert np.all(check.value == np.arcsin(value/10.))
assert check.unit is u.radian
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_one_argument_two_output_ufunc_inplace(self, value):
v = 100. * value * u.cm / u.m
v_copy = v.copy()
tmp = v.copy()
check = v
np.modf(v, tmp, v) # cannot use out1,out2 keywords with numpy 1.7
assert check is v
assert check.unit == u.dimensionless_unscaled
v2 = v_copy.to(u.dimensionless_unscaled)
check2 = v2
np.modf(v2, tmp, v2)
assert check2 is v2
assert check2.unit == u.dimensionless_unscaled
# can also replace in last position if no scaling is needed
v3 = v_copy.to(u.dimensionless_unscaled)
check3 = v3
np.modf(v3, v3, tmp)
assert check3 is v3
assert check3.unit == u.dimensionless_unscaled
# in np<1.13, without __array_ufunc__, one cannot replace input with
# first output when scaling
v4 = v_copy.copy()
if NUMPY_LT_1_13:
with pytest.raises(TypeError):
np.modf(v4, v4, tmp)
else:
check4 = v4
np.modf(v4, v4, tmp)
assert check4 is v4
assert check4.unit == u.dimensionless_unscaled
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_two_argument_ufunc_inplace_1(self, value):
s = value * u.cycle
check = s
s /= 2.
assert check is s
assert np.all(check.value == value / 2.)
s /= u.s
assert check is s
assert check.unit == u.cycle / u.s
s *= 2. * u.s
assert check is s
assert np.all(check == value * u.cycle)
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_two_argument_ufunc_inplace_2(self, value):
s = value * u.cycle
check = s
np.arctan2(s, s, out=s)
assert check is s
assert check.unit == u.radian
with pytest.raises(u.UnitsError):
s += 1. * u.m
assert check is s
assert check.unit == u.radian
np.arctan2(1. * u.deg, s, out=s)
assert check is s
assert check.unit == u.radian
np.add(1. * u.deg, s, out=s)
assert check is s
assert check.unit == u.deg
np.multiply(2. / u.s, s, out=s)
assert check is s
assert check.unit == u.deg / u.s
def test_two_argument_ufunc_inplace_3(self):
s = np.array([1., 2., 3.]) * u.dimensionless_unscaled
np.add(np.array([1., 2., 3.]), np.array([1., 2., 3.]) * 2., out=s)
assert np.all(s.value == np.array([3., 6., 9.]))
assert s.unit is u.dimensionless_unscaled
np.arctan2(np.array([1., 2., 3.]), np.array([1., 2., 3.]) * 2., out=s)
assert_allclose(s.value, np.arctan2(1., 2.))
assert s.unit is u.radian
@pytest.mark.skipif(NUMPY_LT_1_13, reason="numpy >=1.13 required.")
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_two_argument_two_output_ufunc_inplace(self, value):
v = value * u.m
divisor = 70.*u.cm
v1 = v.copy()
tmp = v.copy()
check = np.divmod(v1, divisor, out=(tmp, v1))
assert check[0] is tmp and check[1] is v1
assert tmp.unit == u.dimensionless_unscaled
assert v1.unit == v.unit
v2 = v.copy()
check2 = np.divmod(v2, divisor, out=(v2, tmp))
assert check2[0] is v2 and check2[1] is tmp
assert v2.unit == u.dimensionless_unscaled
assert tmp.unit == v.unit
v3a = v.copy()
v3b = v.copy()
check3 = np.divmod(v3a, divisor, out=(v3a, v3b))
assert check3[0] is v3a and check3[1] is v3b
assert v3a.unit == u.dimensionless_unscaled
assert v3b.unit == v.unit
def test_ufunc_inplace_non_contiguous_data(self):
# ensure inplace works also for non-contiguous data (closes #1834)
s = np.arange(10.) * u.m
s_copy = s.copy()
s2 = s[::2]
s2 += 1. * u.cm
assert np.all(s[::2] > s_copy[::2])
assert np.all(s[1::2] == s_copy[1::2])
def test_ufunc_inplace_non_standard_dtype(self):
"""Check that inplace operations check properly for casting.
First two tests that check that float32 is kept close #3976.
"""
a1 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.float32)
a1 *= np.float32(10)
assert a1.unit is u.m
assert a1.dtype == np.float32
a2 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.float32)
a2 += (20.*u.km)
assert a2.unit is u.m
assert a2.dtype == np.float32
# For integer, in-place only works if no conversion is done.
a3 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.int32)
a3 += u.Quantity(10, u.m, dtype=np.int64)
assert a3.unit is u.m
assert a3.dtype == np.int32
a4 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.int32)
with pytest.raises(TypeError):
a4 += u.Quantity(10, u.mm, dtype=np.int64)
@pytest.mark.xfail("NUMPY_LT_1_13")
class TestUfuncAt(object):
"""Test that 'at' method for ufuncs (calculates in-place at given indices)
For Quantities, since calculations are in-place, it makes sense only
if the result is still a quantity, and if the unit does not have to change
"""
def test_one_argument_ufunc_at(self):
q = np.arange(10.) * u.m
i = np.array([1, 2])
qv = q.value.copy()
np.negative.at(q, i)
np.negative.at(qv, i)
assert np.all(q.value == qv)
assert q.unit is u.m
# cannot change from quantity to bool array
with pytest.raises(TypeError):
np.isfinite.at(q, i)
# for selective in-place, cannot change the unit
with pytest.raises(u.UnitsError):
np.square.at(q, i)
# except if the unit does not change (i.e., dimensionless)
d = np.arange(10.) * u.dimensionless_unscaled
dv = d.value.copy()
np.square.at(d, i)
np.square.at(dv, i)
assert np.all(d.value == dv)
assert d.unit is u.dimensionless_unscaled
d = np.arange(10.) * u.dimensionless_unscaled
dv = d.value.copy()
np.log.at(d, i)
np.log.at(dv, i)
assert np.all(d.value == dv)
assert d.unit is u.dimensionless_unscaled
# also for sine it doesn't work, even if given an angle
a = np.arange(10.) * u.radian
with pytest.raises(u.UnitsError):
np.sin.at(a, i)
# except, for consistency, if we have made radian equivalent to
# dimensionless (though hopefully it will never be needed)
av = a.value.copy()
with u.add_enabled_equivalencies(u.dimensionless_angles()):
np.sin.at(a, i)
np.sin.at(av, i)
assert_allclose(a.value, av)
# but we won't do double conversion
ad = np.arange(10.) * u.degree
with pytest.raises(u.UnitsError):
np.sin.at(ad, i)
def test_two_argument_ufunc_at(self):
        s = np.arange(10.) * u.m
        i = np.array([1, 2])
        check = s.value.copy()
        np.add.at(s, i, 1.*u.km)
        np.add.at(check, i, 1000.)
        assert np.all(s.value == check)
        assert s.unit is u.m

        with pytest.raises(u.UnitsError):
            np.add.at(s, i, 1.*u.s)

        # also raise UnitsError if unit would have to be changed
        with pytest.raises(u.UnitsError):
            np.multiply.at(s, i, 1*u.s)

        # but be fine if it does not
        s = np.arange(10.) * u.m
        check = s.value.copy()
        np.multiply.at(s, i, 2.*u.dimensionless_unscaled)
        np.multiply.at(check, i, 2)
        assert np.all(s.value == check)

        s = np.arange(10.) * u.m
        np.multiply.at(s, i, 2.)
        assert np.all(s.value == check)

        # of course cannot change class of data either
        with pytest.raises(TypeError):
            np.greater.at(s, i, 1.*u.km)


@pytest.mark.xfail("NUMPY_LT_1_13")
class TestUfuncReduceReduceatAccumulate(object):
    """Test 'reduce', 'reduceat' and 'accumulate' methods for ufuncs

    For Quantities, it makes sense only if the unit does not have to change
    """

    def test_one_argument_ufunc_reduce_accumulate(self):
        # one argument cannot be used
        s = np.arange(10.) * u.radian
        i = np.array([0, 5, 1, 6])
        with pytest.raises(ValueError):
            np.sin.reduce(s)
        with pytest.raises(ValueError):
            np.sin.accumulate(s)
        with pytest.raises(ValueError):
            np.sin.reduceat(s, i)

    def test_two_argument_ufunc_reduce_accumulate(self):
        s = np.arange(10.) * u.m
        i = np.array([0, 5, 1, 6])
        check = s.value.copy()
        s_add_reduce = np.add.reduce(s)
        check_add_reduce = np.add.reduce(check)
        assert s_add_reduce.value == check_add_reduce
        assert s_add_reduce.unit is u.m

        s_add_accumulate = np.add.accumulate(s)
        check_add_accumulate = np.add.accumulate(check)
        assert np.all(s_add_accumulate.value == check_add_accumulate)
        assert s_add_accumulate.unit is u.m

        s_add_reduceat = np.add.reduceat(s, i)
        check_add_reduceat = np.add.reduceat(check, i)
        assert np.all(s_add_reduceat.value == check_add_reduceat)
        assert s_add_reduceat.unit is u.m

        # reduce(at) or accumulate on comparisons makes no sense,
        # as intermediate result is not even a Quantity
        with pytest.raises(TypeError):
            np.greater.reduce(s)
        with pytest.raises(TypeError):
            np.greater.accumulate(s)
        with pytest.raises(TypeError):
            np.greater.reduceat(s, i)

        # raise UnitsError if unit would have to be changed
        with pytest.raises(u.UnitsError):
            np.multiply.reduce(s)
        with pytest.raises(u.UnitsError):
            np.multiply.accumulate(s)
        with pytest.raises(u.UnitsError):
            np.multiply.reduceat(s, i)

        # but be fine if it does not
        s = np.arange(10.) * u.dimensionless_unscaled
        check = s.value.copy()
        s_multiply_reduce = np.multiply.reduce(s)
        check_multiply_reduce = np.multiply.reduce(check)
        assert s_multiply_reduce.value == check_multiply_reduce
        assert s_multiply_reduce.unit is u.dimensionless_unscaled

        s_multiply_accumulate = np.multiply.accumulate(s)
        check_multiply_accumulate = np.multiply.accumulate(check)
        assert np.all(s_multiply_accumulate.value == check_multiply_accumulate)
        assert s_multiply_accumulate.unit is u.dimensionless_unscaled

        s_multiply_reduceat = np.multiply.reduceat(s, i)
        check_multiply_reduceat = np.multiply.reduceat(check, i)
        assert np.all(s_multiply_reduceat.value == check_multiply_reduceat)
        assert s_multiply_reduceat.unit is u.dimensionless_unscaled


@pytest.mark.xfail("NUMPY_LT_1_13")
class TestUfuncOuter(object):
    """Test 'outer' methods for ufuncs

    Just a few spot checks, since it uses the same code as the regular
    ufunc call
    """

    def test_one_argument_ufunc_outer(self):
        # one argument cannot be used
        s = np.arange(10.) * u.radian
        with pytest.raises(ValueError):
            np.sin.outer(s)

    def test_two_argument_ufunc_outer(self):
        s1 = np.arange(10.) * u.m
        s2 = np.arange(2.) * u.s
        check1 = s1.value
        check2 = s2.value
        s12_multiply_outer = np.multiply.outer(s1, s2)
        check12_multiply_outer = np.multiply.outer(check1, check2)
        assert np.all(s12_multiply_outer.value == check12_multiply_outer)
        assert s12_multiply_outer.unit == s1.unit * s2.unit

        # raise UnitsError if appropriate
        with pytest.raises(u.UnitsError):
            np.add.outer(s1, s2)

        # but be fine if it does not
        s3 = np.arange(2.) * s1.unit
        check3 = s3.value
        s13_add_outer = np.add.outer(s1, s3)
        check13_add_outer = np.add.outer(check1, check3)
        assert np.all(s13_add_outer.value == check13_add_outer)
        assert s13_add_outer.unit is s1.unit

        s13_greater_outer = np.greater.outer(s1, s3)
        check13_greater_outer = np.greater.outer(check1, check3)
        assert type(s13_greater_outer) is np.ndarray
        assert np.all(s13_greater_outer == check13_greater_outer)
| AustereCuriosity/astropy | astropy/units/tests/test_quantity_ufuncs.py | Python | bsd-3-clause | 38,162 |
#!/usr/bin/env python
"""
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, University of California, Berkeley
# All rights reserved.
# Authors: Cameron Lee ([email protected]) and Dmitry Berenson ([email protected])
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of University of California, Berkeley nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
"""
This node advertises an action which is used by the main lightning node
(see run_lightning.py) to run the Retrieve and Repair portion of LightningROS.
This node relies on a planner_stoppable type node to repair the paths, the
PathTools library to retrieve paths from the library (this is not a separate
node; just a python library that it calls), and the PathTools python library
which calls the collision_checker service and advertises a topic for displaying
stuff in RViz.
"""
import roslib
import rospy
import actionlib
import threading
from tools.PathTools import PlanTrajectoryWrapper, InvalidSectionWrapper, DrawPointsWrapper
from pathlib.PathLibrary import *
from lightning.msg import Float64Array, RRAction, RRResult
from lightning.msg import StopPlanning, RRStats
from lightning.srv import ManagePathLibrary, ManagePathLibraryResponse
import sys
import pickle
import time
# Name of this node.
RR_NODE_NAME = "rr_node"
# Name to use for stopping the repair planner. Published from this node.
STOP_PLANNER_NAME = "stop_rr_planning"
# Topic to subscribe to for stopping the whole node in the middle of processing.
STOP_RR_NAME = "stop_all_rr"
# Name of library managing service run from this node.
MANAGE_LIBRARY = "manage_path_library"
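# States of the retrieve-and-repair loop in _retrieve_repair(): retrieve a
# candidate path from the library, repair its invalid sections, return the
# repaired path, then finish.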
STATE_RETRIEVE, STATE_REPAIR, STATE_RETURN_PATH, STATE_FINISHED = (0, 1, 2, 3)


class RRNode:
    def __init__(self):
        # Retrieve ROS parameters and configuration and construct various objects.
        self.robot_name = rospy.get_param("robot_name")
        self.planner_config_name = rospy.get_param("planner_config_name")
        self.current_joint_names = []
        self.current_group_name = ""
        self.plan_trajectory_wrapper = PlanTrajectoryWrapper("rr", int(rospy.get_param("~num_rr_planners")))
        self.invalid_section_wrapper = InvalidSectionWrapper()
        self.path_library = PathLibrary(rospy.get_param("~path_library_dir"), rospy.get_param("step_size"), node_size=int(rospy.get_param("~path_library_path_node_size")), sg_node_size=int(rospy.get_param("~path_library_sg_node_size")), dtw_dist=float(rospy.get_param("~dtw_distance")))
        self.num_paths_checked = int(rospy.get_param("~num_paths_to_collision_check"))
        self.stop_lock = threading.Lock()
        self.stop = True
        self.rr_server = actionlib.SimpleActionServer(RR_NODE_NAME, RRAction, execute_cb=self._retrieve_repair, auto_start=False)
        self.rr_server.start()
        self.stop_rr_subscriber = rospy.Subscriber(STOP_RR_NAME, StopPlanning, self._stop_rr_planner)
        self.stop_rr_planner_publisher = rospy.Publisher(STOP_PLANNER_NAME, StopPlanning, queue_size=10)
        self.manage_library_service = rospy.Service(MANAGE_LIBRARY, ManagePathLibrary, self._do_manage_action)
        self.stats_pub = rospy.Publisher("rr_stats", RRStats, queue_size=10)
        self.repaired_sections_lock = threading.Lock()
        self.repaired_sections = []
        self.working_lock = threading.Lock()  # to ensure that node is not doing RR and doing a library management action at the same time

        # if draw_points is True, then display points in rviz
        self.draw_points = rospy.get_param("draw_points")
        if self.draw_points:
            self.draw_points_wrapper = DrawPointsWrapper()

    def _set_repaired_section(self, index, section):
        """
        After you have done the path planning to repair a section, store
        the repaired path section.

        Args:
            index (int): the index corresponding to the section being repaired.
            section (path, list of list of float): A path to store.
        """
        self.repaired_sections_lock.acquire()
        self.repaired_sections[index] = section
        self.repaired_sections_lock.release()

    def _call_planner(self, start, goal, planning_time):
        """
        Calls a standard planner to plan between two points with an allowed
        planning time.

        Args:
            start (list of float): A joint configuration corresponding to the
                start position of the path.
            goal (list of float): The joint configuration corresponding to the
                goal position for the path.
            planning_time (float): Maximum allowed time to spend planning, in
                seconds.

        Returns:
            path: A list of joint configurations corresponding to the planned
                path.
        """
        ret = None
        planner_number = self.plan_trajectory_wrapper.acquire_planner()
        if not self._need_to_stop():
            ret = self.plan_trajectory_wrapper.plan_trajectory(start, goal, planner_number, self.current_joint_names, self.current_group_name, planning_time, self.planner_config_name)
        self.plan_trajectory_wrapper.release_planner(planner_number)
        return ret

    def _repair_thread(self, index, start, goal, start_index, goal_index, planning_time):
        """
        Handles repairing a portion of the path.
        All that this function really does is to plan from scratch between
        the start and goal configurations and then store the planned path
        in the appropriate places and draws either the repaired path or, if
        the repair fails, the start and goal.

        Args:
            index (int): The index to pass to _set_repaired_section(),
                corresponding to which of the invalid sections of the path we
                are repairing.
            start (list of float): The start joint configuration to use.
            goal (list of float): The goal joint configuration to use.
            start_index (int): The index in the overall path corresponding to
                start. Only used for debugging info.
            goal_index (int): The index in the overall path corresponding to
                goal. Only used for debugging info.
            planning_time (float): Maximum allowed time to spend planning, in
                seconds.
        """
        repaired_path = self._call_planner(start, goal, planning_time)
        if self.draw_points:
            if repaired_path is not None and len(repaired_path) > 0:
                rospy.loginfo("RR action server: got repaired section with start = %s, goal = %s" % (repaired_path[0], repaired_path[-1]))
                self.draw_points_wrapper.draw_points(repaired_path, self.current_group_name, "repaired"+str(start_index)+"_"+str(goal_index), DrawPointsWrapper.ANGLES, DrawPointsWrapper.GREENBLUE, 1.0, 0.01)
            else:
                if self.draw_points:
                    rospy.loginfo("RR action server: path repair for section (%i, %i) failed, start = %s, goal = %s" % (start_index, goal_index, start, goal))
                    self.draw_points_wrapper.draw_points([start, goal], self.current_group_name, "failed_repair"+str(start_index)+"_"+str(goal_index), DrawPointsWrapper.ANGLES, DrawPointsWrapper.GREENBLUE, 1.0)
        if self._need_to_stop():
            self._set_repaired_section(index, None)
        else:
            self._set_repaired_section(index, repaired_path)

    def _need_to_stop(self):
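        """Returns True if a stop has been requested (thread-safe read of self.stop)."""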
        self.stop_lock.acquire()
        ret = self.stop
        self.stop_lock.release()
        return ret

    def _set_stop_value(self, val):
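        """Sets the stop flag to val in a thread-safe manner."""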
        self.stop_lock.acquire()
        self.stop = val
        self.stop_lock.release()

    def do_retrieved_path_drawing(self, projected, retrieved, invalid):
        """
        Draws the points from the various paths involved in the planning
        in different colors in different namespaces.

        All of the arguments are lists of joint configurations, where each
        joint configuration is a list of joint angles.
        The only distinction between the different arguments being passed in
        is which color the points in question are being drawn in.
        Uses the DrawPointsWrapper to draw the points.

        Args:
            projected (list of list of float): List of points to draw as
                projected between the library path and the actual start/goal
                position. Will be drawn in blue.
            retrieved (list of list of float): The path retrieved straight
                from the path library. Will be drawn in white.
            invalid (list of list of float): List of points which were invalid.
                Will be drawn in red.
        """
        if len(projected) > 0:
            if self.draw_points:
                self.draw_points_wrapper.draw_points(retrieved, self.current_group_name, "retrieved", DrawPointsWrapper.ANGLES, DrawPointsWrapper.WHITE, 0.1)
                projectionDisplay = projected[:projected.index(retrieved[0])]+projected[projected.index(retrieved[-1])+1:]
                self.draw_points_wrapper.draw_points(projectionDisplay, self.current_group_name, "projection", DrawPointsWrapper.ANGLES, DrawPointsWrapper.BLUE, 0.2)
                invalidDisplay = []
                for invSec in invalid:
                    invalidDisplay += projected[invSec[0]+1:invSec[-1]]
                self.draw_points_wrapper.draw_points(invalidDisplay, self.current_group_name, "invalid", DrawPointsWrapper.ANGLES, DrawPointsWrapper.RED, 0.2)

    def _retrieve_repair(self, action_goal):
        """
        Callback which performs the full Retrieve and Repair for the path.
        """
        self.working_lock.acquire()
        self.start_time = time.time()
        self.stats_msg = RRStats()
        self._set_stop_value(False)
        if self.draw_points:
            self.draw_points_wrapper.clear_points()
        rospy.loginfo("RR action server: RR got an action goal")
        s, g = action_goal.start, action_goal.goal
        res = RRResult()
        res.status.status = res.status.FAILURE
        self.current_joint_names = action_goal.joint_names
        self.current_group_name = action_goal.group_name
        projected, retrieved, invalid = [], [], []
        repair_state = STATE_RETRIEVE

        self.stats_msg.init_time = time.time() - self.start_time

        # Go through the retrieve, repair, and return stages of the planning.
        # The while loop should only ever go through 3 iterations, one for each
        # stage.
        while not self._need_to_stop() and repair_state != STATE_FINISHED:
            if repair_state == STATE_RETRIEVE:
                start_retrieve = time.time()
                projected, retrieved, invalid = self.path_library.retrieve_path(s, g, self.num_paths_checked, self.robot_name, self.current_group_name, self.current_joint_names)
                self.stats_msg.retrieve_time.append(time.time() - start_retrieve)
                if len(projected) == 0:
                    rospy.loginfo("RR action server: got an empty path for retrieve state")
                    repair_state = STATE_FINISHED
                else:
                    start_draw = time.time()
                    if self.draw_points:
                        self.do_retrieved_path_drawing(projected, retrieved, invalid)
                    self.stats_msg.draw_time.append(time.time() - start_draw)
                    repair_state = STATE_REPAIR
            elif repair_state == STATE_REPAIR:
                start_repair = time.time()
                repaired = self._path_repair(projected, action_goal.allowed_planning_time.to_sec(), invalid_sections=invalid)
                self.stats_msg.repair_time.append(time.time() - start_repair)
                if repaired is None:
                    rospy.loginfo("RR action server: path repair didn't finish")
                    repair_state = STATE_FINISHED
                else:
                    repair_state = STATE_RETURN_PATH
            elif repair_state == STATE_RETURN_PATH:
                start_return = time.time()
                res.status.status = res.status.SUCCESS
                res.retrieved_path = [Float64Array(p) for p in retrieved]
                res.repaired_path = [Float64Array(p) for p in repaired]
                rospy.loginfo("RR action server: returning a path")
                repair_state = STATE_FINISHED
                self.stats_msg.return_time = time.time() - start_return
        if repair_state == STATE_RETRIEVE:
            rospy.loginfo("RR action server: stopped before it retrieved a path")
        elif repair_state == STATE_REPAIR:
            rospy.loginfo("RR action server: stopped before it could repair a retrieved path")
        elif repair_state == STATE_RETURN_PATH:
            rospy.loginfo("RR action server: stopped before it could return a repaired path")
        self.rr_server.set_succeeded(res)
        self.stats_msg.total_time = time.time() - self.start_time
        self.stats_pub.publish(self.stats_msg)
        self.working_lock.release()

    def _path_repair(self, original_path, planning_time, invalid_sections=None, use_parallel_repairing=True):
        """
        Goes through each invalid section in a path and calls a planner to
        repair it, with the potential for multi-threading. Returns the
        repaired path.

        Args:
            original_path (path): The original path which needs repairing.
            planning_time (float): The maximum allowed planning time for
                each repair, in seconds.
            invalid_sections (list of pairs of indices): The pairs of indices
                describing the invalid sections. If None, then the invalid
                sections will be computed by this function.
            use_parallel_repairing (bool): Whether or not to use multi-threading.

        Returns:
            path: The repaired path.
        """
        zeros_tuple = tuple([0 for i in xrange(len(self.current_joint_names))])
        rospy.loginfo("RR action server: got path with %d points" % len(original_path))

        if invalid_sections is None:
            invalid_sections = self.invalid_section_wrapper.getInvalidSectionsForPath(original_path, self.current_group_name)
        rospy.loginfo("RR action server: invalid sections: %s" % (str(invalid_sections)))
        if len(invalid_sections) > 0:
            if invalid_sections[0][0] == -1:
                rospy.loginfo("RR action server: Start is not a valid state...nothing can be done")
                return None
            if invalid_sections[-1][1] == len(original_path):
                rospy.loginfo("RR action server: Goal is not a valid state...nothing can be done")
                return None

            if use_parallel_repairing:
                # multi-threaded repairing
                self.repaired_sections = [None for i in xrange(len(invalid_sections))]
                # each thread replans an invalid section
                threadList = []
                for i, sec in enumerate(invalid_sections):
                    th = threading.Thread(target=self._repair_thread, args=(i, original_path[sec[0]], original_path[sec[-1]], sec[0], sec[-1], planning_time))
                    threadList.append(th)
                    th.start()
                for th in threadList:
                    th.join()
                # once all threads return, then the repaired sections can be combined
                for item in self.repaired_sections:
                    if item is None:
                        rospy.loginfo("RR action server: RR node was stopped during repair or repair failed")
                        return None
                # replace invalid sections with replanned sections
                new_path = original_path[0:invalid_sections[0][0]]
                for i in xrange(len(invalid_sections)):
                    new_path += self.repaired_sections[i]
                    if i+1 < len(invalid_sections):
                        new_path += original_path[invalid_sections[i][1]+1:invalid_sections[i+1][0]]
                new_path += original_path[invalid_sections[-1][1]+1:]
                self.repaired_sections = []  # reset repaired_sections
            else:
                # single-threaded repairing
                rospy.loginfo("RR action server: Got invalid sections: %s" % str(invalid_sections))
                new_path = original_path[0:invalid_sections[0][0]]
                for i in xrange(len(invalid_sections)):
                    if not self._need_to_stop():
                        # start_invalid and end_invalid must correspond to valid states when passed to the planner
                        start_invalid, end_invalid = invalid_sections[i]
                        rospy.loginfo("RR action server: Requesting path to replace from %d to %d" % (start_invalid, end_invalid))
                        repairedSection = self._call_planner(original_path[start_invalid], original_path[end_invalid], planning_time)
                        if repairedSection is None:
                            rospy.loginfo("RR action server: RR section repair was stopped or failed")
                            return None
                        rospy.loginfo("RR action server: Planner returned a trajectory of %d points for %d to %d" % (len(repairedSection), start_invalid, end_invalid))
                        new_path += repairedSection
                        if i+1 < len(invalid_sections):
                            new_path += original_path[end_invalid+1:invalid_sections[i+1][0]]
                    else:
                        rospy.loginfo("RR action server: RR was stopped while it was repairing the retrieved path")
                        return None
                new_path += original_path[invalid_sections[-1][1]+1:]
            rospy.loginfo("RR action server: Trajectory after replan has %d points" % len(new_path))
        else:
            new_path = original_path

        rospy.loginfo("RR action server: new trajectory has %i points" % (len(new_path)))
        return new_path

    def _stop_rr_planner(self, msg):
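        """
        Callback for STOP_RR_NAME messages: sets the stop flag and forwards
        the stop request to the repair planners via STOP_PLANNER_NAME.
        """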
        self._set_stop_value(True)
        rospy.loginfo("RR action server: RR node got a stop message")
        self.stop_rr_planner_publisher.publish(msg)

    def _do_manage_action(self, request):
        """
        Processes a ManagePathLibraryRequest as part of the ManagePathLibrary
        service. Basically, either stores a path in the library or deletes it.
        """
        response = ManagePathLibraryResponse()
        response.result = response.FAILURE
        if request.robot_name == "" or len(request.joint_names) == 0:
            rospy.logerr("RR action server: robot name or joint names were not provided")
            return response

        self.working_lock.acquire()
        if request.action == request.ACTION_STORE:
            rospy.loginfo("RR action server: got a path to store in path library")
            if len(request.path_to_store) > 0:
                new_path = [p.positions for p in request.path_to_store]
                if len(request.retrieved_path) == 0:
                    # PFS won so just store the path
                    store_path_result = self.path_library.store_path(new_path, request.robot_name, request.joint_names)
                else:
                    store_path_result = self.path_library.store_path(new_path, request.robot_name, request.joint_names, [p.positions for p in request.retrieved_path])
                response.result = response.SUCCESS
                response.path_stored, response.num_library_paths = store_path_result
            else:
                response.message = "Path to store had no points"
        elif request.action == request.ACTION_DELETE_PATH:
            rospy.loginfo("RR action server: got a request to delete path %i in the path library" % (request.delete_id))
            if self.path_library.delete_path_by_id(request.delete_id, request.robot_name, request.joint_names):
                response.result = response.SUCCESS
            else:
                response.message = "No path in the library had id %i" % (request.delete_id)
        elif request.action == request.ACTION_DELETE_LIBRARY:
            rospy.loginfo("RR action server: got a request to delete library corresponding to robot %s and joints %s" % (request.robot_name, request.joint_names))
            if self.path_library.delete_library(request.robot_name, request.joint_names):
                response.result = response.SUCCESS
            else:
                response.message = "No library corresponding to robot %s and joint names %s exists" % (request.robot_name, request.joint_names)
        else:
            rospy.logerr("RR action server: manage path library request did not have a valid action set")
        self.working_lock.release()
        return response
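
# Initialize the ROS node, construct the RRNode (which advertises its action,
# topics, and service), and spin until shutdown.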
if __name__ == "__main__":
    try:
        rospy.init_node("rr_node")
        RRNode()
        rospy.loginfo("Retrieve-repair: ready")
        rospy.spin()
    except rospy.ROSInterruptException:
        pass
| WPI-ARC/lightning_ros | scripts/RR_action_server.py | Python | bsd-3-clause | 22,385 |