max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---|
poll/models/telemetry_models.py | mirokrastev/poll-website | 3 | 2900 | <filename>poll/models/telemetry_models.py
from django.db import models
from django.contrib.auth import get_user_model
from poll.models.poll_models import Poll


class BasePollTelemetry(models.Model):
    """
    This Base class gives a hint that in the future
    more Telemetry classes could be implemented.
    """
    poll = models.ForeignKey(db_index=True, to=Poll, on_delete=models.CASCADE)

    def __str__(self):
        return str(self.poll)

    class Meta:
        abstract = True


class AnonymousUserPollTelemetry(models.Model):
    """
    To "store" the anonymous users that have viewed the Poll,
    I need to store their IP Addresses. It will NEVER be displayed outside the admin panel.
    """
    anonymous_user = models.GenericIPAddressField(blank=True, null=True)

    def __str__(self):
        return str(self.anonymous_user)


class UsersPollTelemetry(BasePollTelemetry):
    users = models.ManyToManyField(db_index=True, to=get_user_model())
    anonymous_users = models.ManyToManyField(db_index=True, to=AnonymousUserPollTelemetry)

    class Meta:
        verbose_name = 'PollTelemetry'
        verbose_name_plural = 'PollTelemetry'
| 2.46875 | 2 |
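The row above defines the telemetry data model only. As a minimal editorial sketch (not code from `mirokrastev/poll-website`), the snippet below shows how a Django view might feed these models; the function name `record_poll_view` and the IP-extraction detail are assumptions, and only standard Django APIs are used.

```python
# Hypothetical usage sketch, not part of the dataset row above: record a poll
# view against the telemetry models, using the authenticated user or remote IP.
from django.http import HttpRequest

from poll.models.poll_models import Poll
from poll.models.telemetry_models import AnonymousUserPollTelemetry, UsersPollTelemetry


def record_poll_view(request: HttpRequest, poll: Poll) -> None:
    # One UsersPollTelemetry row per poll; created lazily on first view.
    telemetry, _ = UsersPollTelemetry.objects.get_or_create(poll=poll)
    if request.user.is_authenticated:
        telemetry.users.add(request.user)
    else:
        # REMOTE_ADDR may be a proxy address when running behind a load balancer.
        ip = request.META.get('REMOTE_ADDR')
        anon, _ = AnonymousUserPollTelemetry.objects.get_or_create(anonymous_user=ip)
        telemetry.anonymous_users.add(anon)
```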
pcdet/models/backbones_2d/__init__.py | HenryLittle/OpenPCDet-HL | 0 | 2901 | from .base_bev_backbone import BaseBEVBackbone
from .decouple_bev_backbone import DecoupledBEVBackbone
__all__ = {
'BaseBEVBackbone': BaseBEVBackbone,
'DecoupledBEVBackbone': DecoupledBEVBackbone,
}
| 1.117188 | 1 |
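The `__all__` dict in the row above acts as a name-to-class registry for 2D backbones. Below is a hedged sketch of how such a registry is typically consumed by a builder; the function `build_backbone_2d` and the `model_cfg.NAME` / `input_channels` parameters are illustrative assumptions, not verified code from `HenryLittle/OpenPCDet-HL`.

```python
# Hypothetical builder sketch: resolve the backbone class named in the config
# via the registry dict exported by pcdet/models/backbones_2d/__init__.py.
from pcdet.models.backbones_2d import __all__ as backbone_registry


def build_backbone_2d(model_cfg, input_channels):
    # Look the class up by its configured name, e.g. 'DecoupledBEVBackbone'.
    backbone_cls = backbone_registry[model_cfg.NAME]
    return backbone_cls(model_cfg=model_cfg, input_channels=input_channels)
```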
test/unit/app/tools/test_select_parameters.py | beatrizserrano/galaxy | 0 | 2902 | from unittest.mock import Mock
import pytest
from galaxy import model
from galaxy.tools.parameters import basic
from .util import BaseParameterTestCase
class SelectToolParameterTestCase(BaseParameterTestCase):
def test_validated_values(self):
self.options_xml = """<options><filter type="data_meta" ref="input_bam" key="dbkey"/></options>"""
with pytest.raises(ValueError) as exc_info:
self.param.from_json("42", self.trans, {"input_bam": model.HistoryDatasetAssociation()})
assert str(exc_info.value) == "parameter 'my_name': requires a value, but no legal values defined"
def test_validated_values_missing_dependency(self):
self.options_xml = """<options><filter type="data_meta" ref="input_bam" key="dbkey"/></options>"""
with pytest.raises(ValueError) as exc_info:
self.param.from_json("42", self.trans)
assert str(exc_info.value) == "parameter 'my_name': requires a value, but no legal values defined"
def test_unvalidated_values(self):
self.options_xml = """<options><filter type="data_meta" ref="input_bam" key="dbkey"/></options>"""
self.trans.workflow_building_mode = True
assert self.param.from_json("42", self.trans) == "42"
def test_validated_datasets(self):
self.options_xml = """<options><filter type="data_meta" ref="input_bam" key="dbkey"/></options>"""
with pytest.raises(ValueError) as exc_info:
self.param.from_json(model.HistoryDatasetAssociation(), self.trans, {"input_bam": None})
assert str(exc_info.value) == "parameter 'my_name': requires a value, but no legal values defined"
def test_unvalidated_datasets(self):
self.options_xml = """<options><filter type="data_meta" ref="input_bam" key="dbkey"/></options>"""
self.trans.workflow_building_mode = True
assert isinstance(
self.param.from_json(model.HistoryDatasetAssociation(), self.trans, {"input_bam": basic.RuntimeValue()}),
model.HistoryDatasetAssociation,
)
def test_filter_param_value(self):
self.options_xml = """<options from_data_table="test_table"><filter type="param_value" ref="input_bam" column="0" /></options>"""
assert ("testname1", "testpath1", False) in self.param.get_options(self.trans, {"input_bam": "testname1"})
assert ("testname2", "testpath2", False) in self.param.get_options(self.trans, {"input_bam": "testname2"})
assert len(self.param.get_options(self.trans, {"input_bam": "testname3"})) == 0
def test_filter_param_value2(self):
# Same test as above, but filtering on a different column.
self.options_xml = """<options from_data_table="test_table"><filter type="param_value" ref="input_bam" column="1" /></options>"""
assert ("testname1", "testpath1", False) in self.param.get_options(self.trans, {"input_bam": "testpath1"})
assert ("testname2", "testpath2", False) in self.param.get_options(self.trans, {"input_bam": "testpath2"})
assert len(self.param.get_options(self.trans, {"input_bam": "testpath3"})) == 0
# TODO: Good deal of overlap here with DataToolParameterTestCase,
# refactor.
def setUp(self):
super().setUp()
self.test_history = model.History()
self.app.model.context.add(self.test_history)
self.app.model.context.flush()
self.app.tool_data_tables["test_table"] = MockToolDataTable()
self.trans = Mock(
app=self.app,
get_history=lambda: self.test_history,
get_current_user_roles=lambda: [],
workflow_building_mode=False,
webapp=Mock(name="galaxy"),
)
self.type = "select"
self.set_data_ref = False
self.multiple = False
self.optional = False
self.options_xml = ""
self._param = None
@property
def param(self):
if not self._param:
multi_text = ""
if self.multiple:
multi_text = 'multiple="True"'
optional_text = ""
if self.optional:
optional_text = 'optional="True"'
options_text = self.options_xml
data_ref_text = ""
if self.set_data_ref:
data_ref_text = 'data_ref="input_bam"'
template_xml = """<param name="my_name" type="%s" %s %s %s>%s</param>"""
param_str = template_xml % (self.type, data_ref_text, multi_text, optional_text, options_text)
self._param = self._parameter_for(xml=param_str)
return self._param
class MockToolDataTable:
def __init__(self):
self.columns = dict(
name=0,
value=1,
)
self.missing_index_file = None
def get_fields(self):
return [["testname1", "testpath1"], ["testname2", "testpath2"]]
| 2.421875 | 2 |
recumpiler/__init__.py | Toasterstein/recumpiler | 0 | 2903 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""recumpiler
Recompile text to be semi-readable memey garbage.
"""
__version__ = (0, 0, 0)
| 1.34375 | 1 |
net/net.gyp | codenote/chromium-test | 0 | 2904 | <reponame>codenote/chromium-test<gh_stars>0
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
'linux_link_kerberos%': 0,
'conditions': [
['chromeos==1 or OS=="android" or OS=="ios"', {
# Disable Kerberos on ChromeOS, Android and iOS, at least for now.
# It needs configuration (krb5.conf and so on).
'use_kerberos%': 0,
}, { # chromeos == 0
'use_kerberos%': 1,
}],
['OS=="android" and target_arch != "ia32"', {
# The way the cache uses mmap() is inefficient on some Android devices.
# If this flag is set, we hackily avoid using mmap() in the disk cache.
# We are pretty confident that mmap-ing the index would not hurt any
# existing x86 android devices, but we cannot be so sure about the
# variety of ARM devices. So enable it for x86 only for now.
'posix_avoid_mmap%': 1,
}, {
'posix_avoid_mmap%': 0,
}],
['OS=="ios"', {
# Websockets and socket stream are not used on iOS.
'enable_websockets%': 0,
# iOS does not use V8.
'use_v8_in_net%': 0,
'enable_built_in_dns%': 0,
}, {
'enable_websockets%': 1,
'use_v8_in_net%': 1,
'enable_built_in_dns%': 1,
}],
],
},
'includes': [
'../build/win_precompile.gypi',
],
'targets': [
{
'target_name': 'net',
'type': '<(component)',
'variables': { 'enable_wexit_time_destructors': 1, },
'dependencies': [
'../base/base.gyp:base',
'../base/base.gyp:base_i18n',
'../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
'../build/temp_gyp/googleurl.gyp:googleurl',
'../crypto/crypto.gyp:crypto',
'../sdch/sdch.gyp:sdch',
'../third_party/icu/icu.gyp:icui18n',
'../third_party/icu/icu.gyp:icuuc',
'../third_party/zlib/zlib.gyp:zlib',
'net_resources',
],
'sources': [
'android/cert_verify_result_android.h',
'android/cert_verify_result_android_list.h',
'android/gurl_utils.cc',
'android/gurl_utils.h',
'android/keystore.cc',
'android/keystore.h',
'android/keystore_openssl.cc',
'android/keystore_openssl.h',
'android/net_jni_registrar.cc',
'android/net_jni_registrar.h',
'android/network_change_notifier_android.cc',
'android/network_change_notifier_android.h',
'android/network_change_notifier_delegate_android.cc',
'android/network_change_notifier_delegate_android.h',
'android/network_change_notifier_factory_android.cc',
'android/network_change_notifier_factory_android.h',
'android/network_library.cc',
'android/network_library.h',
'base/address_family.h',
'base/address_list.cc',
'base/address_list.h',
'base/address_tracker_linux.cc',
'base/address_tracker_linux.h',
'base/auth.cc',
'base/auth.h',
'base/backoff_entry.cc',
'base/backoff_entry.h',
'base/bandwidth_metrics.cc',
'base/bandwidth_metrics.h',
'base/big_endian.cc',
'base/big_endian.h',
'base/cache_type.h',
'base/completion_callback.h',
'base/connection_type_histograms.cc',
'base/connection_type_histograms.h',
'base/crypto_module.h',
'base/crypto_module_nss.cc',
'base/crypto_module_openssl.cc',
'base/data_url.cc',
'base/data_url.h',
'base/directory_lister.cc',
'base/directory_lister.h',
'base/dns_reloader.cc',
'base/dns_reloader.h',
'base/dns_util.cc',
'base/dns_util.h',
'base/escape.cc',
'base/escape.h',
'base/expiring_cache.h',
'base/file_stream.cc',
'base/file_stream.h',
'base/file_stream_context.cc',
'base/file_stream_context.h',
'base/file_stream_context_posix.cc',
'base/file_stream_context_win.cc',
'base/file_stream_metrics.cc',
'base/file_stream_metrics.h',
'base/file_stream_metrics_posix.cc',
'base/file_stream_metrics_win.cc',
'base/file_stream_net_log_parameters.cc',
'base/file_stream_net_log_parameters.h',
'base/file_stream_whence.h',
'base/filter.cc',
'base/filter.h',
'base/int128.cc',
'base/int128.h',
'base/gzip_filter.cc',
'base/gzip_filter.h',
'base/gzip_header.cc',
'base/gzip_header.h',
'base/hash_value.cc',
'base/hash_value.h',
'base/host_mapping_rules.cc',
'base/host_mapping_rules.h',
'base/host_port_pair.cc',
'base/host_port_pair.h',
'base/io_buffer.cc',
'base/io_buffer.h',
'base/ip_endpoint.cc',
'base/ip_endpoint.h',
'base/keygen_handler.cc',
'base/keygen_handler.h',
'base/keygen_handler_mac.cc',
'base/keygen_handler_nss.cc',
'base/keygen_handler_openssl.cc',
'base/keygen_handler_win.cc',
'base/linked_hash_map.h',
'base/load_flags.h',
'base/load_flags_list.h',
'base/load_states.h',
'base/load_states_list.h',
'base/load_timing_info.cc',
'base/load_timing_info.h',
'base/mime_sniffer.cc',
'base/mime_sniffer.h',
'base/mime_util.cc',
'base/mime_util.h',
'base/net_error_list.h',
'base/net_errors.cc',
'base/net_errors.h',
'base/net_errors_posix.cc',
'base/net_errors_win.cc',
'base/net_export.h',
'base/net_log.cc',
'base/net_log.h',
'base/net_log_event_type_list.h',
'base/net_log_source_type_list.h',
'base/net_module.cc',
'base/net_module.h',
'base/net_util.cc',
'base/net_util.h',
'base/net_util_posix.cc',
'base/net_util_win.cc',
'base/network_change_notifier.cc',
'base/network_change_notifier.h',
'base/network_change_notifier_factory.h',
'base/network_change_notifier_linux.cc',
'base/network_change_notifier_linux.h',
'base/network_change_notifier_mac.cc',
'base/network_change_notifier_mac.h',
'base/network_change_notifier_win.cc',
'base/network_change_notifier_win.h',
'base/network_config_watcher_mac.cc',
'base/network_config_watcher_mac.h',
'base/network_delegate.cc',
'base/network_delegate.h',
'base/nss_memio.c',
'base/nss_memio.h',
'base/openssl_private_key_store.h',
'base/openssl_private_key_store_android.cc',
'base/openssl_private_key_store_memory.cc',
'base/platform_mime_util.h',
# TODO(tc): gnome-vfs? xdgmime? /etc/mime.types?
'base/platform_mime_util_linux.cc',
'base/platform_mime_util_mac.mm',
'base/platform_mime_util_win.cc',
'base/prioritized_dispatcher.cc',
'base/prioritized_dispatcher.h',
'base/priority_queue.h',
'base/rand_callback.h',
'base/registry_controlled_domains/registry_controlled_domain.cc',
'base/registry_controlled_domains/registry_controlled_domain.h',
'base/request_priority.h',
'base/sdch_filter.cc',
'base/sdch_filter.h',
'base/sdch_manager.cc',
'base/sdch_manager.h',
'base/static_cookie_policy.cc',
'base/static_cookie_policy.h',
'base/sys_addrinfo.h',
'base/test_data_stream.cc',
'base/test_data_stream.h',
'base/upload_bytes_element_reader.cc',
'base/upload_bytes_element_reader.h',
'base/upload_data.cc',
'base/upload_data.h',
'base/upload_data_stream.cc',
'base/upload_data_stream.h',
'base/upload_element.cc',
'base/upload_element.h',
'base/upload_element_reader.cc',
'base/upload_element_reader.h',
'base/upload_file_element_reader.cc',
'base/upload_file_element_reader.h',
'base/upload_progress.h',
'base/url_util.cc',
'base/url_util.h',
'base/winsock_init.cc',
'base/winsock_init.h',
'base/winsock_util.cc',
'base/winsock_util.h',
'base/zap.cc',
'base/zap.h',
'cert/asn1_util.cc',
'cert/asn1_util.h',
'cert/cert_database.cc',
'cert/cert_database.h',
'cert/cert_database_android.cc',
'cert/cert_database_ios.cc',
'cert/cert_database_mac.cc',
'cert/cert_database_nss.cc',
'cert/cert_database_openssl.cc',
'cert/cert_database_win.cc',
'cert/cert_status_flags.cc',
'cert/cert_status_flags.h',
'cert/cert_trust_anchor_provider.h',
'cert/cert_verifier.cc',
'cert/cert_verifier.h',
'cert/cert_verify_proc.cc',
'cert/cert_verify_proc.h',
'cert/cert_verify_proc_android.cc',
'cert/cert_verify_proc_android.h',
'cert/cert_verify_proc_mac.cc',
'cert/cert_verify_proc_mac.h',
'cert/cert_verify_proc_nss.cc',
'cert/cert_verify_proc_nss.h',
'cert/cert_verify_proc_openssl.cc',
'cert/cert_verify_proc_openssl.h',
'cert/cert_verify_proc_win.cc',
'cert/cert_verify_proc_win.h',
'cert/cert_verify_result.cc',
'cert/cert_verify_result.h',
'cert/crl_set.cc',
'cert/crl_set.h',
'cert/ev_root_ca_metadata.cc',
'cert/ev_root_ca_metadata.h',
'cert/multi_threaded_cert_verifier.cc',
'cert/multi_threaded_cert_verifier.h',
'cert/nss_cert_database.cc',
'cert/nss_cert_database.h',
'cert/pem_tokenizer.cc',
'cert/pem_tokenizer.h',
'cert/single_request_cert_verifier.cc',
'cert/single_request_cert_verifier.h',
'cert/test_root_certs.cc',
'cert/test_root_certs.h',
'cert/test_root_certs_mac.cc',
'cert/test_root_certs_nss.cc',
'cert/test_root_certs_openssl.cc',
'cert/test_root_certs_android.cc',
'cert/test_root_certs_win.cc',
'cert/x509_cert_types.cc',
'cert/x509_cert_types.h',
'cert/x509_cert_types_mac.cc',
'cert/x509_cert_types_win.cc',
'cert/x509_certificate.cc',
'cert/x509_certificate.h',
'cert/x509_certificate_ios.cc',
'cert/x509_certificate_mac.cc',
'cert/x509_certificate_net_log_param.cc',
'cert/x509_certificate_net_log_param.h',
'cert/x509_certificate_nss.cc',
'cert/x509_certificate_openssl.cc',
'cert/x509_certificate_win.cc',
'cert/x509_util.h',
'cert/x509_util.cc',
'cert/x509_util_ios.cc',
'cert/x509_util_ios.h',
'cert/x509_util_mac.cc',
'cert/x509_util_mac.h',
'cert/x509_util_nss.cc',
'cert/x509_util_nss.h',
'cert/x509_util_openssl.cc',
'cert/x509_util_openssl.h',
'cookies/canonical_cookie.cc',
'cookies/canonical_cookie.h',
'cookies/cookie_monster.cc',
'cookies/cookie_monster.h',
'cookies/cookie_options.h',
'cookies/cookie_store.cc',
'cookies/cookie_store.h',
'cookies/cookie_util.cc',
'cookies/cookie_util.h',
'cookies/parsed_cookie.cc',
'cookies/parsed_cookie.h',
'disk_cache/addr.cc',
'disk_cache/addr.h',
'disk_cache/backend_impl.cc',
'disk_cache/backend_impl.h',
'disk_cache/bitmap.cc',
'disk_cache/bitmap.h',
'disk_cache/block_files.cc',
'disk_cache/block_files.h',
'disk_cache/cache_creator.cc',
'disk_cache/cache_util.h',
'disk_cache/cache_util.cc',
'disk_cache/cache_util_posix.cc',
'disk_cache/cache_util_win.cc',
'disk_cache/disk_cache.h',
'disk_cache/disk_format.cc',
'disk_cache/disk_format.h',
'disk_cache/entry_impl.cc',
'disk_cache/entry_impl.h',
'disk_cache/errors.h',
'disk_cache/eviction.cc',
'disk_cache/eviction.h',
'disk_cache/experiments.h',
'disk_cache/file.cc',
'disk_cache/file.h',
'disk_cache/file_block.h',
'disk_cache/file_lock.cc',
'disk_cache/file_lock.h',
'disk_cache/file_posix.cc',
'disk_cache/file_win.cc',
'disk_cache/histogram_macros.h',
'disk_cache/in_flight_backend_io.cc',
'disk_cache/in_flight_backend_io.h',
'disk_cache/in_flight_io.cc',
'disk_cache/in_flight_io.h',
'disk_cache/mapped_file.h',
'disk_cache/mapped_file_posix.cc',
'disk_cache/mapped_file_avoid_mmap_posix.cc',
'disk_cache/mapped_file_win.cc',
'disk_cache/mem_backend_impl.cc',
'disk_cache/mem_backend_impl.h',
'disk_cache/mem_entry_impl.cc',
'disk_cache/mem_entry_impl.h',
'disk_cache/mem_rankings.cc',
'disk_cache/mem_rankings.h',
'disk_cache/net_log_parameters.cc',
'disk_cache/net_log_parameters.h',
'disk_cache/rankings.cc',
'disk_cache/rankings.h',
'disk_cache/sparse_control.cc',
'disk_cache/sparse_control.h',
'disk_cache/stats.cc',
'disk_cache/stats.h',
'disk_cache/stats_histogram.cc',
'disk_cache/stats_histogram.h',
'disk_cache/storage_block-inl.h',
'disk_cache/storage_block.h',
'disk_cache/stress_support.h',
'disk_cache/trace.cc',
'disk_cache/trace.h',
'disk_cache/simple/simple_backend_impl.cc',
'disk_cache/simple/simple_backend_impl.h',
'disk_cache/simple/simple_disk_format.cc',
'disk_cache/simple/simple_disk_format.h',
'disk_cache/simple/simple_entry_impl.cc',
'disk_cache/simple/simple_entry_impl.h',
'disk_cache/simple/simple_index.cc',
'disk_cache/simple/simple_index.h',
'disk_cache/simple/simple_synchronous_entry.cc',
'disk_cache/simple/simple_synchronous_entry.h',
'disk_cache/flash/flash_entry_impl.cc',
'disk_cache/flash/flash_entry_impl.h',
'disk_cache/flash/format.h',
'disk_cache/flash/internal_entry.cc',
'disk_cache/flash/internal_entry.h',
'disk_cache/flash/log_store.cc',
'disk_cache/flash/log_store.h',
'disk_cache/flash/log_store_entry.cc',
'disk_cache/flash/log_store_entry.h',
'disk_cache/flash/segment.cc',
'disk_cache/flash/segment.h',
'disk_cache/flash/storage.cc',
'disk_cache/flash/storage.h',
'dns/address_sorter.h',
'dns/address_sorter_posix.cc',
'dns/address_sorter_posix.h',
'dns/address_sorter_win.cc',
'dns/dns_client.cc',
'dns/dns_client.h',
'dns/dns_config_service.cc',
'dns/dns_config_service.h',
'dns/dns_config_service_posix.cc',
'dns/dns_config_service_posix.h',
'dns/dns_config_service_win.cc',
'dns/dns_config_service_win.h',
'dns/dns_hosts.cc',
'dns/dns_hosts.h',
'dns/dns_protocol.h',
'dns/dns_query.cc',
'dns/dns_query.h',
'dns/dns_response.cc',
'dns/dns_response.h',
'dns/dns_session.cc',
'dns/dns_session.h',
'dns/dns_socket_pool.cc',
'dns/dns_socket_pool.h',
'dns/dns_transaction.cc',
'dns/dns_transaction.h',
'dns/host_cache.cc',
'dns/host_cache.h',
'dns/host_resolver.cc',
'dns/host_resolver.h',
'dns/host_resolver_impl.cc',
'dns/host_resolver_impl.h',
'dns/host_resolver_proc.cc',
'dns/host_resolver_proc.h',
'dns/mapped_host_resolver.cc',
'dns/mapped_host_resolver.h',
'dns/notify_watcher_mac.cc',
'dns/notify_watcher_mac.h',
'dns/serial_worker.cc',
'dns/serial_worker.h',
'dns/single_request_host_resolver.cc',
'dns/single_request_host_resolver.h',
'ftp/ftp_auth_cache.cc',
'ftp/ftp_auth_cache.h',
'ftp/ftp_ctrl_response_buffer.cc',
'ftp/ftp_ctrl_response_buffer.h',
'ftp/ftp_directory_listing_parser.cc',
'ftp/ftp_directory_listing_parser.h',
'ftp/ftp_directory_listing_parser_ls.cc',
'ftp/ftp_directory_listing_parser_ls.h',
'ftp/ftp_directory_listing_parser_netware.cc',
'ftp/ftp_directory_listing_parser_netware.h',
'ftp/ftp_directory_listing_parser_os2.cc',
'ftp/ftp_directory_listing_parser_os2.h',
'ftp/ftp_directory_listing_parser_vms.cc',
'ftp/ftp_directory_listing_parser_vms.h',
'ftp/ftp_directory_listing_parser_windows.cc',
'ftp/ftp_directory_listing_parser_windows.h',
'ftp/ftp_network_layer.cc',
'ftp/ftp_network_layer.h',
'ftp/ftp_network_session.cc',
'ftp/ftp_network_session.h',
'ftp/ftp_network_transaction.cc',
'ftp/ftp_network_transaction.h',
'ftp/ftp_request_info.h',
'ftp/ftp_response_info.cc',
'ftp/ftp_response_info.h',
'ftp/ftp_server_type_histograms.cc',
'ftp/ftp_server_type_histograms.h',
'ftp/ftp_transaction.h',
'ftp/ftp_transaction_factory.h',
'ftp/ftp_util.cc',
'ftp/ftp_util.h',
'http/des.cc',
'http/des.h',
'http/http_atom_list.h',
'http/http_auth.cc',
'http/http_auth.h',
'http/http_auth_cache.cc',
'http/http_auth_cache.h',
'http/http_auth_controller.cc',
'http/http_auth_controller.h',
'http/http_auth_filter.cc',
'http/http_auth_filter.h',
'http/http_auth_filter_win.h',
'http/http_auth_gssapi_posix.cc',
'http/http_auth_gssapi_posix.h',
'http/http_auth_handler.cc',
'http/http_auth_handler.h',
'http/http_auth_handler_basic.cc',
'http/http_auth_handler_basic.h',
'http/http_auth_handler_digest.cc',
'http/http_auth_handler_digest.h',
'http/http_auth_handler_factory.cc',
'http/http_auth_handler_factory.h',
'http/http_auth_handler_negotiate.cc',
'http/http_auth_handler_negotiate.h',
'http/http_auth_handler_ntlm.cc',
'http/http_auth_handler_ntlm.h',
'http/http_auth_handler_ntlm_portable.cc',
'http/http_auth_handler_ntlm_win.cc',
'http/http_auth_sspi_win.cc',
'http/http_auth_sspi_win.h',
'http/http_basic_stream.cc',
'http/http_basic_stream.h',
'http/http_byte_range.cc',
'http/http_byte_range.h',
'http/http_cache.cc',
'http/http_cache.h',
'http/http_cache_transaction.cc',
'http/http_cache_transaction.h',
'http/http_content_disposition.cc',
'http/http_content_disposition.h',
'http/http_chunked_decoder.cc',
'http/http_chunked_decoder.h',
'http/http_network_layer.cc',
'http/http_network_layer.h',
'http/http_network_session.cc',
'http/http_network_session.h',
'http/http_network_session_peer.cc',
'http/http_network_session_peer.h',
'http/http_network_transaction.cc',
'http/http_network_transaction.h',
'http/http_pipelined_connection.h',
'http/http_pipelined_connection_impl.cc',
'http/http_pipelined_connection_impl.h',
'http/http_pipelined_host.cc',
'http/http_pipelined_host.h',
'http/http_pipelined_host_capability.h',
'http/http_pipelined_host_forced.cc',
'http/http_pipelined_host_forced.h',
'http/http_pipelined_host_impl.cc',
'http/http_pipelined_host_impl.h',
'http/http_pipelined_host_pool.cc',
'http/http_pipelined_host_pool.h',
'http/http_pipelined_stream.cc',
'http/http_pipelined_stream.h',
'http/http_proxy_client_socket.cc',
'http/http_proxy_client_socket.h',
'http/http_proxy_client_socket_pool.cc',
'http/http_proxy_client_socket_pool.h',
'http/http_request_headers.cc',
'http/http_request_headers.h',
'http/http_request_info.cc',
'http/http_request_info.h',
'http/http_response_body_drainer.cc',
'http/http_response_body_drainer.h',
'http/http_response_headers.cc',
'http/http_response_headers.h',
'http/http_response_info.cc',
'http/http_response_info.h',
'http/http_security_headers.cc',
'http/http_security_headers.h',
'http/http_server_properties.cc',
'http/http_server_properties.h',
'http/http_server_properties_impl.cc',
'http/http_server_properties_impl.h',
'http/http_status_code.h',
'http/http_stream.h',
'http/http_stream_base.h',
'http/http_stream_factory.cc',
'http/http_stream_factory.h',
'http/http_stream_factory_impl.cc',
'http/http_stream_factory_impl.h',
'http/http_stream_factory_impl_job.cc',
'http/http_stream_factory_impl_job.h',
'http/http_stream_factory_impl_request.cc',
'http/http_stream_factory_impl_request.h',
'http/http_stream_parser.cc',
'http/http_stream_parser.h',
'http/http_transaction.h',
'http/http_transaction_delegate.h',
'http/http_transaction_factory.h',
'http/http_util.cc',
'http/http_util.h',
'http/http_util_icu.cc',
'http/http_vary_data.cc',
'http/http_vary_data.h',
'http/http_version.h',
'http/md4.cc',
'http/md4.h',
'http/partial_data.cc',
'http/partial_data.h',
'http/proxy_client_socket.h',
'http/proxy_client_socket.cc',
'http/transport_security_state.cc',
'http/transport_security_state.h',
'http/transport_security_state_static.h',
'http/url_security_manager.cc',
'http/url_security_manager.h',
'http/url_security_manager_posix.cc',
'http/url_security_manager_win.cc',
'ocsp/nss_ocsp.cc',
'ocsp/nss_ocsp.h',
'proxy/dhcp_proxy_script_adapter_fetcher_win.cc',
'proxy/dhcp_proxy_script_adapter_fetcher_win.h',
'proxy/dhcp_proxy_script_fetcher.cc',
'proxy/dhcp_proxy_script_fetcher.h',
'proxy/dhcp_proxy_script_fetcher_factory.cc',
'proxy/dhcp_proxy_script_fetcher_factory.h',
'proxy/dhcp_proxy_script_fetcher_win.cc',
'proxy/dhcp_proxy_script_fetcher_win.h',
'proxy/dhcpcsvc_init_win.cc',
'proxy/dhcpcsvc_init_win.h',
'proxy/multi_threaded_proxy_resolver.cc',
'proxy/multi_threaded_proxy_resolver.h',
'proxy/network_delegate_error_observer.cc',
'proxy/network_delegate_error_observer.h',
'proxy/polling_proxy_config_service.cc',
'proxy/polling_proxy_config_service.h',
'proxy/proxy_bypass_rules.cc',
'proxy/proxy_bypass_rules.h',
'proxy/proxy_config.cc',
'proxy/proxy_config.h',
'proxy/proxy_config_service.h',
'proxy/proxy_config_service_android.cc',
'proxy/proxy_config_service_android.h',
'proxy/proxy_config_service_fixed.cc',
'proxy/proxy_config_service_fixed.h',
'proxy/proxy_config_service_ios.cc',
'proxy/proxy_config_service_ios.h',
'proxy/proxy_config_service_linux.cc',
'proxy/proxy_config_service_linux.h',
'proxy/proxy_config_service_mac.cc',
'proxy/proxy_config_service_mac.h',
'proxy/proxy_config_service_win.cc',
'proxy/proxy_config_service_win.h',
'proxy/proxy_config_source.cc',
'proxy/proxy_config_source.h',
'proxy/proxy_info.cc',
'proxy/proxy_info.h',
'proxy/proxy_list.cc',
'proxy/proxy_list.h',
'proxy/proxy_resolver.h',
'proxy/proxy_resolver_error_observer.h',
'proxy/proxy_resolver_mac.cc',
'proxy/proxy_resolver_mac.h',
'proxy/proxy_resolver_script.h',
'proxy/proxy_resolver_script_data.cc',
'proxy/proxy_resolver_script_data.h',
'proxy/proxy_resolver_winhttp.cc',
'proxy/proxy_resolver_winhttp.h',
'proxy/proxy_retry_info.h',
'proxy/proxy_script_decider.cc',
'proxy/proxy_script_decider.h',
'proxy/proxy_script_fetcher.h',
'proxy/proxy_script_fetcher_impl.cc',
'proxy/proxy_script_fetcher_impl.h',
'proxy/proxy_server.cc',
'proxy/proxy_server.h',
'proxy/proxy_server_mac.cc',
'proxy/proxy_service.cc',
'proxy/proxy_service.h',
'quic/blocked_list.h',
'quic/congestion_control/available_channel_estimator.cc',
'quic/congestion_control/available_channel_estimator.h',
'quic/congestion_control/channel_estimator.cc',
'quic/congestion_control/channel_estimator.h',
'quic/congestion_control/cube_root.cc',
'quic/congestion_control/cube_root.h',
'quic/congestion_control/cubic.cc',
'quic/congestion_control/cubic.h',
'quic/congestion_control/fix_rate_receiver.cc',
'quic/congestion_control/fix_rate_receiver.h',
'quic/congestion_control/fix_rate_sender.cc',
'quic/congestion_control/fix_rate_sender.h',
'quic/congestion_control/hybrid_slow_start.cc',
'quic/congestion_control/hybrid_slow_start.h',
'quic/congestion_control/inter_arrival_bitrate_ramp_up.cc',
'quic/congestion_control/inter_arrival_bitrate_ramp_up.h',
'quic/congestion_control/inter_arrival_overuse_detector.cc',
'quic/congestion_control/inter_arrival_overuse_detector.h',
'quic/congestion_control/inter_arrival_probe.cc',
'quic/congestion_control/inter_arrival_probe.h',
'quic/congestion_control/inter_arrival_receiver.cc',
'quic/congestion_control/inter_arrival_receiver.h',
'quic/congestion_control/inter_arrival_sender.cc',
'quic/congestion_control/inter_arrival_sender.h',
'quic/congestion_control/inter_arrival_state_machine.cc',
'quic/congestion_control/inter_arrival_state_machine.h',
'quic/congestion_control/leaky_bucket.cc',
'quic/congestion_control/leaky_bucket.h',
'quic/congestion_control/paced_sender.cc',
'quic/congestion_control/paced_sender.h',
'quic/congestion_control/quic_congestion_manager.cc',
'quic/congestion_control/quic_congestion_manager.h',
'quic/congestion_control/quic_max_sized_map.h',
'quic/congestion_control/receive_algorithm_interface.cc',
'quic/congestion_control/receive_algorithm_interface.h',
'quic/congestion_control/send_algorithm_interface.cc',
'quic/congestion_control/send_algorithm_interface.h',
'quic/congestion_control/tcp_cubic_sender.cc',
'quic/congestion_control/tcp_cubic_sender.h',
'quic/congestion_control/tcp_receiver.cc',
'quic/congestion_control/tcp_receiver.h',
'quic/crypto/aes_128_gcm_decrypter.h',
'quic/crypto/aes_128_gcm_decrypter_nss.cc',
'quic/crypto/aes_128_gcm_decrypter_openssl.cc',
'quic/crypto/aes_128_gcm_encrypter.h',
'quic/crypto/aes_128_gcm_encrypter_nss.cc',
'quic/crypto/aes_128_gcm_encrypter_openssl.cc',
'quic/crypto/crypto_framer.cc',
'quic/crypto/crypto_framer.h',
'quic/crypto/crypto_handshake.cc',
'quic/crypto/crypto_handshake.h',
'quic/crypto/crypto_protocol.h',
'quic/crypto/crypto_utils.cc',
'quic/crypto/crypto_utils.h',
'quic/crypto/curve25519_key_exchange.cc',
'quic/crypto/curve25519_key_exchange.h',
'quic/crypto/key_exchange.h',
'quic/crypto/null_decrypter.cc',
'quic/crypto/null_decrypter.h',
'quic/crypto/null_encrypter.cc',
'quic/crypto/null_encrypter.h',
'quic/crypto/p256_key_exchange.h',
'quic/crypto/p256_key_exchange_nss.cc',
'quic/crypto/p256_key_exchange_openssl.cc',
'quic/crypto/quic_decrypter.cc',
'quic/crypto/quic_decrypter.h',
'quic/crypto/quic_encrypter.cc',
'quic/crypto/quic_encrypter.h',
'quic/crypto/quic_random.cc',
'quic/crypto/quic_random.h',
'quic/crypto/scoped_evp_cipher_ctx.h',
'quic/crypto/strike_register.cc',
'quic/crypto/strike_register.h',
'quic/quic_bandwidth.cc',
'quic/quic_bandwidth.h',
'quic/quic_blocked_writer_interface.h',
'quic/quic_client_session.cc',
'quic/quic_client_session.h',
'quic/quic_crypto_client_stream.cc',
'quic/quic_crypto_client_stream.h',
'quic/quic_crypto_client_stream_factory.h',
'quic/quic_crypto_server_stream.cc',
'quic/quic_crypto_server_stream.h',
'quic/quic_crypto_stream.cc',
'quic/quic_crypto_stream.h',
'quic/quic_clock.cc',
'quic/quic_clock.h',
'quic/quic_connection.cc',
'quic/quic_connection.h',
'quic/quic_connection_helper.cc',
'quic/quic_connection_helper.h',
'quic/quic_connection_logger.cc',
'quic/quic_connection_logger.h',
'quic/quic_data_reader.cc',
'quic/quic_data_reader.h',
'quic/quic_data_writer.cc',
'quic/quic_data_writer.h',
'quic/quic_fec_group.cc',
'quic/quic_fec_group.h',
'quic/quic_framer.cc',
'quic/quic_framer.h',
'quic/quic_http_stream.cc',
'quic/quic_http_stream.h',
'quic/quic_packet_creator.cc',
'quic/quic_packet_creator.h',
'quic/quic_packet_entropy_manager.cc',
'quic/quic_packet_entropy_manager.h',
'quic/quic_packet_generator.cc',
'quic/quic_packet_generator.h',
'quic/quic_protocol.cc',
'quic/quic_protocol.h',
'quic/quic_reliable_client_stream.cc',
'quic/quic_reliable_client_stream.h',
'quic/quic_session.cc',
'quic/quic_session.h',
'quic/quic_stats.cc',
'quic/quic_stats.h',
'quic/quic_stream_factory.cc',
'quic/quic_stream_factory.h',
'quic/quic_stream_sequencer.cc',
'quic/quic_stream_sequencer.h',
'quic/quic_time.cc',
'quic/quic_time.h',
'quic/quic_utils.cc',
'quic/quic_utils.h',
'quic/reliable_quic_stream.cc',
'quic/reliable_quic_stream.h',
'socket/buffered_write_stream_socket.cc',
'socket/buffered_write_stream_socket.h',
'socket/client_socket_factory.cc',
'socket/client_socket_factory.h',
'socket/client_socket_handle.cc',
'socket/client_socket_handle.h',
'socket/client_socket_pool.cc',
'socket/client_socket_pool.h',
'socket/client_socket_pool_base.cc',
'socket/client_socket_pool_base.h',
'socket/client_socket_pool_histograms.cc',
'socket/client_socket_pool_histograms.h',
'socket/client_socket_pool_manager.cc',
'socket/client_socket_pool_manager.h',
'socket/client_socket_pool_manager_impl.cc',
'socket/client_socket_pool_manager_impl.h',
'socket/next_proto.h',
'socket/nss_ssl_util.cc',
'socket/nss_ssl_util.h',
'socket/server_socket.h',
'socket/socket_net_log_params.cc',
'socket/socket_net_log_params.h',
'socket/socket.h',
'socket/socks5_client_socket.cc',
'socket/socks5_client_socket.h',
'socket/socks_client_socket.cc',
'socket/socks_client_socket.h',
'socket/socks_client_socket_pool.cc',
'socket/socks_client_socket_pool.h',
'socket/ssl_client_socket.cc',
'socket/ssl_client_socket.h',
'socket/ssl_client_socket_nss.cc',
'socket/ssl_client_socket_nss.h',
'socket/ssl_client_socket_openssl.cc',
'socket/ssl_client_socket_openssl.h',
'socket/ssl_client_socket_pool.cc',
'socket/ssl_client_socket_pool.h',
'socket/ssl_error_params.cc',
'socket/ssl_error_params.h',
'socket/ssl_server_socket.h',
'socket/ssl_server_socket_nss.cc',
'socket/ssl_server_socket_nss.h',
'socket/ssl_server_socket_openssl.cc',
'socket/ssl_socket.h',
'socket/stream_listen_socket.cc',
'socket/stream_listen_socket.h',
'socket/stream_socket.cc',
'socket/stream_socket.h',
'socket/tcp_client_socket.cc',
'socket/tcp_client_socket.h',
'socket/tcp_client_socket_libevent.cc',
'socket/tcp_client_socket_libevent.h',
'socket/tcp_client_socket_win.cc',
'socket/tcp_client_socket_win.h',
'socket/tcp_listen_socket.cc',
'socket/tcp_listen_socket.h',
'socket/tcp_server_socket.h',
'socket/tcp_server_socket_libevent.cc',
'socket/tcp_server_socket_libevent.h',
'socket/tcp_server_socket_win.cc',
'socket/tcp_server_socket_win.h',
'socket/transport_client_socket_pool.cc',
'socket/transport_client_socket_pool.h',
'socket/unix_domain_socket_posix.cc',
'socket/unix_domain_socket_posix.h',
'socket_stream/socket_stream.cc',
'socket_stream/socket_stream.h',
'socket_stream/socket_stream_job.cc',
'socket_stream/socket_stream_job.h',
'socket_stream/socket_stream_job_manager.cc',
'socket_stream/socket_stream_job_manager.h',
'socket_stream/socket_stream_metrics.cc',
'socket_stream/socket_stream_metrics.h',
'spdy/buffered_spdy_framer.cc',
'spdy/buffered_spdy_framer.h',
'spdy/spdy_bitmasks.h',
'spdy/spdy_credential_builder.cc',
'spdy/spdy_credential_builder.h',
'spdy/spdy_credential_state.cc',
'spdy/spdy_credential_state.h',
'spdy/spdy_frame_builder.cc',
'spdy/spdy_frame_builder.h',
'spdy/spdy_frame_reader.cc',
'spdy/spdy_frame_reader.h',
'spdy/spdy_framer.cc',
'spdy/spdy_framer.h',
'spdy/spdy_header_block.cc',
'spdy/spdy_header_block.h',
'spdy/spdy_http_stream.cc',
'spdy/spdy_http_stream.h',
'spdy/spdy_http_utils.cc',
'spdy/spdy_http_utils.h',
'spdy/spdy_io_buffer.cc',
'spdy/spdy_io_buffer.h',
'spdy/spdy_priority_forest.h',
'spdy/spdy_protocol.cc',
'spdy/spdy_protocol.h',
'spdy/spdy_proxy_client_socket.cc',
'spdy/spdy_proxy_client_socket.h',
'spdy/spdy_session.cc',
'spdy/spdy_session.h',
'spdy/spdy_session_pool.cc',
'spdy/spdy_session_pool.h',
'spdy/spdy_stream.cc',
'spdy/spdy_stream.h',
'spdy/spdy_websocket_stream.cc',
'spdy/spdy_websocket_stream.h',
'ssl/client_cert_store.h',
'ssl/client_cert_store_impl.h',
'ssl/client_cert_store_impl_mac.cc',
'ssl/client_cert_store_impl_nss.cc',
'ssl/client_cert_store_impl_win.cc',
'ssl/default_server_bound_cert_store.cc',
'ssl/default_server_bound_cert_store.h',
'ssl/openssl_client_key_store.cc',
'ssl/openssl_client_key_store.h',
'ssl/server_bound_cert_service.cc',
'ssl/server_bound_cert_service.h',
'ssl/server_bound_cert_store.cc',
'ssl/server_bound_cert_store.h',
'ssl/ssl_cert_request_info.cc',
'ssl/ssl_cert_request_info.h',
'ssl/ssl_cipher_suite_names.cc',
'ssl/ssl_cipher_suite_names.h',
'ssl/ssl_client_auth_cache.cc',
'ssl/ssl_client_auth_cache.h',
'ssl/ssl_client_cert_type.h',
'ssl/ssl_config_service.cc',
'ssl/ssl_config_service.h',
'ssl/ssl_config_service_defaults.cc',
'ssl/ssl_config_service_defaults.h',
'ssl/ssl_info.cc',
'ssl/ssl_info.h',
'third_party/mozilla_security_manager/nsKeygenHandler.cpp',
'third_party/mozilla_security_manager/nsKeygenHandler.h',
'third_party/mozilla_security_manager/nsNSSCertificateDB.cpp',
'third_party/mozilla_security_manager/nsNSSCertificateDB.h',
'third_party/mozilla_security_manager/nsPKCS12Blob.cpp',
'third_party/mozilla_security_manager/nsPKCS12Blob.h',
'udp/datagram_client_socket.h',
'udp/datagram_server_socket.h',
'udp/datagram_socket.h',
'udp/udp_client_socket.cc',
'udp/udp_client_socket.h',
'udp/udp_net_log_parameters.cc',
'udp/udp_net_log_parameters.h',
'udp/udp_server_socket.cc',
'udp/udp_server_socket.h',
'udp/udp_socket.h',
'udp/udp_socket_libevent.cc',
'udp/udp_socket_libevent.h',
'udp/udp_socket_win.cc',
'udp/udp_socket_win.h',
'url_request/data_protocol_handler.cc',
'url_request/data_protocol_handler.h',
'url_request/file_protocol_handler.cc',
'url_request/file_protocol_handler.h',
'url_request/fraudulent_certificate_reporter.h',
'url_request/ftp_protocol_handler.cc',
'url_request/ftp_protocol_handler.h',
'url_request/http_user_agent_settings.h',
'url_request/protocol_intercept_job_factory.cc',
'url_request/protocol_intercept_job_factory.h',
'url_request/static_http_user_agent_settings.cc',
'url_request/static_http_user_agent_settings.h',
'url_request/url_fetcher.cc',
'url_request/url_fetcher.h',
'url_request/url_fetcher_core.cc',
'url_request/url_fetcher_core.h',
'url_request/url_fetcher_delegate.cc',
'url_request/url_fetcher_delegate.h',
'url_request/url_fetcher_factory.h',
'url_request/url_fetcher_impl.cc',
'url_request/url_fetcher_impl.h',
'url_request/url_fetcher_response_writer.cc',
'url_request/url_fetcher_response_writer.h',
'url_request/url_request.cc',
'url_request/url_request.h',
'url_request/url_request_about_job.cc',
'url_request/url_request_about_job.h',
'url_request/url_request_context.cc',
'url_request/url_request_context.h',
'url_request/url_request_context_builder.cc',
'url_request/url_request_context_builder.h',
'url_request/url_request_context_getter.cc',
'url_request/url_request_context_getter.h',
'url_request/url_request_context_storage.cc',
'url_request/url_request_context_storage.h',
'url_request/url_request_data_job.cc',
'url_request/url_request_data_job.h',
'url_request/url_request_error_job.cc',
'url_request/url_request_error_job.h',
'url_request/url_request_file_dir_job.cc',
'url_request/url_request_file_dir_job.h',
'url_request/url_request_file_job.cc',
'url_request/url_request_file_job.h',
'url_request/url_request_filter.cc',
'url_request/url_request_filter.h',
'url_request/url_request_ftp_job.cc',
'url_request/url_request_ftp_job.h',
'url_request/url_request_http_job.cc',
'url_request/url_request_http_job.h',
'url_request/url_request_job.cc',
'url_request/url_request_job.h',
'url_request/url_request_job_factory.cc',
'url_request/url_request_job_factory.h',
'url_request/url_request_job_factory_impl.cc',
'url_request/url_request_job_factory_impl.h',
'url_request/url_request_job_manager.cc',
'url_request/url_request_job_manager.h',
'url_request/url_request_netlog_params.cc',
'url_request/url_request_netlog_params.h',
'url_request/url_request_redirect_job.cc',
'url_request/url_request_redirect_job.h',
'url_request/url_request_simple_job.cc',
'url_request/url_request_simple_job.h',
'url_request/url_request_status.h',
'url_request/url_request_test_job.cc',
'url_request/url_request_test_job.h',
'url_request/url_request_throttler_entry.cc',
'url_request/url_request_throttler_entry.h',
'url_request/url_request_throttler_entry_interface.h',
'url_request/url_request_throttler_header_adapter.cc',
'url_request/url_request_throttler_header_adapter.h',
'url_request/url_request_throttler_header_interface.h',
'url_request/url_request_throttler_manager.cc',
'url_request/url_request_throttler_manager.h',
'url_request/view_cache_helper.cc',
'url_request/view_cache_helper.h',
'websockets/websocket_errors.cc',
'websockets/websocket_errors.h',
'websockets/websocket_frame.cc',
'websockets/websocket_frame.h',
'websockets/websocket_frame_parser.cc',
'websockets/websocket_frame_parser.h',
'websockets/websocket_handshake_handler.cc',
'websockets/websocket_handshake_handler.h',
'websockets/websocket_job.cc',
'websockets/websocket_job.h',
'websockets/websocket_net_log_params.cc',
'websockets/websocket_net_log_params.h',
'websockets/websocket_stream.h',
'websockets/websocket_throttle.cc',
'websockets/websocket_throttle.h',
],
'defines': [
'NET_IMPLEMENTATION',
],
'export_dependent_settings': [
'../base/base.gyp:base',
],
'conditions': [
['chromeos==1', {
'sources!': [
'base/network_change_notifier_linux.cc',
'base/network_change_notifier_linux.h',
'base/network_change_notifier_netlink_linux.cc',
'base/network_change_notifier_netlink_linux.h',
'proxy/proxy_config_service_linux.cc',
'proxy/proxy_config_service_linux.h',
],
}],
['use_kerberos==1', {
'defines': [
'USE_KERBEROS',
],
'conditions': [
['OS=="openbsd"', {
'include_dirs': [
'/usr/include/kerberosV'
],
}],
['linux_link_kerberos==1', {
'link_settings': {
'ldflags': [
'<!@(krb5-config --libs gssapi)',
],
},
}, { # linux_link_kerberos==0
'defines': [
'DLOPEN_KERBEROS',
],
}],
],
}, { # use_kerberos == 0
'sources!': [
'http/http_auth_gssapi_posix.cc',
'http/http_auth_gssapi_posix.h',
'http/http_auth_handler_negotiate.h',
'http/http_auth_handler_negotiate.cc',
],
}],
['posix_avoid_mmap==1', {
'defines': [
'POSIX_AVOID_MMAP',
],
'direct_dependent_settings': {
'defines': [
'POSIX_AVOID_MMAP',
],
},
'sources!': [
'disk_cache/mapped_file_posix.cc',
],
}, { # else
'sources!': [
'disk_cache/mapped_file_avoid_mmap_posix.cc',
],
}],
['disable_ftp_support==1', {
'sources/': [
['exclude', '^ftp/'],
],
'sources!': [
'url_request/ftp_protocol_handler.cc',
'url_request/ftp_protocol_handler.h',
'url_request/url_request_ftp_job.cc',
'url_request/url_request_ftp_job.h',
],
}],
['enable_built_in_dns==1', {
'defines': [
'ENABLE_BUILT_IN_DNS',
]
}, { # else
'sources!': [
'dns/address_sorter_posix.cc',
'dns/address_sorter_posix.h',
'dns/dns_client.cc',
],
}],
['use_openssl==1', {
'sources!': [
'base/crypto_module_nss.cc',
'base/keygen_handler_nss.cc',
'base/nss_memio.c',
'base/nss_memio.h',
'cert/cert_database_nss.cc',
'cert/cert_verify_proc_nss.cc',
'cert/cert_verify_proc_nss.h',
'cert/nss_cert_database.cc',
'cert/nss_cert_database.h',
'cert/test_root_certs_nss.cc',
'cert/x509_certificate_nss.cc',
'cert/x509_util_nss.cc',
'cert/x509_util_nss.h',
'ocsp/nss_ocsp.cc',
'ocsp/nss_ocsp.h',
'quic/crypto/aes_128_gcm_decrypter_nss.cc',
'quic/crypto/aes_128_gcm_encrypter_nss.cc',
'quic/crypto/p256_key_exchange_nss.cc',
'socket/nss_ssl_util.cc',
'socket/nss_ssl_util.h',
'socket/ssl_client_socket_nss.cc',
'socket/ssl_client_socket_nss.h',
'socket/ssl_server_socket_nss.cc',
'socket/ssl_server_socket_nss.h',
'ssl/client_cert_store_impl_nss.cc',
'third_party/mozilla_security_manager/nsKeygenHandler.cpp',
'third_party/mozilla_security_manager/nsKeygenHandler.h',
'third_party/mozilla_security_manager/nsNSSCertificateDB.cpp',
'third_party/mozilla_security_manager/nsNSSCertificateDB.h',
'third_party/mozilla_security_manager/nsPKCS12Blob.cpp',
'third_party/mozilla_security_manager/nsPKCS12Blob.h',
],
},
{ # else !use_openssl: remove the unneeded files
'sources!': [
'base/crypto_module_openssl.cc',
'base/keygen_handler_openssl.cc',
'base/openssl_private_key_store.h',
'base/openssl_private_key_store_android.cc',
'base/openssl_private_key_store_memory.cc',
'cert/cert_database_openssl.cc',
'cert/cert_verify_proc_openssl.cc',
'cert/cert_verify_proc_openssl.h',
'cert/test_root_certs_openssl.cc',
'cert/x509_certificate_openssl.cc',
'cert/x509_util_openssl.cc',
'cert/x509_util_openssl.h',
'quic/crypto/aes_128_gcm_decrypter_openssl.cc',
'quic/crypto/aes_128_gcm_encrypter_openssl.cc',
'quic/crypto/p256_key_exchange_openssl.cc',
'quic/crypto/scoped_evp_cipher_ctx.h',
'socket/ssl_client_socket_openssl.cc',
'socket/ssl_client_socket_openssl.h',
'socket/ssl_server_socket_openssl.cc',
'ssl/openssl_client_key_store.cc',
'ssl/openssl_client_key_store.h',
],
},
],
[ 'use_glib == 1', {
'dependencies': [
'../build/linux/system.gyp:gconf',
'../build/linux/system.gyp:gio',
],
'conditions': [
['use_openssl==1', {
'dependencies': [
'../third_party/openssl/openssl.gyp:openssl',
],
},
{ # else use_openssl==0, use NSS
'dependencies': [
'../build/linux/system.gyp:ssl',
],
}],
['os_bsd==1', {
'sources!': [
'base/network_change_notifier_linux.cc',
'base/network_change_notifier_netlink_linux.cc',
'proxy/proxy_config_service_linux.cc',
],
},{
'dependencies': [
'../build/linux/system.gyp:libresolv',
],
}],
['OS=="solaris"', {
'link_settings': {
'ldflags': [
'-R/usr/lib/mps',
],
},
}],
],
},
{ # else: OS is not in the above list
'sources!': [
'base/crypto_module_nss.cc',
'base/keygen_handler_nss.cc',
'cert/cert_database_nss.cc',
'cert/nss_cert_database.cc',
'cert/nss_cert_database.h',
'cert/test_root_certs_nss.cc',
'cert/x509_certificate_nss.cc',
'ocsp/nss_ocsp.cc',
'ocsp/nss_ocsp.h',
'third_party/mozilla_security_manager/nsKeygenHandler.cpp',
'third_party/mozilla_security_manager/nsKeygenHandler.h',
'third_party/mozilla_security_manager/nsNSSCertificateDB.cpp',
'third_party/mozilla_security_manager/nsNSSCertificateDB.h',
'third_party/mozilla_security_manager/nsPKCS12Blob.cpp',
'third_party/mozilla_security_manager/nsPKCS12Blob.h',
],
},
],
[ 'toolkit_uses_gtk == 1', {
'dependencies': [
'../build/linux/system.gyp:gdk',
],
}],
[ 'use_nss != 1', {
'sources!': [
'cert/cert_verify_proc_nss.cc',
'cert/cert_verify_proc_nss.h',
'ssl/client_cert_store_impl_nss.cc',
],
}],
[ 'enable_websockets != 1', {
'sources/': [
['exclude', '^socket_stream/'],
['exclude', '^websockets/'],
],
'sources!': [
'spdy/spdy_websocket_stream.cc',
'spdy/spdy_websocket_stream.h',
],
}],
[ 'OS == "win"', {
'sources!': [
'http/http_auth_handler_ntlm_portable.cc',
'socket/tcp_client_socket_libevent.cc',
'socket/tcp_client_socket_libevent.h',
'socket/tcp_server_socket_libevent.cc',
'socket/tcp_server_socket_libevent.h',
'ssl/client_cert_store_impl_nss.cc',
'udp/udp_socket_libevent.cc',
'udp/udp_socket_libevent.h',
],
'dependencies': [
'../third_party/nss/nss.gyp:nspr',
'../third_party/nss/nss.gyp:nss',
'third_party/nss/ssl.gyp:libssl',
'tld_cleanup',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
}, { # else: OS != "win"
'sources!': [
'base/winsock_init.cc',
'base/winsock_init.h',
'base/winsock_util.cc',
'base/winsock_util.h',
'proxy/proxy_resolver_winhttp.cc',
'proxy/proxy_resolver_winhttp.h',
],
},
],
[ 'OS == "mac"', {
'sources!': [
'ssl/client_cert_store_impl_nss.cc',
],
'dependencies': [
'../third_party/nss/nss.gyp:nspr',
'../third_party/nss/nss.gyp:nss',
'third_party/nss/ssl.gyp:libssl',
],
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
'$(SDKROOT)/System/Library/Frameworks/Security.framework',
'$(SDKROOT)/System/Library/Frameworks/SystemConfiguration.framework',
'$(SDKROOT)/usr/lib/libresolv.dylib',
]
},
},
],
[ 'OS == "ios"', {
'dependencies': [
'../third_party/nss/nss.gyp:nss',
'third_party/nss/ssl.gyp:libssl',
],
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/CFNetwork.framework',
'$(SDKROOT)/System/Library/Frameworks/MobileCoreServices.framework',
'$(SDKROOT)/System/Library/Frameworks/Security.framework',
'$(SDKROOT)/System/Library/Frameworks/SystemConfiguration.framework',
'$(SDKROOT)/usr/lib/libresolv.dylib',
],
},
},
],
['OS=="android" and _toolset=="target" and android_webview_build == 0', {
'dependencies': [
'net_java',
],
}],
[ 'OS == "android"', {
'dependencies': [
'../third_party/openssl/openssl.gyp:openssl',
'net_jni_headers',
],
'sources!': [
'base/openssl_private_key_store_memory.cc',
'cert/cert_database_openssl.cc',
'cert/cert_verify_proc_openssl.cc',
'cert/test_root_certs_openssl.cc',
],
# The net/android/keystore_openssl.cc source file needs to
# access an OpenSSL-internal header.
'include_dirs': [
'../third_party/openssl',
],
}, { # else OS != "android"
'defines': [
# These are the features Android doesn't support.
'ENABLE_MEDIA_CODEC_THEORA',
],
},
],
[ 'OS == "linux"', {
'dependencies': [
'../build/linux/system.gyp:dbus',
'../dbus/dbus.gyp:dbus',
],
},
],
],
'target_conditions': [
# These source files are excluded by default platform rules, but they
# are needed in specific cases on other platforms. Re-including them can
# only be done in target_conditions as it is evaluated after the
# platform rules.
['OS == "android"', {
'sources/': [
['include', '^base/platform_mime_util_linux\\.cc$'],
],
}],
['OS == "ios"', {
'sources/': [
['include', '^base/network_change_notifier_mac\\.cc$'],
['include', '^base/network_config_watcher_mac\\.cc$'],
['include', '^base/platform_mime_util_mac\\.mm$'],
            # The iOS implementation only partially uses NSS and thus does not
            # define |use_nss|. In particular, the |USE_NSS| preprocessor
            # definition is not used. The following files are needed though:
['include', '^cert/cert_verify_proc_nss\\.cc$'],
['include', '^cert/cert_verify_proc_nss\\.h$'],
['include', '^cert/test_root_certs_nss\\.cc$'],
['include', '^cert/x509_util_nss\\.cc$'],
['include', '^cert/x509_util_nss\\.h$'],
['include', '^dns/notify_watcher_mac\\.cc$'],
['include', '^proxy/proxy_resolver_mac\\.cc$'],
['include', '^proxy/proxy_server_mac\\.cc$'],
['include', '^ocsp/nss_ocsp\\.cc$'],
['include', '^ocsp/nss_ocsp\\.h$'],
],
}],
],
},
{
'target_name': 'net_unittests',
'type': '<(gtest_target_type)',
'dependencies': [
'../base/base.gyp:base',
'../base/base.gyp:base_i18n',
'../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
'../build/temp_gyp/googleurl.gyp:googleurl',
'../crypto/crypto.gyp:crypto',
'../testing/gmock.gyp:gmock',
'../testing/gtest.gyp:gtest',
'../third_party/zlib/zlib.gyp:zlib',
'net',
'net_test_support',
],
'sources': [
'android/keystore_unittest.cc',
'android/network_change_notifier_android_unittest.cc',
'base/address_list_unittest.cc',
'base/address_tracker_linux_unittest.cc',
'base/backoff_entry_unittest.cc',
'base/big_endian_unittest.cc',
'base/data_url_unittest.cc',
'base/directory_lister_unittest.cc',
'base/dns_util_unittest.cc',
'base/escape_unittest.cc',
'base/expiring_cache_unittest.cc',
'base/file_stream_unittest.cc',
'base/filter_unittest.cc',
'base/int128_unittest.cc',
'base/gzip_filter_unittest.cc',
'base/host_mapping_rules_unittest.cc',
'base/host_port_pair_unittest.cc',
'base/ip_endpoint_unittest.cc',
'base/keygen_handler_unittest.cc',
'base/mime_sniffer_unittest.cc',
'base/mime_util_unittest.cc',
'base/mock_filter_context.cc',
'base/mock_filter_context.h',
'base/net_log_unittest.cc',
'base/net_log_unittest.h',
'base/net_util_unittest.cc',
'base/network_change_notifier_win_unittest.cc',
'base/prioritized_dispatcher_unittest.cc',
'base/priority_queue_unittest.cc',
'base/registry_controlled_domains/registry_controlled_domain_unittest.cc',
'base/sdch_filter_unittest.cc',
'base/static_cookie_policy_unittest.cc',
'base/test_completion_callback_unittest.cc',
'base/upload_bytes_element_reader_unittest.cc',
'base/upload_data_stream_unittest.cc',
'base/upload_file_element_reader_unittest.cc',
'base/url_util_unittest.cc',
'cert/cert_verify_proc_unittest.cc',
'cert/crl_set_unittest.cc',
'cert/ev_root_ca_metadata_unittest.cc',
'cert/multi_threaded_cert_verifier_unittest.cc',
'cert/nss_cert_database_unittest.cc',
'cert/pem_tokenizer_unittest.cc',
'cert/x509_certificate_unittest.cc',
'cert/x509_cert_types_unittest.cc',
'cert/x509_util_unittest.cc',
'cert/x509_util_nss_unittest.cc',
'cert/x509_util_openssl_unittest.cc',
'cookies/canonical_cookie_unittest.cc',
'cookies/cookie_monster_unittest.cc',
'cookies/cookie_store_unittest.h',
'cookies/cookie_util_unittest.cc',
'cookies/parsed_cookie_unittest.cc',
'disk_cache/addr_unittest.cc',
'disk_cache/backend_unittest.cc',
'disk_cache/bitmap_unittest.cc',
'disk_cache/block_files_unittest.cc',
'disk_cache/cache_util_unittest.cc',
'disk_cache/entry_unittest.cc',
'disk_cache/mapped_file_unittest.cc',
'disk_cache/storage_block_unittest.cc',
'disk_cache/flash/flash_entry_unittest.cc',
'disk_cache/flash/log_store_entry_unittest.cc',
'disk_cache/flash/log_store_unittest.cc',
'disk_cache/flash/segment_unittest.cc',
'disk_cache/flash/storage_unittest.cc',
'dns/address_sorter_posix_unittest.cc',
'dns/address_sorter_unittest.cc',
'dns/dns_config_service_posix_unittest.cc',
'dns/dns_config_service_unittest.cc',
'dns/dns_config_service_win_unittest.cc',
'dns/dns_hosts_unittest.cc',
'dns/dns_query_unittest.cc',
'dns/dns_response_unittest.cc',
'dns/dns_session_unittest.cc',
'dns/dns_transaction_unittest.cc',
'dns/host_cache_unittest.cc',
'dns/host_resolver_impl_unittest.cc',
'dns/mapped_host_resolver_unittest.cc',
'dns/serial_worker_unittest.cc',
'dns/single_request_host_resolver_unittest.cc',
'ftp/ftp_auth_cache_unittest.cc',
'ftp/ftp_ctrl_response_buffer_unittest.cc',
'ftp/ftp_directory_listing_parser_ls_unittest.cc',
'ftp/ftp_directory_listing_parser_netware_unittest.cc',
'ftp/ftp_directory_listing_parser_os2_unittest.cc',
'ftp/ftp_directory_listing_parser_unittest.cc',
'ftp/ftp_directory_listing_parser_unittest.h',
'ftp/ftp_directory_listing_parser_vms_unittest.cc',
'ftp/ftp_directory_listing_parser_windows_unittest.cc',
'ftp/ftp_network_transaction_unittest.cc',
'ftp/ftp_util_unittest.cc',
'http/des_unittest.cc',
'http/http_auth_cache_unittest.cc',
'http/http_auth_controller_unittest.cc',
'http/http_auth_filter_unittest.cc',
'http/http_auth_gssapi_posix_unittest.cc',
'http/http_auth_handler_basic_unittest.cc',
'http/http_auth_handler_digest_unittest.cc',
'http/http_auth_handler_factory_unittest.cc',
'http/http_auth_handler_mock.cc',
'http/http_auth_handler_mock.h',
'http/http_auth_handler_negotiate_unittest.cc',
'http/http_auth_handler_unittest.cc',
'http/http_auth_sspi_win_unittest.cc',
'http/http_auth_unittest.cc',
'http/http_byte_range_unittest.cc',
'http/http_cache_unittest.cc',
'http/http_chunked_decoder_unittest.cc',
'http/http_content_disposition_unittest.cc',
'http/http_network_layer_unittest.cc',
'http/http_network_transaction_spdy3_unittest.cc',
'http/http_network_transaction_spdy2_unittest.cc',
'http/http_pipelined_connection_impl_unittest.cc',
'http/http_pipelined_host_forced_unittest.cc',
'http/http_pipelined_host_impl_unittest.cc',
'http/http_pipelined_host_pool_unittest.cc',
'http/http_pipelined_host_test_util.cc',
'http/http_pipelined_host_test_util.h',
'http/http_pipelined_network_transaction_unittest.cc',
'http/http_proxy_client_socket_pool_spdy2_unittest.cc',
'http/http_proxy_client_socket_pool_spdy3_unittest.cc',
'http/http_request_headers_unittest.cc',
'http/http_response_body_drainer_unittest.cc',
'http/http_response_headers_unittest.cc',
'http/http_security_headers_unittest.cc',
'http/http_server_properties_impl_unittest.cc',
'http/http_stream_factory_impl_unittest.cc',
'http/http_stream_parser_unittest.cc',
'http/http_transaction_unittest.cc',
'http/http_transaction_unittest.h',
'http/http_util_unittest.cc',
'http/http_vary_data_unittest.cc',
'http/mock_allow_url_security_manager.cc',
'http/mock_allow_url_security_manager.h',
'http/mock_gssapi_library_posix.cc',
'http/mock_gssapi_library_posix.h',
'http/mock_http_cache.cc',
'http/mock_http_cache.h',
'http/mock_sspi_library_win.cc',
'http/mock_sspi_library_win.h',
'http/transport_security_state_unittest.cc',
'http/url_security_manager_unittest.cc',
'proxy/dhcp_proxy_script_adapter_fetcher_win_unittest.cc',
'proxy/dhcp_proxy_script_fetcher_factory_unittest.cc',
'proxy/dhcp_proxy_script_fetcher_win_unittest.cc',
'proxy/multi_threaded_proxy_resolver_unittest.cc',
'proxy/network_delegate_error_observer_unittest.cc',
'proxy/proxy_bypass_rules_unittest.cc',
'proxy/proxy_config_service_android_unittest.cc',
'proxy/proxy_config_service_linux_unittest.cc',
'proxy/proxy_config_service_win_unittest.cc',
'proxy/proxy_config_unittest.cc',
'proxy/proxy_info_unittest.cc',
'proxy/proxy_list_unittest.cc',
'proxy/proxy_resolver_v8_tracing_unittest.cc',
'proxy/proxy_resolver_v8_unittest.cc',
'proxy/proxy_script_decider_unittest.cc',
'proxy/proxy_script_fetcher_impl_unittest.cc',
'proxy/proxy_server_unittest.cc',
'proxy/proxy_service_unittest.cc',
'quic/blocked_list_test.cc',
'quic/congestion_control/available_channel_estimator_test.cc',
'quic/congestion_control/channel_estimator_test.cc',
'quic/congestion_control/cube_root_test.cc',
'quic/congestion_control/cubic_test.cc',
'quic/congestion_control/fix_rate_test.cc',
'quic/congestion_control/hybrid_slow_start_test.cc',
'quic/congestion_control/inter_arrival_bitrate_ramp_up_test.cc',
'quic/congestion_control/inter_arrival_overuse_detector_test.cc',
'quic/congestion_control/inter_arrival_probe_test.cc',
'quic/congestion_control/inter_arrival_receiver_test.cc',
'quic/congestion_control/inter_arrival_state_machine_test.cc',
'quic/congestion_control/inter_arrival_sender_test.cc',
'quic/congestion_control/leaky_bucket_test.cc',
'quic/congestion_control/paced_sender_test.cc',
'quic/congestion_control/quic_congestion_control_test.cc',
'quic/congestion_control/quic_congestion_manager_test.cc',
'quic/congestion_control/quic_max_sized_map_test.cc',
'quic/congestion_control/tcp_cubic_sender_test.cc',
'quic/congestion_control/tcp_receiver_test.cc',
'quic/crypto/aes_128_gcm_decrypter_test.cc',
'quic/crypto/aes_128_gcm_encrypter_test.cc',
'quic/crypto/crypto_framer_test.cc',
'quic/crypto/crypto_handshake_test.cc',
'quic/crypto/curve25519_key_exchange_test.cc',
'quic/crypto/null_decrypter_test.cc',
'quic/crypto/null_encrypter_test.cc',
'quic/crypto/p256_key_exchange_test.cc',
'quic/crypto/quic_random_test.cc',
'quic/crypto/strike_register_test.cc',
'quic/test_tools/crypto_test_utils.cc',
'quic/test_tools/crypto_test_utils.h',
'quic/test_tools/mock_clock.cc',
'quic/test_tools/mock_clock.h',
'quic/test_tools/mock_crypto_client_stream.cc',
'quic/test_tools/mock_crypto_client_stream.h',
'quic/test_tools/mock_crypto_client_stream_factory.cc',
'quic/test_tools/mock_crypto_client_stream_factory.h',
'quic/test_tools/mock_random.cc',
'quic/test_tools/mock_random.h',
'quic/test_tools/quic_connection_peer.cc',
'quic/test_tools/quic_connection_peer.h',
'quic/test_tools/quic_framer_peer.cc',
'quic/test_tools/quic_framer_peer.h',
'quic/test_tools/quic_packet_creator_peer.cc',
'quic/test_tools/quic_packet_creator_peer.h',
'quic/test_tools/quic_session_peer.cc',
'quic/test_tools/quic_session_peer.h',
'quic/test_tools/quic_test_utils.cc',
'quic/test_tools/quic_test_utils.h',
'quic/test_tools/reliable_quic_stream_peer.cc',
'quic/test_tools/reliable_quic_stream_peer.h',
'quic/test_tools/simple_quic_framer.cc',
'quic/test_tools/simple_quic_framer.h',
'quic/test_tools/test_task_runner.cc',
'quic/test_tools/test_task_runner.h',
'quic/quic_bandwidth_test.cc',
'quic/quic_client_session_test.cc',
'quic/quic_clock_test.cc',
'quic/quic_connection_helper_test.cc',
'quic/quic_connection_test.cc',
'quic/quic_crypto_client_stream_test.cc',
'quic/quic_crypto_server_stream_test.cc',
'quic/quic_crypto_stream_test.cc',
'quic/quic_data_writer_test.cc',
'quic/quic_fec_group_test.cc',
'quic/quic_framer_test.cc',
'quic/quic_http_stream_test.cc',
'quic/quic_network_transaction_unittest.cc',
'quic/quic_packet_creator_test.cc',
'quic/quic_packet_entropy_manager_test.cc',
'quic/quic_packet_generator_test.cc',
'quic/quic_protocol_test.cc',
'quic/quic_reliable_client_stream_test.cc',
'quic/quic_session_test.cc',
'quic/quic_stream_factory_test.cc',
'quic/quic_stream_sequencer_test.cc',
'quic/quic_time_test.cc',
'quic/quic_utils_test.cc',
'quic/reliable_quic_stream_test.cc',
'socket/buffered_write_stream_socket_unittest.cc',
'socket/client_socket_pool_base_unittest.cc',
'socket/deterministic_socket_data_unittest.cc',
'socket/mock_client_socket_pool_manager.cc',
'socket/mock_client_socket_pool_manager.h',
'socket/socks5_client_socket_unittest.cc',
'socket/socks_client_socket_pool_unittest.cc',
'socket/socks_client_socket_unittest.cc',
'socket/ssl_client_socket_openssl_unittest.cc',
'socket/ssl_client_socket_pool_unittest.cc',
'socket/ssl_client_socket_unittest.cc',
'socket/ssl_server_socket_unittest.cc',
'socket/tcp_client_socket_unittest.cc',
'socket/tcp_listen_socket_unittest.cc',
'socket/tcp_listen_socket_unittest.h',
'socket/tcp_server_socket_unittest.cc',
'socket/transport_client_socket_pool_unittest.cc',
'socket/transport_client_socket_unittest.cc',
'socket/unix_domain_socket_posix_unittest.cc',
'socket_stream/socket_stream_metrics_unittest.cc',
'socket_stream/socket_stream_unittest.cc',
'spdy/buffered_spdy_framer_spdy3_unittest.cc',
'spdy/buffered_spdy_framer_spdy2_unittest.cc',
'spdy/spdy_credential_builder_unittest.cc',
'spdy/spdy_credential_state_unittest.cc',
'spdy/spdy_frame_builder_test.cc',
'spdy/spdy_frame_reader_test.cc',
'spdy/spdy_framer_test.cc',
'spdy/spdy_header_block_unittest.cc',
'spdy/spdy_http_stream_spdy3_unittest.cc',
'spdy/spdy_http_stream_spdy2_unittest.cc',
'spdy/spdy_http_utils_unittest.cc',
'spdy/spdy_network_transaction_spdy3_unittest.cc',
'spdy/spdy_network_transaction_spdy2_unittest.cc',
'spdy/spdy_priority_forest_test.cc',
'spdy/spdy_protocol_test.cc',
'spdy/spdy_proxy_client_socket_spdy3_unittest.cc',
'spdy/spdy_proxy_client_socket_spdy2_unittest.cc',
'spdy/spdy_session_spdy3_unittest.cc',
'spdy/spdy_session_spdy2_unittest.cc',
'spdy/spdy_stream_spdy3_unittest.cc',
'spdy/spdy_stream_spdy2_unittest.cc',
'spdy/spdy_stream_test_util.cc',
'spdy/spdy_stream_test_util.h',
'spdy/spdy_test_util_common.cc',
'spdy/spdy_test_util_common.h',
'spdy/spdy_test_util_spdy3.cc',
'spdy/spdy_test_util_spdy3.h',
'spdy/spdy_test_util_spdy2.cc',
'spdy/spdy_test_util_spdy2.h',
'spdy/spdy_test_utils.cc',
'spdy/spdy_test_utils.h',
'spdy/spdy_websocket_stream_spdy2_unittest.cc',
'spdy/spdy_websocket_stream_spdy3_unittest.cc',
'spdy/spdy_websocket_test_util_spdy2.cc',
'spdy/spdy_websocket_test_util_spdy2.h',
'spdy/spdy_websocket_test_util_spdy3.cc',
'spdy/spdy_websocket_test_util_spdy3.h',
'ssl/client_cert_store_impl_unittest.cc',
'ssl/default_server_bound_cert_store_unittest.cc',
'ssl/openssl_client_key_store_unittest.cc',
'ssl/server_bound_cert_service_unittest.cc',
'ssl/ssl_cipher_suite_names_unittest.cc',
'ssl/ssl_client_auth_cache_unittest.cc',
'ssl/ssl_config_service_unittest.cc',
'test/python_utils_unittest.cc',
'test/run_all_unittests.cc',
'test/test_certificate_data.h',
'tools/dump_cache/url_to_filename_encoder.cc',
'tools/dump_cache/url_to_filename_encoder.h',
'tools/dump_cache/url_to_filename_encoder_unittest.cc',
'tools/dump_cache/url_utilities.h',
'tools/dump_cache/url_utilities.cc',
'tools/dump_cache/url_utilities_unittest.cc',
'udp/udp_socket_unittest.cc',
'url_request/url_fetcher_impl_unittest.cc',
'url_request/url_request_context_builder_unittest.cc',
'url_request/url_request_filter_unittest.cc',
'url_request/url_request_ftp_job_unittest.cc',
'url_request/url_request_http_job_unittest.cc',
'url_request/url_request_job_factory_impl_unittest.cc',
'url_request/url_request_job_unittest.cc',
'url_request/url_request_throttler_simulation_unittest.cc',
'url_request/url_request_throttler_test_support.cc',
'url_request/url_request_throttler_test_support.h',
'url_request/url_request_throttler_unittest.cc',
'url_request/url_request_unittest.cc',
'url_request/view_cache_helper_unittest.cc',
'websockets/websocket_errors_unittest.cc',
'websockets/websocket_frame_parser_unittest.cc',
'websockets/websocket_frame_unittest.cc',
'websockets/websocket_handshake_handler_unittest.cc',
'websockets/websocket_handshake_handler_spdy2_unittest.cc',
'websockets/websocket_handshake_handler_spdy3_unittest.cc',
'websockets/websocket_job_spdy2_unittest.cc',
'websockets/websocket_job_spdy3_unittest.cc',
'websockets/websocket_net_log_params_unittest.cc',
'websockets/websocket_throttle_unittest.cc',
],
'conditions': [
['chromeos==1', {
'sources!': [
'base/network_change_notifier_linux_unittest.cc',
'proxy/proxy_config_service_linux_unittest.cc',
],
}],
[ 'OS == "android"', {
'sources!': [
# No res_ninit() et al on Android, so this doesn't make a lot of
# sense.
'dns/dns_config_service_posix_unittest.cc',
'ssl/client_cert_store_impl_unittest.cc',
],
'dependencies': [
'net_javatests',
'net_test_jni_headers',
],
}],
[ 'use_glib == 1', {
'dependencies': [
'../build/linux/system.gyp:ssl',
],
}, { # else use_glib == 0: !posix || mac
'sources!': [
'cert/nss_cert_database_unittest.cc',
],
},
],
[ 'toolkit_uses_gtk == 1', {
'dependencies': [
'../build/linux/system.gyp:gtk',
],
},
],
[ 'os_posix == 1 and OS != "mac" and OS != "android" and OS != "ios"', {
'conditions': [
['linux_use_tcmalloc==1', {
'dependencies': [
'../base/allocator/allocator.gyp:allocator',
],
}],
],
}],
[ 'use_kerberos==1', {
'defines': [
'USE_KERBEROS',
],
}, { # use_kerberos == 0
'sources!': [
'http/http_auth_gssapi_posix_unittest.cc',
'http/http_auth_handler_negotiate_unittest.cc',
'http/mock_gssapi_library_posix.cc',
'http/mock_gssapi_library_posix.h',
],
}],
[ 'use_openssl==1', {
# When building for OpenSSL, we need to exclude NSS specific tests.
# TODO(bulach): Add equivalent tests when the underlying
# functionality is ported to OpenSSL.
'sources!': [
'cert/nss_cert_database_unittest.cc',
'cert/x509_util_nss_unittest.cc',
'ssl/client_cert_store_impl_unittest.cc',
],
}, { # else !use_openssl: remove the unneeded files
'sources!': [
'cert/x509_util_openssl_unittest.cc',
'socket/ssl_client_socket_openssl_unittest.cc',
'ssl/openssl_client_key_store_unittest.cc',
],
},
],
[ 'enable_websockets != 1', {
'sources/': [
['exclude', '^socket_stream/'],
['exclude', '^websockets/'],
['exclude', '^spdy/spdy_websocket_stream_spdy._unittest\\.cc$'],
],
}],
[ 'disable_ftp_support==1', {
'sources/': [
['exclude', '^ftp/'],
],
'sources!': [
'url_request/url_request_ftp_job_unittest.cc',
],
},
],
[ 'enable_built_in_dns!=1', {
'sources!': [
'dns/address_sorter_posix_unittest.cc',
'dns/address_sorter_unittest.cc',
],
},
],
[ 'use_v8_in_net==1', {
'dependencies': [
'net_with_v8',
],
}, { # else: !use_v8_in_net
'sources!': [
'proxy/proxy_resolver_v8_unittest.cc',
'proxy/proxy_resolver_v8_tracing_unittest.cc',
],
},
],
[ 'OS == "win"', {
'sources!': [
'dns/dns_config_service_posix_unittest.cc',
'http/http_auth_gssapi_posix_unittest.cc',
],
# This is needed to trigger the dll copy step on windows.
# TODO(mark): Specifying this here shouldn't be necessary.
'dependencies': [
'../third_party/icu/icu.gyp:icudata',
'../third_party/nss/nss.gyp:nspr',
'../third_party/nss/nss.gyp:nss',
'third_party/nss/ssl.gyp:libssl',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
},
],
[ 'OS == "mac"', {
'dependencies': [
'../third_party/nss/nss.gyp:nspr',
'../third_party/nss/nss.gyp:nss',
'third_party/nss/ssl.gyp:libssl',
],
},
],
[ 'OS == "ios"', {
'dependencies': [
'../third_party/nss/nss.gyp:nss',
],
'actions': [
{
'action_name': 'copy_test_data',
'variables': {
'test_data_files': [
'data/ssl/certificates/',
'data/url_request_unittest/',
],
'test_data_prefix': 'net',
},
'includes': [ '../build/copy_test_data_ios.gypi' ],
},
],
'sources!': [
# TODO(droger): The following tests are disabled because the
# implementation is missing or incomplete.
# KeygenHandler::GenKeyAndSignChallenge() is not ported to iOS.
'base/keygen_handler_unittest.cc',
# Need to read input data files.
'base/gzip_filter_unittest.cc',
'disk_cache/backend_unittest.cc',
'disk_cache/block_files_unittest.cc',
'socket/ssl_server_socket_unittest.cc',
# Need TestServer.
'proxy/proxy_script_fetcher_impl_unittest.cc',
'socket/ssl_client_socket_unittest.cc',
'ssl/client_cert_store_impl_unittest.cc',
'url_request/url_fetcher_impl_unittest.cc',
'url_request/url_request_context_builder_unittest.cc',
# Needs GetAppOutput().
'test/python_utils_unittest.cc',
# The following tests are disabled because they don't apply to
# iOS.
# OS is not "linux" or "freebsd" or "openbsd".
'socket/unix_domain_socket_posix_unittest.cc',
],
'conditions': [
['coverage != 0', {
'sources!': [
# These sources can't be built with coverage due to a
# toolchain bug: http://openradar.appspot.com/radar?id=1499403
'http/transport_security_state_unittest.cc',
# These tests crash when run with coverage turned on due to an
# issue with llvm_gcda_increment_indirect_counter:
# http://crbug.com/156058
'cookies/cookie_monster_unittest.cc',
'cookies/cookie_store_unittest.h',
'http/http_auth_controller_unittest.cc',
'http/http_network_layer_unittest.cc',
'http/http_network_transaction_spdy2_unittest.cc',
'http/http_network_transaction_spdy3_unittest.cc',
'spdy/spdy_http_stream_spdy2_unittest.cc',
'spdy/spdy_http_stream_spdy3_unittest.cc',
'spdy/spdy_proxy_client_socket_spdy3_unittest.cc',
'spdy/spdy_session_spdy3_unittest.cc',
# These tests crash when run with coverage turned on:
# http://crbug.com/177203
'proxy/proxy_service_unittest.cc',
],
}],
],
}],
[ 'OS == "linux"', {
'dependencies': [
'../build/linux/system.gyp:dbus',
'../dbus/dbus.gyp:dbus_test_support',
],
},
],
[ 'OS == "android"', {
'dependencies': [
'../third_party/openssl/openssl.gyp:openssl',
],
'sources!': [
'dns/dns_config_service_posix_unittest.cc',
],
},
],
['OS == "android" and gtest_target_type == "shared_library"', {
'dependencies': [
'../testing/android/native_test.gyp:native_test_native_code',
]
}],
[ 'OS != "win" and OS != "mac"', {
'sources!': [
'cert/x509_cert_types_unittest.cc',
],
}],
],
},
{
'target_name': 'net_perftests',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'../base/base.gyp:base_i18n',
'../base/base.gyp:test_support_perf',
'../build/temp_gyp/googleurl.gyp:googleurl',
'../testing/gtest.gyp:gtest',
'net',
'net_test_support',
],
'sources': [
'cookies/cookie_monster_perftest.cc',
'disk_cache/disk_cache_perftest.cc',
'proxy/proxy_resolver_perftest.cc',
],
'conditions': [
[ 'use_v8_in_net==1', {
'dependencies': [
'net_with_v8',
],
}, { # else: !use_v8_in_net
'sources!': [
'proxy/proxy_resolver_perftest.cc',
],
},
],
# This is needed to trigger the dll copy step on windows.
# TODO(mark): Specifying this here shouldn't be necessary.
[ 'OS == "win"', {
'dependencies': [
'../third_party/icu/icu.gyp:icudata',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
},
],
],
},
{
'target_name': 'net_test_support',
'type': 'static_library',
'dependencies': [
'../base/base.gyp:base',
'../base/base.gyp:test_support_base',
'../build/temp_gyp/googleurl.gyp:googleurl',
'../testing/gtest.gyp:gtest',
'net',
],
'export_dependent_settings': [
'../base/base.gyp:base',
'../base/base.gyp:test_support_base',
'../testing/gtest.gyp:gtest',
],
'sources': [
'base/capturing_net_log.cc',
'base/capturing_net_log.h',
'base/load_timing_info_test_util.cc',
'base/load_timing_info_test_util.h',
'base/mock_file_stream.cc',
'base/mock_file_stream.h',
'base/test_completion_callback.cc',
'base/test_completion_callback.h',
'base/test_data_directory.cc',
'base/test_data_directory.h',
'cert/mock_cert_verifier.cc',
'cert/mock_cert_verifier.h',
'cookies/cookie_monster_store_test.cc',
'cookies/cookie_monster_store_test.h',
'cookies/cookie_store_test_callbacks.cc',
'cookies/cookie_store_test_callbacks.h',
'cookies/cookie_store_test_helpers.cc',
'cookies/cookie_store_test_helpers.h',
'disk_cache/disk_cache_test_base.cc',
'disk_cache/disk_cache_test_base.h',
'disk_cache/disk_cache_test_util.cc',
'disk_cache/disk_cache_test_util.h',
'disk_cache/flash/flash_cache_test_base.h',
'disk_cache/flash/flash_cache_test_base.cc',
'dns/dns_test_util.cc',
'dns/dns_test_util.h',
'dns/mock_host_resolver.cc',
'dns/mock_host_resolver.h',
'proxy/mock_proxy_resolver.cc',
'proxy/mock_proxy_resolver.h',
'proxy/mock_proxy_script_fetcher.cc',
'proxy/mock_proxy_script_fetcher.h',
'proxy/proxy_config_service_common_unittest.cc',
'proxy/proxy_config_service_common_unittest.h',
'socket/socket_test_util.cc',
'socket/socket_test_util.h',
'test/base_test_server.cc',
'test/base_test_server.h',
'test/cert_test_util.cc',
'test/cert_test_util.h',
'test/local_test_server_posix.cc',
'test/local_test_server_win.cc',
'test/local_test_server.cc',
'test/local_test_server.h',
'test/net_test_suite.cc',
'test/net_test_suite.h',
'test/python_utils.cc',
'test/python_utils.h',
'test/remote_test_server.cc',
'test/remote_test_server.h',
'test/spawner_communicator.cc',
'test/spawner_communicator.h',
'test/test_server.h',
'url_request/test_url_fetcher_factory.cc',
'url_request/test_url_fetcher_factory.h',
'url_request/url_request_test_util.cc',
'url_request/url_request_test_util.h',
],
'conditions': [
['inside_chromium_build==1 and OS != "ios"', {
'dependencies': [
'../third_party/protobuf/protobuf.gyp:py_proto',
],
}],
['os_posix == 1 and OS != "mac" and OS != "android" and OS != "ios"', {
'conditions': [
['use_openssl==1', {
'dependencies': [
'../third_party/openssl/openssl.gyp:openssl',
],
}, {
'dependencies': [
'../build/linux/system.gyp:ssl',
],
}],
],
}],
['os_posix == 1 and OS != "mac" and OS != "android" and OS != "ios"', {
'conditions': [
['linux_use_tcmalloc==1', {
'dependencies': [
'../base/allocator/allocator.gyp:allocator',
],
}],
],
}],
['OS != "android"', {
'sources!': [
'test/remote_test_server.cc',
'test/remote_test_server.h',
'test/spawner_communicator.cc',
'test/spawner_communicator.h',
],
}],
['OS == "ios"', {
'dependencies': [
'../third_party/nss/nss.gyp:nss',
],
}],
[ 'use_v8_in_net==1', {
'dependencies': [
'net_with_v8',
],
},
],
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
},
{
'target_name': 'net_resources',
'type': 'none',
'variables': {
'grit_out_dir': '<(SHARED_INTERMEDIATE_DIR)/net',
},
'actions': [
{
'action_name': 'net_resources',
'variables': {
'grit_grd_file': 'base/net_resources.grd',
},
'includes': [ '../build/grit_action.gypi' ],
},
],
'includes': [ '../build/grit_target.gypi' ],
},
{
'target_name': 'http_server',
'type': 'static_library',
'variables': { 'enable_wexit_time_destructors': 1, },
'dependencies': [
'../base/base.gyp:base',
'net',
],
'sources': [
'server/http_connection.cc',
'server/http_connection.h',
'server/http_server.cc',
'server/http_server.h',
'server/http_server_request_info.cc',
'server/http_server_request_info.h',
'server/web_socket.cc',
'server/web_socket.h',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
},
{
'target_name': 'dump_cache',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'net',
'net_test_support',
],
'sources': [
'tools/dump_cache/cache_dumper.cc',
'tools/dump_cache/cache_dumper.h',
'tools/dump_cache/dump_cache.cc',
'tools/dump_cache/dump_files.cc',
'tools/dump_cache/dump_files.h',
'tools/dump_cache/simple_cache_dumper.cc',
'tools/dump_cache/simple_cache_dumper.h',
'tools/dump_cache/upgrade_win.cc',
'tools/dump_cache/upgrade_win.h',
'tools/dump_cache/url_to_filename_encoder.cc',
'tools/dump_cache/url_to_filename_encoder.h',
'tools/dump_cache/url_utilities.h',
'tools/dump_cache/url_utilities.cc',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
},
],
'conditions': [
['use_v8_in_net == 1', {
'targets': [
{
'target_name': 'net_with_v8',
'type': '<(component)',
'variables': { 'enable_wexit_time_destructors': 1, },
'dependencies': [
'../base/base.gyp:base',
'../build/temp_gyp/googleurl.gyp:googleurl',
'../v8/tools/gyp/v8.gyp:v8',
'net'
],
'defines': [
'NET_IMPLEMENTATION',
],
'sources': [
'proxy/proxy_resolver_v8.cc',
'proxy/proxy_resolver_v8.h',
'proxy/proxy_resolver_v8_tracing.cc',
'proxy/proxy_resolver_v8_tracing.h',
'proxy/proxy_service_v8.cc',
'proxy/proxy_service_v8.h',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
},
],
}],
['OS != "ios"', {
'targets': [
# iOS doesn't have the concept of simple executables, these targets
# can't be compiled on the platform.
{
'target_name': 'crash_cache',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'net',
'net_test_support',
],
'sources': [
'tools/crash_cache/crash_cache.cc',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
},
{
'target_name': 'crl_set_dump',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'net',
],
'sources': [
'tools/crl_set_dump/crl_set_dump.cc',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
},
{
'target_name': 'dns_fuzz_stub',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'net',
],
'sources': [
'tools/dns_fuzz_stub/dns_fuzz_stub.cc',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
},
{
'target_name': 'fetch_client',
'type': 'executable',
'variables': { 'enable_wexit_time_destructors': 1, },
'dependencies': [
'../base/base.gyp:base',
'../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
'../build/temp_gyp/googleurl.gyp:googleurl',
'../testing/gtest.gyp:gtest',
'net',
'net_with_v8',
],
'sources': [
'tools/fetch/fetch_client.cc',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
},
{
'target_name': 'fetch_server',
'type': 'executable',
'variables': { 'enable_wexit_time_destructors': 1, },
'dependencies': [
'../base/base.gyp:base',
'../build/temp_gyp/googleurl.gyp:googleurl',
'net',
],
'sources': [
'tools/fetch/fetch_server.cc',
'tools/fetch/http_listen_socket.cc',
'tools/fetch/http_listen_socket.h',
'tools/fetch/http_server.cc',
'tools/fetch/http_server.h',
'tools/fetch/http_server_request_info.cc',
'tools/fetch/http_server_request_info.h',
'tools/fetch/http_server_response_info.cc',
'tools/fetch/http_server_response_info.h',
'tools/fetch/http_session.cc',
'tools/fetch/http_session.h',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
},
{
'target_name': 'gdig',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'net',
],
'sources': [
'tools/gdig/file_net_log.cc',
'tools/gdig/gdig.cc',
],
},
{
'target_name': 'get_server_time',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'../base/base.gyp:base_i18n',
'../build/temp_gyp/googleurl.gyp:googleurl',
'net',
],
'sources': [
'tools/get_server_time/get_server_time.cc',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
},
{
'target_name': 'net_watcher',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'net',
'net_with_v8',
],
'conditions': [
[ 'use_glib == 1', {
'dependencies': [
'../build/linux/system.gyp:gconf',
'../build/linux/system.gyp:gio',
],
},
],
],
'sources': [
'tools/net_watcher/net_watcher.cc',
],
},
{
'target_name': 'run_testserver',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'../base/base.gyp:test_support_base',
'../testing/gtest.gyp:gtest',
'net_test_support',
],
'sources': [
'tools/testserver/run_testserver.cc',
],
},
{
'target_name': 'stress_cache',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'net',
'net_test_support',
],
'sources': [
'disk_cache/stress_cache.cc',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
},
{
'target_name': 'tld_cleanup',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'../base/base.gyp:base_i18n',
'../build/temp_gyp/googleurl.gyp:googleurl',
],
'sources': [
'tools/tld_cleanup/tld_cleanup.cc',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
},
],
}],
['os_posix == 1 and OS != "mac" and OS != "ios" and OS != "android"', {
'targets': [
{
'target_name': 'flip_balsa_and_epoll_library',
'type': 'static_library',
'dependencies': [
'../base/base.gyp:base',
'net',
],
'sources': [
'tools/flip_server/balsa_enums.h',
'tools/flip_server/balsa_frame.cc',
'tools/flip_server/balsa_frame.h',
'tools/flip_server/balsa_headers.cc',
'tools/flip_server/balsa_headers.h',
'tools/flip_server/balsa_headers_token_utils.cc',
'tools/flip_server/balsa_headers_token_utils.h',
'tools/flip_server/balsa_visitor_interface.h',
'tools/flip_server/constants.h',
'tools/flip_server/epoll_server.cc',
'tools/flip_server/epoll_server.h',
'tools/flip_server/http_message_constants.cc',
'tools/flip_server/http_message_constants.h',
'tools/flip_server/split.h',
'tools/flip_server/split.cc',
],
},
{
'target_name': 'flip_in_mem_edsm_server',
'type': 'executable',
'cflags': [
'-Wno-deprecated',
],
'dependencies': [
'../base/base.gyp:base',
'../third_party/openssl/openssl.gyp:openssl',
'flip_balsa_and_epoll_library',
'net',
],
'sources': [
'tools/dump_cache/url_to_filename_encoder.cc',
'tools/dump_cache/url_to_filename_encoder.h',
'tools/dump_cache/url_utilities.h',
'tools/dump_cache/url_utilities.cc',
'tools/flip_server/acceptor_thread.h',
'tools/flip_server/acceptor_thread.cc',
'tools/flip_server/buffer_interface.h',
'tools/flip_server/create_listener.cc',
'tools/flip_server/create_listener.h',
'tools/flip_server/flip_config.cc',
'tools/flip_server/flip_config.h',
'tools/flip_server/flip_in_mem_edsm_server.cc',
'tools/flip_server/http_interface.cc',
'tools/flip_server/http_interface.h',
'tools/flip_server/loadtime_measurement.h',
'tools/flip_server/mem_cache.h',
'tools/flip_server/mem_cache.cc',
'tools/flip_server/output_ordering.cc',
'tools/flip_server/output_ordering.h',
'tools/flip_server/ring_buffer.cc',
'tools/flip_server/ring_buffer.h',
'tools/flip_server/simple_buffer.cc',
'tools/flip_server/simple_buffer.h',
'tools/flip_server/sm_connection.cc',
'tools/flip_server/sm_connection.h',
'tools/flip_server/sm_interface.h',
'tools/flip_server/spdy_ssl.cc',
'tools/flip_server/spdy_ssl.h',
'tools/flip_server/spdy_interface.cc',
'tools/flip_server/spdy_interface.h',
'tools/flip_server/spdy_util.cc',
'tools/flip_server/spdy_util.h',
'tools/flip_server/streamer_interface.cc',
'tools/flip_server/streamer_interface.h',
'tools/flip_server/string_piece_utils.h',
],
},
{
'target_name': 'quic_library',
'type': 'static_library',
'dependencies': [
'../base/base.gyp:base',
'../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
'../build/temp_gyp/googleurl.gyp:googleurl',
'../third_party/openssl/openssl.gyp:openssl',
'flip_balsa_and_epoll_library',
'net',
],
'sources': [
'tools/quic/quic_client.cc',
'tools/quic/quic_client.h',
'tools/quic/quic_client_session.cc',
'tools/quic/quic_client_session.h',
'tools/quic/quic_dispatcher.h',
'tools/quic/quic_dispatcher.cc',
'tools/quic/quic_epoll_clock.cc',
'tools/quic/quic_epoll_clock.h',
'tools/quic/quic_epoll_connection_helper.cc',
'tools/quic/quic_epoll_connection_helper.h',
'tools/quic/quic_in_memory_cache.cc',
'tools/quic/quic_in_memory_cache.h',
'tools/quic/quic_packet_writer.h',
'tools/quic/quic_reliable_client_stream.cc',
'tools/quic/quic_reliable_client_stream.h',
'tools/quic/quic_reliable_server_stream.cc',
'tools/quic/quic_reliable_server_stream.h',
'tools/quic/quic_server.cc',
'tools/quic/quic_server.h',
'tools/quic/quic_server_session.cc',
'tools/quic/quic_server_session.h',
'tools/quic/quic_socket_utils.cc',
'tools/quic/quic_socket_utils.h',
'tools/quic/quic_spdy_client_stream.cc',
'tools/quic/quic_spdy_client_stream.h',
'tools/quic/quic_spdy_server_stream.cc',
'tools/quic/quic_spdy_server_stream.h',
'tools/quic/quic_time_wait_list_manager.h',
'tools/quic/quic_time_wait_list_manager.cc',
'tools/quic/spdy_utils.cc',
'tools/quic/spdy_utils.h',
],
},
{
'target_name': 'quic_client',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'../third_party/openssl/openssl.gyp:openssl',
'net',
'quic_library',
],
'sources': [
'tools/quic/quic_client_bin.cc',
],
},
{
'target_name': 'quic_server',
'type': 'executable',
'dependencies': [
'../base/base.gyp:base',
'../third_party/openssl/openssl.gyp:openssl',
'net',
'quic_library',
],
'sources': [
'tools/quic/quic_server_bin.cc',
],
},
{
'target_name': 'quic_unittests',
'type': '<(gtest_target_type)',
'dependencies': [
'../base/base.gyp:test_support_base',
'../testing/gmock.gyp:gmock',
'../testing/gtest.gyp:gtest',
'net',
'quic_library',
],
'sources': [
'quic/test_tools/quic_session_peer.cc',
'quic/test_tools/quic_session_peer.h',
'quic/test_tools/crypto_test_utils.cc',
'quic/test_tools/crypto_test_utils.h',
'quic/test_tools/mock_clock.cc',
'quic/test_tools/mock_clock.h',
'quic/test_tools/mock_random.cc',
'quic/test_tools/mock_random.h',
'quic/test_tools/simple_quic_framer.cc',
'quic/test_tools/simple_quic_framer.h',
'quic/test_tools/quic_connection_peer.cc',
'quic/test_tools/quic_connection_peer.h',
'quic/test_tools/quic_framer_peer.cc',
'quic/test_tools/quic_framer_peer.h',
'quic/test_tools/quic_test_utils.cc',
'quic/test_tools/quic_test_utils.h',
'quic/test_tools/reliable_quic_stream_peer.cc',
'quic/test_tools/reliable_quic_stream_peer.h',
'tools/flip_server/simple_buffer.cc',
'tools/flip_server/simple_buffer.h',
'tools/quic/end_to_end_test.cc',
'tools/quic/quic_client_session_test.cc',
'tools/quic/quic_dispatcher_test.cc',
'tools/quic/quic_epoll_clock_test.cc',
'tools/quic/quic_epoll_connection_helper_test.cc',
'tools/quic/quic_reliable_client_stream_test.cc',
'tools/quic/quic_reliable_server_stream_test.cc',
'tools/quic/test_tools/http_message_test_utils.cc',
'tools/quic/test_tools/http_message_test_utils.h',
'tools/quic/test_tools/mock_epoll_server.cc',
'tools/quic/test_tools/mock_epoll_server.h',
'tools/quic/test_tools/quic_test_client.cc',
'tools/quic/test_tools/quic_test_client.h',
'tools/quic/test_tools/quic_test_utils.cc',
'tools/quic/test_tools/quic_test_utils.h',
'tools/quic/test_tools/run_all_unittests.cc',
],
}
]
}],
['OS=="android"', {
'targets': [
{
'target_name': 'net_jni_headers',
'type': 'none',
'sources': [
'android/java/src/org/chromium/net/AndroidKeyStore.java',
'android/java/src/org/chromium/net/AndroidNetworkLibrary.java',
'android/java/src/org/chromium/net/GURLUtils.java',
'android/java/src/org/chromium/net/NetworkChangeNotifier.java',
'android/java/src/org/chromium/net/ProxyChangeListener.java',
],
'variables': {
'jni_gen_package': 'net',
},
'direct_dependent_settings': {
'include_dirs': [
'<(SHARED_INTERMEDIATE_DIR)/net',
],
},
'includes': [ '../build/jni_generator.gypi' ],
},
{
'target_name': 'net_test_jni_headers',
'type': 'none',
'sources': [
'android/javatests/src/org/chromium/net/AndroidKeyStoreTestUtil.java',
],
'variables': {
'jni_gen_package': 'net',
},
'direct_dependent_settings': {
'include_dirs': [
'<(SHARED_INTERMEDIATE_DIR)/net',
],
},
'includes': [ '../build/jni_generator.gypi' ],
},
{
'target_name': 'net_java',
'type': 'none',
'variables': {
'java_in_dir': '../net/android/java',
},
'dependencies': [
'../base/base.gyp:base',
'cert_verify_result_android_java',
'certificate_mime_types_java',
'net_errors_java',
'private_key_types_java',
],
'includes': [ '../build/java.gypi' ],
},
{
'target_name': 'net_java_test_support',
'type': 'none',
'variables': {
'java_in_dir': '../net/test/android/javatests',
},
'includes': [ '../build/java.gypi' ],
},
{
'target_name': 'net_javatests',
'type': 'none',
'variables': {
'java_in_dir': '../net/android/javatests',
},
'dependencies': [
'../base/base.gyp:base',
'../base/base.gyp:base_java_test_support',
'net_java',
],
'includes': [ '../build/java.gypi' ],
},
{
'target_name': 'net_errors_java',
'type': 'none',
'sources': [
'android/java/NetError.template',
],
'variables': {
'package_name': 'org/chromium/net',
'template_deps': ['base/net_error_list.h'],
},
'includes': [ '../build/android/java_cpp_template.gypi' ],
},
{
'target_name': 'certificate_mime_types_java',
'type': 'none',
'sources': [
'android/java/CertificateMimeType.template',
],
'variables': {
'package_name': 'org/chromium/net',
'template_deps': ['base/mime_util_certificate_type_list.h'],
},
'includes': [ '../build/android/java_cpp_template.gypi' ],
},
{
'target_name': 'cert_verify_result_android_java',
'type': 'none',
'sources': [
'android/java/CertVerifyResultAndroid.template',
],
'variables': {
'package_name': 'org/chromium/net',
'template_deps': ['android/cert_verify_result_android_list.h'],
},
'includes': [ '../build/android/java_cpp_template.gypi' ],
},
{
'target_name': 'private_key_types_java',
'type': 'none',
'sources': [
'android/java/PrivateKeyType.template',
],
'variables': {
'package_name': 'org/chromium/net',
'template_deps': ['android/private_key_type_list.h'],
},
'includes': [ '../build/android/java_cpp_template.gypi' ],
},
],
}],
# Special target to wrap a gtest_target_type==shared_library
# net_unittests into an android apk for execution.
# See base.gyp for TODO(jrg)s about this strategy.
['OS == "android" and gtest_target_type == "shared_library"', {
'targets': [
{
'target_name': 'net_unittests_apk',
'type': 'none',
'dependencies': [
'net_java',
'net_javatests',
'net_unittests',
],
'variables': {
'test_suite_name': 'net_unittests',
'input_shlib_path': '<(SHARED_LIB_DIR)/<(SHARED_LIB_PREFIX)net_unittests<(SHARED_LIB_SUFFIX)',
},
'includes': [ '../build/apk_test.gypi' ],
},
],
}],
['test_isolation_mode != "noop"', {
'targets': [
{
'target_name': 'net_unittests_run',
'type': 'none',
'dependencies': [
'net_unittests',
],
'includes': [
'net_unittests.isolate',
],
'actions': [
{
'action_name': 'isolate',
'inputs': [
'net_unittests.isolate',
'<@(isolate_dependency_tracked)',
],
'outputs': [
'<(PRODUCT_DIR)/net_unittests.isolated',
],
'action': [
'python',
'../tools/swarm_client/isolate.py',
'<(test_isolation_mode)',
'--outdir', '<(test_isolation_outdir)',
'--variable', 'PRODUCT_DIR', '<(PRODUCT_DIR)',
'--variable', 'OS', '<(OS)',
'--result', '<@(_outputs)',
'--isolate', 'net_unittests.isolate',
],
},
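        # For reference, with typical values the isolate step above expands to a
        # command line roughly like the following (mode and paths are illustrative
        # only, not taken from an actual build):
        #   python ../tools/swarm_client/isolate.py check \
        #     --outdir <outdir> --variable PRODUCT_DIR out/Release \
        #     --variable OS linux --result out/Release/net_unittests.isolated \
        #     --isolate net_unittests.isolate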
],
},
],
}],
],
}
| 1.28125 | 1 |
python/clx/analytics/detector.py | mdemoret-nv/clx | 0 | 2905 | import logging
import torch
import torch.nn as nn
from abc import ABC, abstractmethod
log = logging.getLogger(__name__)
class Detector(ABC):
def __init__(self, lr=0.001):
self.lr = lr
self.__model = None
self.__optimizer = None
self.__criterion = nn.CrossEntropyLoss()
@property
def model(self):
return self.__model
@property
def optimizer(self):
return self.__optimizer
@property
def criterion(self):
return self.__criterion
@abstractmethod
def init_model(self, char_vocab, hidden_size, n_domain_type, n_layers):
pass
@abstractmethod
def train_model(self, epoch, train_dataset):
pass
@abstractmethod
def predict(self, epoch, train_dataset):
pass
def load_model(self, file_path):
""" This function load already saved model and sets cuda parameters.
:param file_path: File path of a model to loaded.
:type file_path: string
"""
model = torch.load(file_path)
model.eval()
self.__model = model
self.__set_model2cuda()
self.__set_optimizer()
def save_model(self, file_path):
""" This function saves model to given location.
:param file_path: File path to save model.
:type file_path: string
"""
torch.save(self.model, file_path)
def __set_parallelism(self):
gpu_count = torch.cuda.device_count()
if gpu_count > 1:
log.info("%s GPUs!" % (gpu_count))
self.__model = nn.DataParallel(self.model)
self.__set_model2cuda()
else:
self.__set_model2cuda()
def __set_optimizer(self):
self.__optimizer = torch.optim.RMSprop(
self.model.parameters(), self.lr, weight_decay=0.0
)
def __set_model2cuda(self):
if torch.cuda.is_available():
log.info("Setting cuda")
self.model.cuda()
def leverage_model(self, model):
"""This function leverages model by setting parallelism parameters.
:param model: Model instance.
:type model: RNNClassifier
"""
self.__model = model
self.__set_parallelism()
self.__set_optimizer()
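# Usage sketch for this base class (the subclass name and file path below are
# assumptions for illustration, not part of this module): a concrete detector
# implements init_model/train_model/predict, while Detector supplies persistence,
# multi-GPU wrapping and the RMSprop optimizer.
#
#   detector = DGADetector(lr=0.001)                  # hypothetical subclass
#   detector.init_model(char_vocab, hidden_size, n_domain_type, n_layers)
#   detector.train_model(epoch, train_dataset)
#   detector.save_model("rnn_classifier.pth")         # illustrative path
#   detector.load_model("rnn_classifier.pth")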
| 2.78125 | 3 |
street_score/project/models.py | openplans/streetscore | 4 | 2906 | <reponame>openplans/streetscore<filename>street_score/project/models.py
import math
import random
from django.db import models
class TimeStampedModel (models.Model):
"""
Base model class for when you want to keep track of created and updated
times for model instances.
"""
created_datetime = models.DateTimeField(auto_now_add=True)
updated_datetime = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
class Rating (TimeStampedModel):
criterion = models.ForeignKey('Criterion', related_name='ratings')
""" The criterion that this rating is for.
"""
place1 = models.ForeignKey('Place', related_name='+')
""" The first place that this rating compares
"""
place2 = models.ForeignKey('Place', related_name='+')
""" The second place that this rating compares
"""
score = models.IntegerField()
""" The rating score. 1 means that place1 "wins" over place2 for the given
criterion. -1 means that place2 "wins".
"""
user_info = models.ForeignKey('sessions.UserInfo', null=True, related_name='ratings')
""" The information for the user that made this rating. Not required, but
useful for data analysis.
"""
def __unicode__(self):
meaning = ({
-1: 'more {0} than',
1: 'less {0} than',
0: 'as {0} as',
})
return ('Place #{p1} is {rating} place #{p2}').format(
p1=self.place1, p2=self.place2,
rating=meaning[self.score].format(self.criterion.prompt))
@property
def question(self):
"""
The question string to which the rating is a response.
"""
return self.criterion.prompt
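# Example of recording one comparison with the model above (a sketch; the
# variable names are illustrative): score=1 records that place1 "wins" over
# place2 for the given criterion, score=-1 the reverse, score=0 a tie.
#
#   Rating.objects.create(criterion=criterion, place1=place1, place2=place2,
#                         score=1, user_info=user_info)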
class Criterion (models.Model):
prompt = models.TextField()
""" The question prompt, i.e. 'How clean is the street?'.
"""
def __unicode__(self):
return self.prompt
class Meta:
verbose_name_plural = "criteria"
class Place (models.Model):
lat = models.FloatField()
lon = models.FloatField()
def __unicode__(self):
return '({0}, {1})'.format(self.lat, self.lon)
class UserInfo (TimeStampedModel):
lat = models.FloatField(null=True)
lon = models.FloatField(null=True)
""" The user's location.
"""
SOURCES = (
('ip', 'IP Address'),
('html5', 'HTML5 Geolocation API'),
)
location_source = models.CharField(max_length=32, choices=SOURCES)
location_data = models.CharField(max_length=2048)
""" The method by which the location was obtained, and any additional
information required to recreate the location.
"""
session = models.OneToOneField('sessions.Session')
""" The Django browser session.
"""
def __unicode__(self):
return u'User for session {key}'.format(key=self.session.session_key)
class Meta:
app_label = 'sessions'
db_table = 'project_userinfo'
verbose_name_plural = 'User info'
class SiteConfiguration (models.Model):
site = models.OneToOneField('sites.Site', related_name='config')
google_analytics_key = models.CharField(max_length=256, null=True, blank=True)
addthis_key = models.CharField(max_length=256, null=True, blank=True)
addthis_title = models.CharField(max_length=256, null=True, blank=True)
about_title = models.CharField(max_length=256, null=True, blank=True)
about_text = models.TextField(null=True, blank=True)
about_text_is_html = models.BooleanField(blank=True, default=False)
def __unicode__(self):
return 'Configuration for {0}'.format(self.site.name)
class Meta:
app_label = 'sites'
db_table = 'project_siteconfiguration'
class SurveySession (object):
"""
"""
def __init__(self, questions=None, places=None):
self.__questions = questions
self.__places = places
@property
def questions(self):
"""
Get the set of questions for this survey.
"""
return self.__questions or self.init_questions()
@property
def places(self):
"""
Get the block for this session.
"""
return self.__places or self.init_places()
def init_places(self):
"""
Load two places at random.
TODO: Order the places by those that have the least questions answered
about them first.
"""
places = Place.objects.all().order_by('?')[:2]
self.__places = places
return self.__places
def init_questions(self):
"""
Load a set of questions at random.
"""
all_questions = (
Criterion.objects.all()
.annotate(num_ratings=models.Count('ratings'))
)
self.__questions = all_questions
return self.__questions
@classmethod
def make_surveys(cls, count=1):
# TODO: Choose the places and questions more smartly. Use the init_...
# methods defined above (and make them better too).
places = list(Place.objects.all().order_by('?')[:(count * 2)])
questions = list(Criterion.objects.all())
surveys = []
for i in range(count):
place1 = places[2 * i]
place2 = places[2 * i + 1]
surveys.append(cls(places=[place1, place2], questions=questions))
return surveys
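# Illustrative use of the factory above (a sketch, not an existing view): a view
# could batch up several pairwise surveys and pass the places and prompts to a
# template.
#
#   surveys = SurveySession.make_surveys(count=5)
#   for survey in surveys:
#       place1, place2 = survey.places
#       prompts = [criterion.prompt for criterion in survey.questions]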
| 2.96875 | 3 |
src/selfdroid/appstorage/crud/AppAdder.py | vitlabuda/selfdroid-web-app | 1 | 2907 | # SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2021 <NAME>. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sqlalchemy.exc
from selfdroid.appstorage.AppMetadata import AppMetadata
from selfdroid.appstorage.AppMetadataDBModel import AppMetadataDBModel
from selfdroid.appstorage.AppStorageConsistencyEnsurer import AppStorageConsistencyEnsurer
from selfdroid.appstorage.apk.APKParser import APKParser
from selfdroid.appstorage.apk.ParsedAPK import ParsedAPK
from selfdroid.appstorage.crud.AppAdderException import AppAdderException
from selfdroid.web.WebStatusMessageCollector import WebStatusMessageCollector
from selfdroid import db
class AppAdder:
"""
This class must be instantiated and have its public methods called in a locked context!
"""
def __init__(self, uploaded_apk_path: str):
self._uploaded_apk_path: str = uploaded_apk_path
self._parsed_apk: ParsedAPK = APKParser(self._uploaded_apk_path).parsed_apk
def add_app_while_locked(self) -> AppMetadata:
"""
:return: The metadata of the added app.
"""
try:
app_metadata = self._add_app_while_locked_with_exceptions_handled()
except (sqlalchemy.exc.SQLAlchemyError, OSError):
db.session.rollback()
raise AppAdderException("An error occurred while adding the app!")
finally:
AppStorageConsistencyEnsurer().ensure_consistency_while_locked()
return app_metadata
def _add_app_while_locked_with_exceptions_handled(self) -> AppMetadata:
self._check_if_app_can_be_added()
return self._perform_app_addition()
def _check_if_app_can_be_added(self) -> None:
an_app_with_the_same_package_name = AppMetadataDBModel.query.filter_by(package_name=self._parsed_apk.package_name).first()
if an_app_with_the_same_package_name is not None:
html_message = WebStatusMessageCollector.format_html_message("An app with the same package name <i>({})</i> is already present on the server! You should update the app instead of adding it!", self._parsed_apk.package_name)
raise AppAdderException(html_message)
def _perform_app_addition(self) -> AppMetadata:
        # A UserReadableException mustn't be raised in this method!
# 1. Database
db_model = self._parsed_apk.create_new_db_model_with_metadata()
db.session.add(db_model)
db.session.commit()
assert isinstance(db_model.id, int)
app_metadata = AppMetadata.from_db_model(db_model)
# 2. APK
apk_path = app_metadata.get_apk_path()
os.rename(self._uploaded_apk_path, apk_path)
# 3. Icon
icon_path = app_metadata.get_icon_path()
with open(icon_path, "wb") as icon_file:
icon_file.write(self._parsed_apk.uniform_png_app_icon)
return app_metadata
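# Usage sketch (the locking helper named below is an assumption, not part of this
# module): per the class docstring, callers must hold the app-storage lock before
# invoking the public method.
#
#   with app_storage_lock():                    # hypothetical lock context manager
#       metadata = AppAdder(uploaded_apk_path).add_app_while_locked()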
| 1.164063 | 1 |
library/libvirt_filter.py | bkmeneguello/ansible-role-libvirt | 1 | 2908 | # TODO: https://libvirt.org/formatnwfilter.html | 0.960938 | 1 |
estafeta/core/__init__.py | Solunest/pyestafeta | 0 | 2909 | <reponame>Solunest/pyestafeta<filename>estafeta/core/__init__.py
from estafeta.core.client import EstafetaClient
user = None
password = None  # placeholder credential; supply a real value via configuration
id = None
account_number = None
production = None
from estafeta.core.error import EstafetaWrongData, EstafetaEmptyField
__url_label__ = [
'https://labelqa.estafeta.com/EstafetaLabel20/services/EstafetaLabelWS?wsdl',
'https://label.estafeta.com/EstafetaLabel20/services/EstafetaLabelWS?wsdl',
]
__url_tracking__ = [
'https://trackingqa.estafeta.com/Service.asmx?wsdl',
'https://tracking.estafeta.com/Service.asmx?wsdl',
]
__url_quote__ = [
'http://frecuenciacotizador.estafeta.com/Service.asmx?wsdl',
'http://frecuenciacotizador.estafeta.com/Service.asmx?wsdl',
]
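# In each URL list above, index 0 is the QA/test endpoint and index 1 is the
# production endpoint; a caller would pick one with something along the lines of
# __url_label__[1 if production else 0] (a sketch of the intended selection, not
# part of the published API).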
| 1.695313 | 2 |
yunionclient/api/flavors.py | tb365/mcclient_python | 3 | 2910 | <gh_stars>1-10
from yunionclient.common import base
class FlavorManager(base.StandaloneManager):
keyword = 'flavor'
keyword_plural = 'flavors'
_columns = ['ID', 'Name', 'VCPU_count', 'VMEM_size', 'Disk_size',
'Disk_backend', 'Ext_Bandwidth', 'Int_Bandwidth', 'is_public',
'Description', 'Aggregate_strategy', 'Flavor_type']
| 1.335938 | 1 |
char_map.py | rakib313/Bangla-End2End-Speech-Recognition | 0 | 2911 | <reponame>rakib313/Bangla-End2End-Speech-Recognition
"""
Defines two dictionaries for converting
between text and integer sequences.
"""
char_map_str = """
' 0
<SPACE> 1
ব 2
া 3
ং 4
ল 5
দ 6
ে 7
শ 8
য 9
় 10
ি 11
ত 12
্ 13
ন 14
এ 15
ধ 16
র 17
ণ 18
ক 19
ড 20
হ 21
উ 22
প 23
জ 24
অ 25
থ 26
স 27
ষ 28
ই 29
আ 30
ছ 31
গ 32
ু 33
ো 34
ও 35
ভ 36
ী 37
ট 38
ূ 39
ম 40
ৈ 41
ৃ 42
ঙ 43
খ 44
ঃ 45
১ 46
৯ 47
৬ 48
০ 49
২ 50
চ 51
ঘ 52
ৎ 53
৫ 54
৪ 55
ফ 56
ৌ 57
৮ 58
ঁ 59
য় 60
৩ 61
ঢ 62
ঠ 63
৭ 64
ড় 65
ঝ 66
ঞ 67
ঔ 68
ঈ 69
v 70
b 71
s 72
ঐ 73
2 74
0 75
1 76
4 77
f 78
o 79
t 80
a 81
l 82
w 83
r 84
d 85
c 86
u 87
p 88
n 89
g 90
ঋ 91
i 92
z 93
m 94
e 95
ঊ 96
h 97
x 98
3 99
5 100
y 101
9 102
ৗ 103
j 104
œ 105
8 106
ঢ় 107
k 108
ৰ 109
"""
# the "blank" character is mapped to 28
char_map = {}
index_map = {}
for line in char_map_str.strip().split('\n'):
ch, index = line.split()
char_map[ch] = int(index)
index_map[int(index)+1] = ch
index_map[2] = ' ' | 3.390625 | 3 |
app.py | MaggieChege/New_App | 0 | 2912 | from flask import Blueprint
from flask_restful import Api
# from restful import Api
from resources.Hello import CategoryResource
api_bp = Blueprint('api', __name__)
api = Api(api_bp)
# Route
api.add_resource(CategoryResource, '/Hello') | 2.296875 | 2 |
websockets.py | ejojmjn/indiana-phone | 0 | 2913 | #from gevent import monkey
#monkey.patch_all()
from flask import Flask, render_template, json
from flask_socketio import SocketIO, emit
from pydbus import SystemBus
from gi.repository import GLib
import threading
import json
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app, async_mode='threading')
#socketio = SocketIO(app)
#Message: (':1.654', '/hfp/org/bluez/hci0/dev_94_65_2D_84_61_99', 'org.ofono.Modem', 'PropertyChanged', ('Powered', False))
#Data: Powered
bus = SystemBus()
def cb_server_signal_emission(*args):
print("Message: ", args)
makedev = lambda path : path.split('/')[-1]
iface = args[2]
if 'org.ofono.Modem' in iface:
if 'PropertyChanged' in args[3]:
message = { 'source': 'modem', 'event': 'property_change', 'device': makedev(args[1]), 'property': args[4][0], 'property_value': args[4][1] }
else:
message = {'unknown_signal': args }
elif 'org.ofono.NetworkRegistration' in iface:
if 'PropertyChanged' in args[3]:
message = { 'source': 'network', 'event': 'property_change', 'device': makedev(args[1]), 'property': args[4][0], 'property_value': args[4][1] }
else:
message = {'unknown_signal': args }
elif 'ofono.VoiceCallManager' in iface:
if 'CallAdded' in args[3]:
message = { 'source': 'callmgr', 'event': 'call_added', 'device': makedev(args[1]), 'properties': args[4][1] }
elif 'CallRemoved' in args[3]:
message = { 'source': 'callmgr', 'event': 'call_removed', 'device': makedev(args[1]) }
else:
message = {'unknown_signal': args }
elif 'ofono.VoiceCall' in iface:
if 'PropertyChanged' in args[3]:
message = { 'source': 'call', 'event': 'property_change', 'device': makedev(args[1]), 'property': args[4][0], 'property_value': args[4][1] }
else:
message = {'unknown_signal': args }
socketio.emit('message', json.dumps(message))
def dbus_monitor():
bus.subscribe(iface = 'org.ofono.Modem',
signal_fired = cb_server_signal_emission)
bus.subscribe(iface = 'org.ofono.NetworkRegistration',
signal_fired = cb_server_signal_emission)
print(bus)
bus.subscribe(iface = 'org.ofono.VoiceCallManager',
signal_fired = cb_server_signal_emission)
print(bus)
bus.subscribe(iface = 'org.ofono.VoiceCall',
signal_fired = cb_server_signal_emission)
loop = GLib.MainLoop()
loop.run()
@app.route('/')
def index():
return '''
<html>
<head>
<script type="text/javascript" src="//cdnjs.cloudflare.com/ajax/libs/socket.io/1.3.6/socket.io.min.js"></script>
<script type="text/javascript" charset="utf-8">
var socket = io.connect('http://' + document.domain + ':' + location.port);
socket.on('connect', function() {
socket.emit('connected', {data: 'Client connected!'});
});
socket.on('message', function(message) {
console.log('The server has a message for you: ' + message);
var t = document.getElementById("logbox");
t.value = t.value + 'MESSAGE: ' + message + '\\n';
});
</script>
</head>
<body>
<textarea id="logbox" width="100" rows="10"></textarea>
<br>
<button onclick="document.getElementById('logbox').value='';">Clear</button>
</body>
</html>
'''
@socketio.on('my event')
def handle_my_custom_event(arg1):
emit('message', {'data': 42})
if __name__ == '__main__':
t = threading.Thread(target=dbus_monitor)
t.daemon = True
t.start()
socketio.run(app, host='0.0.0.0', port=5001)
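# For reference, a payload pushed to browser clients by cb_server_signal_emission
# looks roughly like the following (field values are illustrative only):
#   {"source": "callmgr", "event": "call_added",
#    "device": "dev_94_65_2D_84_61_99",
#    "properties": {"State": "incoming", "LineIdentification": "+15551234567"}}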
| 2.140625 | 2 |
tests/pytests/test_tags.py | wayn111/RediSearch | 0 | 2914 | <filename>tests/pytests/test_tags.py
# -*- coding: utf-8 -*-
from includes import *
from common import *
def search(env, r, *args):
return r.execute_command('ft.search', *args)
def testTagIndex(env):
r = env
env.expect('ft.create', 'idx', 'ON', 'HASH','schema', 'title', 'text', 'tags', 'tag').ok()
N = 10
for n in range(N):
env.expect('ft.add', 'idx', 'doc%d' % n, 1.0, 'fields',
'title', 'hello world term%d' % n, 'tags', 'foo bar,xxx,tag %d' % n).ok()
for _ in r.retry_with_rdb_reload():
waitForIndex(r, 'idx')
res = env.cmd('ft.search', 'idx', 'hello world')
env.assertEqual(10, res[0])
res = env.cmd('ft.search', 'idx', 'foo bar')
env.assertEqual(0, res[0])
res = env.cmd('ft.search', 'idx', '@tags:{foo bar}')
env.assertEqual(N, res[0])
# inorder should not affect tags
res = env.cmd(
'ft.search', 'idx', '@tags:{tag 1} @tags:{foo bar}', 'slop', '0', 'inorder')
env.assertEqual(1, res[0])
for n in range(N - 1):
res = env.cmd(
'ft.search', 'idx', '@tags:{tag %d}' % n, 'nocontent')
env.assertEqual(1, res[0])
env.assertEqual('doc%d' % n, res[1])
res = env.cmd(
'ft.search', 'idx', '@tags:{tag\\ %d}' % n, 'nocontent')
env.assertEqual(1, res[0])
res = env.cmd(
'ft.search', 'idx', 'hello world @tags:{tag\\ %d|tag %d}' % (n, n + 1), 'nocontent')
env.assertEqual(2, res[0])
res = py2sorted(res[1:])
env.assertEqual('doc%d' % n, res[0])
env.assertEqual('doc%d' % (n + 1), res[1])
res = env.cmd(
'ft.search', 'idx', 'term%d @tags:{tag %d}' % (n, n), 'nocontent')
env.assertEqual(1, res[0])
env.assertEqual('doc%d' % n, res[1])
def testSeparator(env):
r = env
env.expect(
'ft.create', 'idx', 'ON', 'HASH',
'schema', 'title', 'text', 'tags', 'tag', 'separator', ':').ok()
env.expect('ft.add', 'idx', 'doc1', 1.0, 'fields',
'title', 'hello world', 'tags', 'x:hello world: fooz bar:foo,bar:BOO FAR').ok()
for _ in r.retry_with_rdb_reload():
waitForIndex(r, 'idx')
for q in ('@tags:{hello world}', '@tags:{fooz bar}', '@tags:{foo\\,bar}', '@tags:{boo\\ far}', '@tags:{x}'):
res = env.cmd('ft.search', 'idx', q)
env.assertEqual(1, res[0])
def testTagPrefix(env):
env.skipOnCluster()
r = env
env.expect(
'ft.create', 'idx', 'ON', 'HASH',
'schema', 'title', 'text', 'tags', 'tag', 'separator', ',').ok()
env.expect('ft.add', 'idx', 'doc1', 1.0, 'fields', 'title', 'hello world',
'tags', 'hello world,hello-world,hell,jell').ok()
env.expect('FT.DEBUG', 'dump_tagidx', 'idx', 'tags') \
.equal([['hell', [1]], ['hello world', [1]], ['hello-world', [1]], ['jell', [1]]])
for _ in r.retry_with_rdb_reload():
waitForIndex(r, 'idx')
for q in ('@tags:{hello world}', '@tags:{hel*}', '@tags:{hello\\-*}', '@tags:{he*}'):
res = env.cmd('ft.search', 'idx', q)
env.assertEqual(res[0], 1)
def testTagFieldCase(env):
r = env
env.expect(
'ft.create', 'idx', 'ON', 'HASH',
'schema', 'title', 'text', 'TAgs', 'tag').ok()
env.expect('ft.add', 'idx', 'doc1', 1.0, 'fields',
'title', 'hello world', 'TAgs', 'HELLO WORLD,FOO BAR').ok()
for _ in r.retry_with_rdb_reload():
waitForIndex(r, 'idx')
env.assertListEqual([0], r.execute_command(
'FT.SEARCH', 'idx', '@tags:{HELLO WORLD}'))
env.assertListEqual([1, 'doc1'], r.execute_command(
'FT.SEARCH', 'idx', '@TAgs:{HELLO WORLD}', 'NOCONTENT'))
env.assertListEqual([1, 'doc1'], r.execute_command(
'FT.SEARCH', 'idx', '@TAgs:{foo bar}', 'NOCONTENT'))
env.assertListEqual([0], r.execute_command(
'FT.SEARCH', 'idx', '@TAGS:{foo bar}', 'NOCONTENT'))
def testInvalidSyntax(env):
r = env
# invalid syntax
with env.assertResponseError():
r.execute_command(
'ft.create', 'idx', 'ON', 'HASH',
'schema', 'title', 'text', 'tags', 'tag', 'separator')
with env.assertResponseError():
r.execute_command(
'ft.create', 'idx', 'ON', 'HASH',
'schema', 'title', 'text', 'tags', 'tag', 'separator', "foo")
with env.assertResponseError():
r.execute_command(
'ft.create', 'idx', 'ON', 'HASH',
'schema', 'title', 'text', 'tags', 'tag', 'separator', "")
def testTagVals(env):
r = env
r.execute_command(
'ft.create', 'idx', 'ON', 'HASH',
'schema', 'title', 'text', 'tags', 'tag', 'othertags', 'tag')
N = 100
alltags = set()
for n in range(N):
tags = ('foo %d' % n, 'bar %d' % n, 'x')
alltags.add(tags[0])
alltags.add(tags[1])
alltags.add(tags[2])
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % n, 1.0, 'fields',
'tags', ','.join(tags), 'othertags', 'baz %d' % int(n // 2)))
for _ in r.retry_with_rdb_reload():
waitForIndex(r, 'idx')
res = r.execute_command('ft.tagvals', 'idx', 'tags')
env.assertEqual(N * 2 + 1, len(res))
env.assertEqual(alltags, set(res))
res = r.execute_command('ft.tagvals', 'idx', 'othertags')
env.assertEqual(N / 2, len(res))
env.expect('ft.tagvals', 'idx').raiseError()
env.expect('ft.tagvals', 'idx', 'idx', 'idx').raiseError()
env.expect('ft.tagvals', 'fake_idx', 'tags').raiseError()
env.expect('ft.tagvals', 'idx', 'fake_tags').raiseError()
env.expect('ft.tagvals', 'idx', 'title').raiseError()
def testSearchNotExistsTagValue(env):
    # this test basically makes sure we are not leaking
env.expect('FT.CREATE idx ON HASH SCHEMA t TAG SORTABLE').ok()
env.expect('FT.SEARCH idx @t:{val}').equal([0])
def testIssue1305(env):
env.expect('FT.CREATE myIdx ON HASH SCHEMA title TAG').ok()
env.expect('FT.ADD myIdx doc2 1.0 FIELDS title "work"').ok()
env.expect('FT.ADD myIdx doc2 1.0 FIELDS title "hello"').error()
env.expect('FT.ADD myIdx doc3 1.0 FIELDS title "hello"').ok()
env.expect('FT.ADD myIdx doc1 1.0 FIELDS title "hello,work"').ok()
expectedRes = {'doc1' : ['inf', ['title', '"hello,work"']], 'doc3' : ['inf', ['title', '"hello"']], 'doc2' : ['inf', ['title', '"work"']]}
res = env.cmd('ft.search', 'myIdx', '~@title:{wor} ~@title:{hell}', 'WITHSCORES')[1:]
res = {res[i]:res[i + 1: i + 3] for i in range(0, len(res), 3)}
env.assertEqual(res, expectedRes)
def testTagCaseSensitive(env):
conn = getConnectionByEnv(env)
env.expect('FT.CREATE idx1 SCHEMA t TAG').ok()
env.expect('FT.CREATE idx2 SCHEMA t TAG CASESENSITIVE').ok()
env.expect('FT.CREATE idx3 SCHEMA t TAG SEPARATOR .').ok()
env.expect('FT.CREATE idx4 SCHEMA t TAG SEPARATOR . CASESENSITIVE').ok()
env.expect('FT.CREATE idx5 SCHEMA t TAG CASESENSITIVE SEPARATOR .').ok()
conn.execute_command('HSET', 'doc1', 't', 'foo,FOO')
conn.execute_command('HSET', 'doc2', 't', 'FOO')
conn.execute_command('HSET', 'doc3', 't', 'foo')
if not env.is_cluster():
conn.execute_command('FT.CONFIG', 'SET', 'FORK_GC_CLEAN_THRESHOLD', '0')
env.expect('FT.DEBUG', 'dump_tagidx', 'idx1', 't').equal([['foo', [1, 2, 3]]])
env.expect('FT.DEBUG', 'dump_tagidx', 'idx2', 't').equal([['foo', [1, 3]], ['FOO', [1, 2]]])
env.expect('FT.DEBUG', 'dump_tagidx', 'idx3', 't').equal([['foo', [2, 3]], ['foo,foo', [1]]])
env.expect('FT.DEBUG', 'dump_tagidx', 'idx4', 't').equal([['foo', [3]], ['foo,FOO', [1]], ['FOO', [2]]])
env.expect('FT.DEBUG', 'dump_tagidx', 'idx5', 't').equal([['foo', [3]], ['foo,FOO', [1]], ['FOO', [2]]])
env.expect('FT.SEARCH', 'idx1', '@t:{FOO}') \
.equal([3, 'doc1', ['t', 'foo,FOO'], 'doc2', ['t', 'FOO'], 'doc3', ['t', 'foo']])
env.expect('FT.SEARCH', 'idx1', '@t:{foo}') \
.equal([3, 'doc1', ['t', 'foo,FOO'], 'doc2', ['t', 'FOO'], 'doc3', ['t', 'foo']])
env.expect('FT.SEARCH', 'idx2', '@t:{FOO}') \
.equal([2, 'doc1', ['t', 'foo,FOO'], 'doc2', ['t', 'FOO']])
env.expect('FT.SEARCH', 'idx2', '@t:{foo}') \
.equal([2, 'doc1', ['t', 'foo,FOO'], 'doc3', ['t', 'foo']])
conn.execute_command('HSET', 'doc1', 't', 'f o,F O')
conn.execute_command('HSET', 'doc2', 't', 'F O')
conn.execute_command('HSET', 'doc3', 't', 'f o')
if not env.is_cluster():
forceInvokeGC(env, 'idx1')
forceInvokeGC(env, 'idx2')
forceInvokeGC(env, 'idx3')
forceInvokeGC(env, 'idx4')
forceInvokeGC(env, 'idx5')
env.expect('FT.DEBUG', 'dump_tagidx', 'idx1', 't').equal([['f o', [4, 5, 6]]])
env.expect('FT.DEBUG', 'dump_tagidx', 'idx2', 't').equal([['f o', [4, 6]], ['F O', [4, 5]]])
env.expect('FT.DEBUG', 'dump_tagidx', 'idx3', 't').equal([['f o', [5, 6]], ['f o,f o', [4]]])
env.expect('FT.DEBUG', 'dump_tagidx', 'idx4', 't').equal([['f o', [6]], ['f o,F O', [4]], ['F O', [5]]])
env.expect('FT.DEBUG', 'dump_tagidx', 'idx5', 't').equal([['f o', [6]], ['f o,F O', [4]], ['F O', [5]]])
# not casesensitive
env.expect('FT.SEARCH', 'idx1', '@t:{F\\ O}') \
.equal([3, 'doc1', ['t', 'f o,F O'], 'doc2', ['t', 'F O'], 'doc3', ['t', 'f o']])
env.expect('FT.SEARCH', 'idx1', '@t:{f\\ o}') \
.equal([3, 'doc1', ['t', 'f o,F O'], 'doc2', ['t', 'F O'], 'doc3', ['t', 'f o']])
# casesensitive
env.expect('FT.SEARCH', 'idx2', '@t:{F\\ O}') \
.equal([2, 'doc1', ['t', 'f o,F O'], 'doc2', ['t', 'F O']])
env.expect('FT.SEARCH', 'idx2', '@t:{f\\ o}') \
.equal([2, 'doc1', ['t', 'f o,F O'], 'doc3', ['t', 'f o']])
# not casesensitive
env.expect('FT.SEARCH', 'idx3', '@t:{f\\ o\\,f\\ o}') \
.equal([1, 'doc1', ['t', 'f o,F O']])
env.expect('FT.SEARCH', 'idx3', '@t:{f\\ o\\,F\\ O}') \
.equal([1, 'doc1', ['t', 'f o,F O']])
env.expect('FT.SEARCH', 'idx3', '@t:{F\\ O\\,F\\ O}') \
.equal([1, 'doc1', ['t', 'f o,F O']])
env.expect('FT.SEARCH', 'idx3', '@t:{F\\ O}') \
.equal([2, 'doc2', ['t', 'F O'], 'doc3', ['t', 'f o']])
env.expect('FT.SEARCH', 'idx3', '@t:{f\\ o}') \
.equal([2, 'doc2', ['t', 'F O'], 'doc3', ['t', 'f o']])
# casesensitive
env.expect('FT.SEARCH', 'idx4', '@t:{f\\ o\\,f\\ o}') \
.equal([0])
env.expect('FT.SEARCH', 'idx4', '@t:{f\\ o\\,F\\ O}') \
.equal([1, 'doc1', ['t', 'f o,F O']])
env.expect('FT.SEARCH', 'idx4', '@t:{F\\ O\\,F\\ O}') \
.equal([0])
env.expect('FT.SEARCH', 'idx4', '@t:{F\\ O}') \
.equal([1, 'doc2', ['t', 'F O']])
env.expect('FT.SEARCH', 'idx4', '@t:{f\\ o}') \
.equal([1, 'doc3', ['t', 'f o']])
def testTagGCClearEmpty(env):
env.skipOnCluster()
conn = getConnectionByEnv(env)
conn.execute_command('FT.CONFIG', 'SET', 'FORK_GC_CLEAN_THRESHOLD', '0')
conn.execute_command('FT.CREATE', 'idx', 'SCHEMA', 't', 'TAG')
conn.execute_command('HSET', 'doc1', 't', 'foo')
conn.execute_command('HSET', 'doc2', 't', 'bar')
conn.execute_command('HSET', 'doc3', 't', 'baz')
env.expect('FT.DEBUG', 'DUMP_TAGIDX', 'idx', 't').equal([['foo', [1]], ['bar', [2]], ['baz', [3]]])
env.expect('FT.SEARCH', 'idx', '@t:{foo}').equal([1, 'doc1', ['t', 'foo']])
# delete two tags
conn.execute_command('DEL', 'doc1')
conn.execute_command('DEL', 'doc2')
forceInvokeGC(env, 'idx')
env.expect('FT.DEBUG', 'DUMP_TAGIDX', 'idx', 't').equal([['baz', [3]]])
env.expect('FT.SEARCH', 'idx', '@t:{foo}').equal([0])
# delete last tag
conn.execute_command('DEL', 'doc3')
forceInvokeGC(env, 'idx')
env.expect('FT.DEBUG', 'DUMP_TAGIDX', 'idx', 't').equal([])
# check term can be used after being empty
conn.execute_command('HSET', 'doc4', 't', 'foo')
conn.execute_command('HSET', 'doc5', 't', 'foo')
env.expect('FT.SEARCH', 'idx', '@t:{foo}') \
.equal([2, 'doc4', ['t', 'foo'], 'doc5', ['t', 'foo']])
def testTagGCClearEmptyWithCursor(env):
env.skipOnCluster()
conn = getConnectionByEnv(env)
conn.execute_command('FT.CONFIG', 'SET', 'FORK_GC_CLEAN_THRESHOLD', '0')
conn.execute_command('FT.CREATE', 'idx', 'SCHEMA', 't', 'TAG')
conn.execute_command('HSET', 'doc1', 't', 'foo')
conn.execute_command('HSET', 'doc2', 't', 'foo')
env.expect('FT.DEBUG', 'DUMP_TAGIDX', 'idx', 't').equal([['foo', [1, 2]]])
res, cursor = env.cmd('FT.AGGREGATE', 'idx', '@t:{foo}', 'WITHCURSOR', 'COUNT', '1')
env.assertEqual(res, [1, []])
# delete both documents and run the GC to clean 'foo' inverted index
env.expect('DEL', 'doc1').equal(1)
env.expect('DEL', 'doc2').equal(1)
forceInvokeGC(env, 'idx')
# make sure the inverted index was cleaned
env.expect('FT.DEBUG', 'DUMP_TAGIDX', 'idx', 't').equal([])
# read from the cursor
res, cursor = env.cmd('FT.CURSOR', 'READ', 'idx', cursor)
env.assertEqual(res, [0])
env.assertEqual(cursor, 0)
def testTagGCClearEmptyWithCursorAndMoreData(env):
env.skipOnCluster()
conn = getConnectionByEnv(env)
conn.execute_command('FT.CONFIG', 'SET', 'FORK_GC_CLEAN_THRESHOLD', '0')
conn.execute_command('FT.CREATE', 'idx', 'SCHEMA', 't', 'TAG')
conn.execute_command('HSET', 'doc1', 't', 'foo')
conn.execute_command('HSET', 'doc2', 't', 'foo')
env.expect('FT.DEBUG', 'DUMP_TAGIDX', 'idx', 't').equal([['foo', [1, 2]]])
res, cursor = env.cmd('FT.AGGREGATE', 'idx', '@t:{foo}', 'WITHCURSOR', 'COUNT', '1')
env.assertEqual(res, [1, []])
# delete both documents and run the GC to clean 'foo' inverted index
env.expect('DEL', 'doc1').equal(1)
env.expect('DEL', 'doc2').equal(1)
forceInvokeGC(env, 'idx')
# make sure the inverted index was cleaned
env.expect('FT.DEBUG', 'DUMP_TAGIDX', 'idx', 't').equal([])
# add data
conn.execute_command('HSET', 'doc3', 't', 'foo')
conn.execute_command('HSET', 'doc4', 't', 'foo')
env.expect('FT.DEBUG', 'DUMP_TAGIDX', 'idx', 't').equal([['foo', [3, 4]]])
# read from the cursor
res, cursor = conn.execute_command('FT.CURSOR', 'READ', 'idx', cursor)
env.assertEqual(res, [0])
env.assertEqual(cursor, 0)
# ensure later documents with same tag are read
res = conn.execute_command('FT.AGGREGATE', 'idx', '@t:{foo}')
env.assertEqual(res, [1, [], []])
@unstable
def testEmptyTagLeak(env):
env.skipOnCluster()
cycles = 1
tags = 30
conn = getConnectionByEnv(env)
conn.execute_command('FT.CONFIG', 'SET', 'FORK_GC_CLEAN_THRESHOLD', '0')
conn.execute_command('FT.CREATE', 'idx', 'SCHEMA', 't', 'TAG')
pl = conn.pipeline()
for i in range(cycles):
for j in range(tags):
x = j + i * tags
pl.execute_command('HSET', 'doc{}'.format(x), 't', 'tag{}'.format(x))
pl.execute()
for j in range(tags):
pl.execute_command('DEL', 'doc{}'.format(j + i * tags))
pl.execute()
forceInvokeGC(env, 'idx')
env.expect('FT.DEBUG', 'DUMP_TAGIDX', 'idx', 't').equal([])
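# A minimal sketch (not part of the original suite) summarising the tag-field options
# exercised above. It reuses the same RLTest `env` fixture and FT.* commands as the tests;
# the index and document names are illustrative only.
def sketchTagCaseSensitivity(env):
    conn = getConnectionByEnv(env)
    # A default TAG field folds case, so 'foo' and 'FOO' share one tag entry.
    env.expect('FT.CREATE', 'sketch_idx_plain', 'SCHEMA', 't', 'TAG').ok()
    # CASESENSITIVE keeps 'foo' and 'FOO' as distinct tags.
    env.expect('FT.CREATE', 'sketch_idx_cs', 'SCHEMA', 't', 'TAG', 'CASESENSITIVE').ok()
    conn.execute_command('HSET', 'sketch_doc', 't', 'foo,FOO')
    # The case-folded index matches any spelling of the tag.
    env.expect('FT.SEARCH', 'sketch_idx_plain', '@t:{Foo}').equal(
        [1, 'sketch_doc', ['t', 'foo,FOO']])
    # The case-sensitive index only matches the exact spellings that were stored.
    env.expect('FT.SEARCH', 'sketch_idx_cs', '@t:{Foo}').equal([0])
    env.expect('FT.SEARCH', 'sketch_idx_cs', '@t:{FOO}').equal(
        [1, 'sketch_doc', ['t', 'foo,FOO']])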
| 2.140625 | 2 |
sc2/bot_ai.py | Lexa307/PhotonDefender | 2 | 2915 | <filename>sc2/bot_ai.py
import itertools
import logging
import math
import random
from collections import Counter
from typing import Any, Dict, List, Optional, Set, Tuple, Union # mypy type checking
from .cache import property_cache_forever, property_cache_once_per_frame
from .data import ActionResult, Alert, Attribute, Race, Result, Target, race_gas, race_townhalls, race_worker
from .game_data import AbilityData, GameData
# imports for mypy and pycharm autocomplete
from .game_state import GameState
from .ids.ability_id import AbilityId
from .ids.unit_typeid import UnitTypeId
from .ids.upgrade_id import UpgradeId
from .pixel_map import PixelMap
from .position import Point2, Point3
from .unit import Unit
from .units import Units
logger = logging.getLogger(__name__)
class BotAI:
"""Base class for bots."""
EXPANSION_GAP_THRESHOLD = 15
def __init__(self):
# Specific opponent bot ID used in sc2ai ladder games http://sc2ai.net/
# The bot ID will stay the same each game so your bot can "adapt" to the opponent
self.opponent_id: int = None
self.units: Units = None
self.workers: Units = None
self.townhalls: Units = None
self.geysers: Units = None
self.minerals: int = None
self.vespene: int = None
self.supply_army: Union[float, int] = None
self.supply_workers: Union[float, int] = None # Doesn't include workers in production
self.supply_cap: Union[float, int] = None
self.supply_used: Union[float, int] = None
self.supply_left: Union[float, int] = None
self.idle_worker_count: int = None
self.army_count: int = None
self.warp_gate_count: int = None
self.larva_count: int = None
self.cached_known_enemy_structures = None
self.cached_known_enemy_units = None
@property
def enemy_race(self) -> Race:
assert len(self._game_info.player_races) == 2, "enemy_race not available"
self.enemy_id = 3 - self.player_id
return Race(self._game_info.player_races[self.enemy_id])
@property
def time(self) -> Union[int, float]:
""" Returns time in seconds, assumes the game is played on 'faster' """
return self.state.game_loop / 22.4 # / (1/1.4) * (1/16)
@property
def time_formatted(self) -> str:
""" Returns time as string in min:sec format """
t = self.time
return f"{int(t // 60):02}:{int(t % 60):02}"
@property
def game_info(self) -> "GameInfo":
return self._game_info
def alert(self, alert_code: Alert) -> bool:
"""
Check if alert is triggered in the current step.
Example use:
from sc2.data import Alert
if self.alert(Alert.AddOnComplete):
print("Addon Complete")
Alert codes:
AlertError
AddOnComplete
BuildingComplete
BuildingUnderAttack
LarvaHatched
MergeComplete
MineralsExhausted
MorphComplete
MothershipComplete
MULEExpired
NuclearLaunchDetected
NukeComplete
NydusWormDetected
ResearchComplete
TrainError
TrainUnitComplete
TrainWorkerComplete
TransformationComplete
UnitUnderAttack
UpgradeComplete
VespeneExhausted
WarpInComplete
"""
assert isinstance(alert_code, Alert), f"alert_code {alert_code} is no Alert"
return alert_code.value in self.state.alerts
@property
def start_location(self) -> Point2:
return self._game_info.player_start_location
@property
def enemy_start_locations(self) -> List[Point2]:
"""Possible start locations for enemies."""
return self._game_info.start_locations
@property_cache_once_per_frame
def known_enemy_units(self) -> Units:
"""List of known enemy units, including structures."""
return self.state.enemy_units
@property_cache_once_per_frame
def known_enemy_structures(self) -> Units:
"""List of known enemy units, structures only."""
return self.state.enemy_units.structure
@property
def main_base_ramp(self) -> "Ramp":
""" Returns the Ramp instance of the closest main-ramp to start location.
Look in game_info.py for more information """
if hasattr(self, "cached_main_base_ramp"):
return self.cached_main_base_ramp
# The reason for len(ramp.upper) in {2, 5} is:
# ParaSite map has 5 upper points, and most other maps have 2 upper points at the main ramp.
# The map Acolyte has 4 upper points at the wrong ramp (which is closest to the start position).
try:
self.cached_main_base_ramp = min(
(ramp for ramp in self.game_info.map_ramps if len(ramp.upper) in {2, 5}),
key=lambda r: self.start_location.distance_to(r.top_center),
)
except ValueError:
# Hardcoded hotfix for Honorgrounds LE map, as that map has a large main base ramp with inbase natural
self.cached_main_base_ramp = min(
(ramp for ramp in self.game_info.map_ramps if len(ramp.upper) in {4, 9}),
key=lambda r: self.start_location.distance_to(r.top_center),
)
return self.cached_main_base_ramp
@property_cache_forever
def expansion_locations(self) -> Dict[Point2, Units]:
"""
Returns dict with the correct expansion position Point2 object as key,
resources (mineral field and vespene geyser) as value.
"""
# Idea: create a group for every resource, then merge these groups if
# any resource in a group is closer than 6 to any resource of another group
# Distance we group resources by
RESOURCE_SPREAD_THRESHOLD = 8.5
geysers = self.state.vespene_geyser
# Create a group for every resource
resource_groups = [[resource] for resource in self.state.resources]
# Loop the merging process as long as we change something
found_something = True
while found_something:
found_something = False
# Check every combination of two groups
for group_a, group_b in itertools.combinations(resource_groups, 2):
# Check if any pair of resource of these groups is closer than threshold together
if any(
resource_a.distance_to(resource_b) <= RESOURCE_SPREAD_THRESHOLD
for resource_a, resource_b in itertools.product(group_a, group_b)
):
# Remove the single groups and add the merged group
resource_groups.remove(group_a)
resource_groups.remove(group_b)
resource_groups.append(group_a + group_b)
found_something = True
break
# Distance offsets we apply to center of each resource group to find expansion position
offset_range = 7
offsets = [
(x, y)
for x, y in itertools.product(range(-offset_range, offset_range + 1), repeat=2)
if math.hypot(x, y) <= 8
]
# Dict we want to return
centers = {}
# For every resource group:
for resources in resource_groups:
# Possible expansion points
amount = len(resources)
# Calculate center, round and add 0.5 because expansion location will have (x.5, y.5)
# coordinates because bases have size 5.
center_x = int(sum(resource.position.x for resource in resources) / amount) + 0.5
center_y = int(sum(resource.position.y for resource in resources) / amount) + 0.5
possible_points = (Point2((offset[0] + center_x, offset[1] + center_y)) for offset in offsets)
# Filter out points that are too near
possible_points = (
point
for point in possible_points
# Check if point can be built on
if self._game_info.placement_grid[point.rounded] == 1
# Check if all resources have enough space to point
and all(point.distance_to(resource) > (7 if resource in geysers else 6) for resource in resources)
)
# Choose best fitting point
result = min(possible_points, key=lambda point: sum(point.distance_to(resource) for resource in resources))
centers[result] = resources
return centers
def _correct_zerg_supply(self):
""" The client incorrectly rounds zerg supply down instead of up (see
https://github.com/Blizzard/s2client-proto/issues/123), so self.supply_used
and friends return the wrong value when there are an odd number of zerglings
and banelings. This function corrects the bad values. """
# TODO: remove when Blizzard/sc2client-proto#123 gets fixed.
half_supply_units = {
UnitTypeId.ZERGLING,
UnitTypeId.ZERGLINGBURROWED,
UnitTypeId.BANELING,
UnitTypeId.BANELINGBURROWED,
UnitTypeId.BANELINGCOCOON,
}
correction = self.units(half_supply_units).amount % 2
self.supply_used += correction
self.supply_army += correction
self.supply_left -= correction
async def get_available_abilities(
self, units: Union[List[Unit], Units], ignore_resource_requirements=False
) -> List[List[AbilityId]]:
""" Returns available abilities of one or more units. Right know only checks cooldown, energy cost, and whether the ability has been researched.
Example usage:
units_abilities = await self.get_available_abilities(self.units)
or
units_abilities = await self.get_available_abilities([self.units.random]) """
return await self._client.query_available_abilities(units, ignore_resource_requirements)
async def expand_now(
self, building: UnitTypeId = None, max_distance: Union[int, float] = 10, location: Optional[Point2] = None
):
""" Not recommended as this function uses 'self.do' (reduces performance).
Finds the next possible expansion via 'self.get_next_expansion()'. If the target expansion is blocked (e.g. an enemy unit), it will misplace the expansion. """
if not building:
# self.race is never Race.Random
start_townhall_type = {
Race.Protoss: UnitTypeId.NEXUS,
Race.Terran: UnitTypeId.COMMANDCENTER,
Race.Zerg: UnitTypeId.HATCHERY,
}
building = start_townhall_type[self.race]
assert isinstance(building, UnitTypeId)
if not location:
location = await self.get_next_expansion()
await self.build(building, near=location, max_distance=max_distance, random_alternative=False, placement_step=1)
async def get_next_expansion(self) -> Optional[Point2]:
"""Find next expansion location."""
closest = None
distance = math.inf
for el in self.expansion_locations:
def is_near_to_expansion(t):
return t.distance_to(el) < self.EXPANSION_GAP_THRESHOLD
if any(map(is_near_to_expansion, self.townhalls)):
# already taken
continue
startp = self._game_info.player_start_location
d = await self._client.query_pathing(startp, el)
if d is None:
continue
if d < distance:
distance = d
closest = el
return closest
async def distribute_workers(self, resource_ratio: float = 2):
"""
Distributes workers across all the bases taken.
Keyword `resource_ratio` takes a float. If the current minerals to gas
ratio is bigger than `resource_ratio`, this function prefer filling geysers
first, if it is lower, it will prefer sending workers to minerals first.
This is only for workers that need to be moved anyways, it will NOT will
geysers on its own.
NOTE: This function is far from optimal, if you really want to have
refined worker control, you should write your own distribution function.
For example long distance mining control and moving workers if a base was killed
are not being handled.
WARNING: This is quite slow when there are lots of workers or multiple bases.
"""
if not self.state.mineral_field or not self.workers or not self.townhalls.ready:
return
actions = []
worker_pool = [worker for worker in self.workers.idle]
bases = self.townhalls.ready
geysers = self.geysers.ready
# list of places that need more workers
deficit_mining_places = []
for mining_place in bases | geysers:
difference = mining_place.surplus_harvesters
# perfect amount of workers, skip mining place
if not difference:
continue
if mining_place.is_vespene_geyser:
# get all workers that target the gas extraction site
# or are on their way back from it
local_workers = self.workers.filter(
lambda unit: unit.order_target == mining_place.tag
or (unit.is_carrying_vespene and unit.order_target == bases.closest_to(mining_place).tag)
)
else:
# get tags of minerals around expansion
local_minerals_tags = {
mineral.tag for mineral in self.state.mineral_field if mineral.distance_to(mining_place) <= 8
}
# get all target tags a worker can have
# tags of the minerals he could mine at that base
# get workers that work at that gather site
local_workers = self.workers.filter(
lambda unit: unit.order_target in local_minerals_tags
or (unit.is_carrying_minerals and unit.order_target == mining_place.tag)
)
# too many workers
if difference > 0:
for worker in local_workers[:difference]:
worker_pool.append(worker)
# too few workers
# add mining place to deficit bases for every missing worker
else:
deficit_mining_places += [mining_place for _ in range(-difference)]
# prepare all minerals near a base if we have too many workers
# and need to send them to the closest patch
if len(worker_pool) > len(deficit_mining_places):
all_minerals_near_base = [
mineral
for mineral in self.state.mineral_field
if any(mineral.distance_to(base) <= 8 for base in self.townhalls.ready)
]
# distribute every worker in the pool
for worker in worker_pool:
# as long as have workers and mining places
if deficit_mining_places:
# choose only mineral fields first if current mineral to gas ratio is less than target ratio
if self.vespene and self.minerals / self.vespene < resource_ratio:
possible_mining_places = [place for place in deficit_mining_places if not place.vespene_contents]
# else prefer gas
else:
possible_mining_places = [place for place in deficit_mining_places if place.vespene_contents]
# if preferred type is not available any more, get all other places
if not possible_mining_places:
possible_mining_places = deficit_mining_places
# find closest mining place
current_place = min(deficit_mining_places, key=lambda place: place.distance_to(worker))
# remove it from the list
deficit_mining_places.remove(current_place)
# if current place is a gas extraction site, go there
if current_place.vespene_contents:
actions.append(worker.gather(current_place))
                # otherwise the current place is a mineral group:
                # go to the mineral field that is near and has the most minerals left
else:
local_minerals = [
mineral for mineral in self.state.mineral_field if mineral.distance_to(current_place) <= 8
]
target_mineral = max(local_minerals, key=lambda mineral: mineral.mineral_contents)
actions.append(worker.gather(target_mineral))
# more workers to distribute than free mining spots
# send to closest if worker is doing nothing
elif worker.is_idle and all_minerals_near_base:
target_mineral = min(all_minerals_near_base, key=lambda mineral: mineral.distance_to(worker))
actions.append(worker.gather(target_mineral))
else:
# there are no deficit mining places and worker is not idle
# so dont move him
pass
await self.do_actions(actions)
@property
def owned_expansions(self) -> Dict[Point2, Unit]:
"""List of expansions owned by the player."""
owned = {}
for el in self.expansion_locations:
def is_near_to_expansion(t):
return t.distance_to(el) < self.EXPANSION_GAP_THRESHOLD
th = next((x for x in self.townhalls if is_near_to_expansion(x)), None)
if th:
owned[el] = th
return owned
def can_feed(self, unit_type: UnitTypeId) -> bool:
""" Checks if you have enough free supply to build the unit """
required = self._game_data.units[unit_type.value]._proto.food_required
return required == 0 or self.supply_left >= required
def can_afford(
self, item_id: Union[UnitTypeId, UpgradeId, AbilityId], check_supply_cost: bool = True
) -> "CanAffordWrapper":
"""Tests if the player has enough resources to build a unit or cast an ability."""
enough_supply = True
if isinstance(item_id, UnitTypeId):
unit = self._game_data.units[item_id.value]
cost = self._game_data.calculate_ability_cost(unit.creation_ability)
if check_supply_cost:
enough_supply = self.can_feed(item_id)
elif isinstance(item_id, UpgradeId):
cost = self._game_data.upgrades[item_id.value].cost
else:
cost = self._game_data.calculate_ability_cost(item_id)
return CanAffordWrapper(cost.minerals <= self.minerals, cost.vespene <= self.vespene, enough_supply)
async def can_cast(
self,
unit: Unit,
ability_id: AbilityId,
target: Optional[Union[Unit, Point2, Point3]] = None,
only_check_energy_and_cooldown: bool = False,
cached_abilities_of_unit: List[AbilityId] = None,
) -> bool:
"""Tests if a unit has an ability available and enough energy to cast it.
        See data_pb2.py (line 161) to make sense of the numbers 1-5."""
assert isinstance(unit, Unit)
assert isinstance(ability_id, AbilityId)
assert isinstance(target, (type(None), Unit, Point2, Point3))
# check if unit has enough energy to cast or if ability is on cooldown
if cached_abilities_of_unit:
abilities = cached_abilities_of_unit
else:
abilities = (await self.get_available_abilities([unit]))[0]
if ability_id in abilities:
if only_check_energy_and_cooldown:
return True
cast_range = self._game_data.abilities[ability_id.value]._proto.cast_range
ability_target = self._game_data.abilities[ability_id.value]._proto.target
# Check if target is in range (or is a self cast like stimpack)
if (
ability_target == 1
or ability_target == Target.PointOrNone.value
and isinstance(target, (Point2, Point3))
and unit.distance_to(target) <= cast_range
): # cant replace 1 with "Target.None.value" because ".None" doesnt seem to be a valid enum name
return True
# Check if able to use ability on a unit
elif (
ability_target in {Target.Unit.value, Target.PointOrUnit.value}
and isinstance(target, Unit)
and unit.distance_to(target) <= cast_range
):
return True
# Check if able to use ability on a position
elif (
ability_target in {Target.Point.value, Target.PointOrUnit.value}
and isinstance(target, (Point2, Point3))
and unit.distance_to(target) <= cast_range
):
return True
return False
def select_build_worker(self, pos: Union[Unit, Point2, Point3], force: bool = False) -> Optional[Unit]:
"""Select a worker to build a building with."""
workers = (
self.workers.filter(lambda w: (w.is_gathering or w.is_idle) and w.distance_to(pos) < 20) or self.workers
)
if workers:
for worker in workers.sorted_by_distance_to(pos).prefer_idle:
if (
not worker.orders
or len(worker.orders) == 1
and worker.orders[0].ability.id in {AbilityId.MOVE, AbilityId.HARVEST_GATHER}
):
return worker
return workers.random if force else None
async def can_place(self, building: Union[AbilityData, AbilityId, UnitTypeId], position: Point2) -> bool:
"""Tests if a building can be placed in the given location."""
building_type = type(building)
assert building_type in {AbilityData, AbilityId, UnitTypeId}
if building_type == UnitTypeId:
building = self._game_data.units[building.value].creation_ability
elif building_type == AbilityId:
building = self._game_data.abilities[building.value]
r = await self._client.query_building_placement(building, [position])
return r[0] == ActionResult.Success
async def find_placement(
self,
building: UnitTypeId,
near: Union[Unit, Point2, Point3],
max_distance: int = 20,
random_alternative: bool = True,
placement_step: int = 2,
) -> Optional[Point2]:
"""Finds a placement location for building."""
assert isinstance(building, (AbilityId, UnitTypeId))
assert isinstance(near, Point2)
if isinstance(building, UnitTypeId):
building = self._game_data.units[building.value].creation_ability
else: # AbilityId
building = self._game_data.abilities[building.value]
if await self.can_place(building, near):
return near
if max_distance == 0:
return None
for distance in range(placement_step, max_distance, placement_step):
possible_positions = [
Point2(p).offset(near).to2
for p in (
[(dx, -distance) for dx in range(-distance, distance + 1, placement_step)]
+ [(dx, distance) for dx in range(-distance, distance + 1, placement_step)]
+ [(-distance, dy) for dy in range(-distance, distance + 1, placement_step)]
+ [(distance, dy) for dy in range(-distance, distance + 1, placement_step)]
)
]
res = await self._client.query_building_placement(building, possible_positions)
possible = [p for r, p in zip(res, possible_positions) if r == ActionResult.Success]
if not possible:
continue
if random_alternative:
return random.choice(possible)
else:
return min(possible, key=lambda p: p.distance_to_point2(near))
return None
def already_pending_upgrade(self, upgrade_type: UpgradeId) -> Union[int, float]:
""" Check if an upgrade is being researched
Return values:
0: not started
0 < x < 1: researching
1: finished
"""
assert isinstance(upgrade_type, UpgradeId)
if upgrade_type in self.state.upgrades:
return 1
level = None
if "LEVEL" in upgrade_type.name:
level = upgrade_type.name[-1]
creationAbilityID = self._game_data.upgrades[upgrade_type.value].research_ability.id
for structure in self.units.filter(lambda unit: unit.is_structure and unit.is_ready):
for order in structure.orders:
if order.ability.id is creationAbilityID:
if level and order.ability.button_name[-1] != level:
return 0
return order.progress
return 0
@property_cache_once_per_frame
def _abilities_all_units(self) -> Counter:
""" Cache for the already_pending function, includes protoss units warping in, and all units in production, and all structures, and all morphs """
abilities_amount = Counter()
for unit in self.units: # type: Unit
for order in unit.orders:
abilities_amount[order.ability] += 1
if not unit.is_ready:
if self.race != Race.Terran or not unit.is_structure:
# If an SCV is constructing a building, already_pending would count this structure twice (once from the SCV order, and once from "not structure.is_ready")
abilities_amount[self._game_data.units[unit.type_id.value].creation_ability] += 1
return abilities_amount
@property_cache_once_per_frame
def _abilities_workers_and_eggs(self) -> Counter:
""" Cache for the already_pending function, includes all worker orders (including pending).
Zerg units in production (except queens and morphing units) and structures in production,
counts double for terran """
abilities_amount = Counter()
for worker in self.workers: # type: Unit
for order in worker.orders:
abilities_amount[order.ability] += 1
if self.race == Race.Zerg:
for egg in self.units(UnitTypeId.EGG): # type: Unit
for order in egg.orders:
abilities_amount[order.ability] += 1
if self.race != Race.Terran:
# If an SCV is constructing a building, already_pending would count this structure twice
# (once from the SCV order, and once from "not structure.is_ready")
for unit in self.units.structure.not_ready: # type: Unit
abilities_amount[self._game_data.units[unit.type_id.value].creation_ability] += 1
return abilities_amount
def already_pending(self, unit_type: Union[UpgradeId, UnitTypeId], all_units: bool = True) -> int:
"""
Returns a number of buildings or units already in progress, or if a
worker is en route to build it. This also includes queued orders for
workers and build queues of buildings.
If all_units==True, then build queues of other units (such as Carriers
(Interceptors) or Oracles (Stasis Ward)) are also included.
"""
# TODO / FIXME: SCV building a structure might be counted as two units
if isinstance(unit_type, UpgradeId):
return self.already_pending_upgrade(unit_type)
ability = self._game_data.units[unit_type.value].creation_ability
amount = len(self.units(unit_type).not_ready)
if all_units:
amount += sum([o.ability == ability for u in self.units for o in u.orders])
else:
amount += sum([o.ability == ability for w in self.workers for o in w.orders])
amount += sum([egg.orders[0].ability == ability for egg in self.units(UnitTypeId.EGG)])
return amount
    async def build(
        self,
        building: UnitTypeId,
        near: Union[Point2, Point3],
        max_distance: int = 20,
        unit: Optional[Unit] = None,
        random_alternative: bool = True,
        placement_step: int = 2,
    ):
"""Build a building."""
if isinstance(near, Unit):
near = near.position.to2
elif near is not None:
near = near.to2
else:
return
p = await self.find_placement(building, near.rounded, max_distance, random_alternative, placement_step)
if p is None:
return ActionResult.CantFindPlacementLocation
unit = unit or self.select_build_worker(p)
if unit is None or not self.can_afford(building):
return ActionResult.Error
return await self.do(unit.build(building, p))
async def do(self, action):
if not self.can_afford(action):
logger.warning(f"Cannot afford action {action}")
return ActionResult.Error
r = await self._client.actions(action)
if not r: # success
cost = self._game_data.calculate_ability_cost(action.ability)
self.minerals -= cost.minerals
self.vespene -= cost.vespene
else:
logger.error(f"Error: {r} (action: {action})")
return r
async def do_actions(self, actions: List["UnitCommand"], prevent_double=True):
""" Unlike 'self.do()', this function does not instantly subtract minerals and vespene. """
if not actions:
return None
if prevent_double:
actions = list(filter(self.prevent_double_actions, actions))
for action in actions:
cost = self._game_data.calculate_ability_cost(action.ability)
self.minerals -= cost.minerals
self.vespene -= cost.vespene
return await self._client.actions(actions)
def prevent_double_actions(self, action):
# always add actions if queued
if action.queue:
return True
if action.unit.orders:
# action: UnitCommand
# current_action: UnitOrder
current_action = action.unit.orders[0]
if current_action.ability.id != action.ability:
# different action, return true
return True
try:
if current_action.target == action.target.tag:
# same action, remove action if same target unit
return False
except AttributeError:
pass
try:
if action.target.x == current_action.target.x and action.target.y == current_action.target.y:
# same action, remove action if same target position
return False
except AttributeError:
pass
return True
return True
async def chat_send(self, message: str):
""" Send a chat message. """
assert isinstance(message, str), f"{message} is no string"
await self._client.chat_send(message, False)
    # For the functions below, make sure you are inside the boundaries of the map size.
def get_terrain_height(self, pos: Union[Point2, Point3, Unit]) -> int:
""" Returns terrain height at a position.
Caution: terrain height is different from a unit's z-coordinate.
"""
assert isinstance(pos, (Point2, Point3, Unit)), f"pos is not of type Point2, Point3 or Unit"
pos = pos.position.to2.rounded
return self._game_info.terrain_height[pos] # returns int
def get_terrain_z_height(self, pos: Union[Point2, Point3, Unit]) -> int:
""" Returns terrain z-height at a position. """
assert isinstance(pos, (Point2, Point3, Unit)), f"pos is not of type Point2, Point3 or Unit"
pos = pos.position.to2.rounded
return -16 + 32 * self._game_info.terrain_height[pos] / 255
def in_placement_grid(self, pos: Union[Point2, Point3, Unit]) -> bool:
""" Returns True if you can place something at a position.
Remember, buildings usually use 2x2, 3x3 or 5x5 of these grid points.
Caution: some x and y offset might be required, see ramp code:
https://github.com/Dentosal/python-sc2/blob/master/sc2/game_info.py#L17-L18 """
assert isinstance(pos, (Point2, Point3, Unit))
pos = pos.position.to2.rounded
return self._game_info.placement_grid[pos] == 1
def in_pathing_grid(self, pos: Union[Point2, Point3, Unit]) -> bool:
""" Returns True if a unit can pass through a grid point. """
assert isinstance(pos, (Point2, Point3, Unit))
pos = pos.position.to2.rounded
return self._game_info.pathing_grid[pos] == 1
def is_visible(self, pos: Union[Point2, Point3, Unit]) -> bool:
""" Returns True if you have vision on a grid point. """
# more info: https://github.com/Blizzard/s2client-proto/blob/9906df71d6909511907d8419b33acc1a3bd51ec0/s2clientprotocol/spatial.proto#L19
assert isinstance(pos, (Point2, Point3, Unit))
pos = pos.position.to2.rounded
return self.state.visibility[pos] == 2
def has_creep(self, pos: Union[Point2, Point3, Unit]) -> bool:
""" Returns True if there is creep on the grid point. """
assert isinstance(pos, (Point2, Point3, Unit))
pos = pos.position.to2.rounded
return self.state.creep[pos] == 1
def _prepare_start(self, client, player_id, game_info, game_data):
"""Ran until game start to set game and player data."""
self._client: "Client" = client
self._game_info: "GameInfo" = game_info
self._game_data: GameData = game_data
self.player_id: int = player_id
self.race: Race = Race(self._game_info.player_races[self.player_id])
self._units_previous_map: dict = dict()
self._previous_upgrades: Set[UpgradeId] = set()
self.units: Units = Units([])
def _prepare_first_step(self):
"""First step extra preparations. Must not be called before _prepare_step."""
if self.townhalls:
self._game_info.player_start_location = self.townhalls.first.position
self._game_info.map_ramps, self._game_info.vision_blockers = self._game_info._find_ramps_and_vision_blockers()
def _prepare_step(self, state, proto_game_info):
        """Set attributes from new state before on_step."""
self.state: GameState = state # See game_state.py
# update pathing grid
self._game_info.pathing_grid: PixelMap = PixelMap(
proto_game_info.game_info.start_raw.pathing_grid, in_bits=True, mirrored=False
)
# Required for events
self._units_previous_map: Dict = {unit.tag: unit for unit in self.units}
self.units: Units = state.own_units
self.workers: Units = self.units(race_worker[self.race])
self.townhalls: Units = self.units(race_townhalls[self.race])
self.geysers: Units = self.units(race_gas[self.race])
self.minerals: int = state.common.minerals
self.vespene: int = state.common.vespene
self.supply_army: int = state.common.food_army
self.supply_workers: int = state.common.food_workers # Doesn't include workers in production
self.supply_cap: int = state.common.food_cap
self.supply_used: int = state.common.food_used
self.supply_left: int = self.supply_cap - self.supply_used
if self.race == Race.Zerg:
self.larva_count: int = state.common.larva_count
# Workaround Zerg supply rounding bug
self._correct_zerg_supply()
elif self.race == Race.Protoss:
self.warp_gate_count: int = state.common.warp_gate_count
self.idle_worker_count: int = state.common.idle_worker_count
self.army_count: int = state.common.army_count
# reset cached values
self.cached_known_enemy_structures = None
self.cached_known_enemy_units = None
async def issue_events(self):
""" This function will be automatically run from main.py and triggers the following functions:
- on_unit_created
- on_unit_destroyed
- on_building_construction_complete
"""
await self._issue_unit_dead_events()
await self._issue_unit_added_events()
for unit in self.units.structure:
await self._issue_building_complete_event(unit)
if len(self._previous_upgrades) != len(self.state.upgrades):
for upgrade_completed in self.state.upgrades - self._previous_upgrades:
await self.on_upgrade_complete(upgrade_completed)
self._previous_upgrades = self.state.upgrades
async def _issue_unit_added_events(self):
for unit in self.units.not_structure:
if unit.tag not in self._units_previous_map:
await self.on_unit_created(unit)
for unit in self.units.structure:
if unit.tag not in self._units_previous_map:
await self.on_building_construction_started(unit)
async def _issue_building_complete_event(self, unit):
if unit.build_progress < 1:
return
if unit.tag not in self._units_previous_map:
return
unit_prev = self._units_previous_map[unit.tag]
if unit_prev.build_progress < 1:
await self.on_building_construction_complete(unit)
async def _issue_unit_dead_events(self):
for unit_tag in self.state.dead_units:
await self.on_unit_destroyed(unit_tag)
async def on_unit_destroyed(self, unit_tag):
""" Override this in your bot class.
Note that this function uses unit tags because the unit does not exist any more. """
async def on_unit_created(self, unit: Unit):
""" Override this in your bot class. """
async def on_building_construction_started(self, unit: Unit):
""" Override this in your bot class. """
async def on_building_construction_complete(self, unit: Unit):
""" Override this in your bot class. Note that this function is also
triggered at the start of the game for the starting base building."""
async def on_upgrade_complete(self, upgrade: UpgradeId):
""" Override this in your bot class. """
def on_start(self):
""" Allows initializing the bot when the game data is available. """
async def on_start_async(self):
""" This function is run after "on_start". At this point, game_data, game_info and
the first iteration of game_state (self.state) are available. """
async def on_step(self, iteration: int):
"""Ran on every game step (looped in realtime mode)."""
raise NotImplementedError
def on_end(self, game_result: Result):
""" Triggered at the end of a game. """
class CanAffordWrapper:
def __init__(self, can_afford_minerals, can_afford_vespene, have_enough_supply):
self.can_afford_minerals = can_afford_minerals
self.can_afford_vespene = can_afford_vespene
self.have_enough_supply = have_enough_supply
def __bool__(self):
return self.can_afford_minerals and self.can_afford_vespene and self.have_enough_supply
@property
def action_result(self):
if not self.can_afford_vespene:
return ActionResult.NotEnoughVespene
elif not self.can_afford_minerals:
return ActionResult.NotEnoughMinerals
elif not self.have_enough_supply:
return ActionResult.NotEnoughFood
else:
return None
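# A minimal usage sketch, not part of the library: a Protoss macro bot built only on the
# BotAI helpers defined above (distribute_workers, can_afford, already_pending, do,
# expand_now). The Protoss assumption, the worker cap of 16 and the overall behaviour are
# illustrative, not a recommended strategy.
class ExampleMacroBot(BotAI):
    async def on_step(self, iteration: int):
        # Keep workers mining at roughly a 2:1 mineral-to-gas ratio.
        await self.distribute_workers(resource_ratio=2)
        # Train a worker from an idle, ready townhall while below the cap.
        worker_type = race_worker[self.race]
        if (
            self.can_afford(worker_type)
            and self.workers.amount + self.already_pending(worker_type) < 16
            and self.townhalls.ready.idle
        ):
            await self.do(self.townhalls.ready.idle.first.train(worker_type))
        # Take a new base once no other expansion is already on the way.
        if self.can_afford(UnitTypeId.NEXUS) and not self.already_pending(UnitTypeId.NEXUS):
            await self.expand_now()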
| 2.34375 | 2 |
src/python/pants/jvm/resolve/lockfile_metadata.py | xyzst/pants | 0 | 2916 | # Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from dataclasses import dataclass
from enum import Enum
from typing import Any, Iterable, cast
from pants.core.util_rules.lockfile_metadata import (
LockfileMetadata,
LockfileMetadataValidation,
LockfileScope,
_get_metadata,
lockfile_metadata_registrar,
)
from pants.jvm.resolve.common import ArtifactRequirement
from pants.util.ordered_set import FrozenOrderedSet
_jvm_lockfile_metadata = lockfile_metadata_registrar(LockfileScope.JVM)
class InvalidJVMLockfileReason(Enum):
REQUIREMENTS_MISMATCH = "requirements_mismatch"
@dataclass(frozen=True)
class JVMLockfileMetadata(LockfileMetadata):
scope = LockfileScope.JVM
@staticmethod
def new(
requirements: Iterable[ArtifactRequirement],
) -> JVMLockfileMetadata:
"""Call the most recent version of the `LockfileMetadata` class to construct a concrete
instance.
This static method should be used in place of the `LockfileMetadata` constructor. This gives
calling sites a predictable method to call to construct a new `LockfileMetadata` for
writing, while still allowing us to support _reading_ older, deprecated metadata versions.
"""
return JVMLockfileMetadataV1.from_artifact_requirements(requirements)
@classmethod
def from_lockfile(
cls, lockfile: bytes, lockfile_path: str | None = None, resolve_name: str | None = None
) -> JVMLockfileMetadataV1:
return cast(
JVMLockfileMetadataV1,
LockfileMetadata.from_lockfile_for_scope(
LockfileScope.JVM, lockfile, lockfile_path, resolve_name
),
)
def is_valid_for(
self,
requirements: Iterable[ArtifactRequirement] | None,
) -> LockfileMetadataValidation:
"""Returns Truthy if this `JVMLockfileMetadata` can be used in the current execution
context."""
raise NotImplementedError("call `is_valid_for` on subclasses only")
@_jvm_lockfile_metadata(1)
@dataclass(frozen=True)
class JVMLockfileMetadataV1(JVMLockfileMetadata):
"""Lockfile version that permits specifying a requirements as a set rather than a digest.
Validity is tested by the set of requirements strings being the same in the user requirements as
those in the stored requirements.
"""
requirements: FrozenOrderedSet[str]
@classmethod
def from_artifact_requirements(
cls, requirements: Iterable[ArtifactRequirement]
) -> JVMLockfileMetadataV1:
return cls(FrozenOrderedSet(i.to_metadata_str() for i in requirements))
@classmethod
def _from_json_dict(
cls: type[JVMLockfileMetadataV1],
json_dict: dict[Any, Any],
lockfile_description: str,
error_suffix: str,
) -> JVMLockfileMetadataV1:
metadata = _get_metadata(json_dict, lockfile_description, error_suffix)
requirements = metadata(
"generated_with_requirements",
FrozenOrderedSet[str],
FrozenOrderedSet,
)
return JVMLockfileMetadataV1(requirements)
@classmethod
def additional_header_attrs(cls, instance: LockfileMetadata) -> dict[Any, Any]:
instance = cast(JVMLockfileMetadataV1, instance)
return {
"generated_with_requirements": (
sorted(instance.requirements) if instance.requirements is not None else None
)
}
def is_valid_for(
self,
requirements: Iterable[ArtifactRequirement] | None,
) -> LockfileMetadataValidation:
"""Returns a truthy object if the request requirements match the metadata requirements.
For this version, "match" is defined as the request requirements being a non-strict subset
of the metadata requirements.
"""
failure_reasons: set[InvalidJVMLockfileReason] = set()
if not self.requirements.issuperset(i.to_metadata_str() for i in requirements or []):
failure_reasons.add(InvalidJVMLockfileReason.REQUIREMENTS_MISMATCH)
return LockfileMetadataValidation(failure_reasons)
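# A small usage sketch, not part of the module: it only exercises the API defined above.
# The lockfile path and resolve name are illustrative placeholders; callers supply real
# ArtifactRequirement objects built elsewhere in pants.jvm.resolve.
def _example_validate(requirements: list[ArtifactRequirement], lockfile_bytes: bytes) -> bool:
    """Return True if `lockfile_bytes` was generated for a superset of `requirements`."""
    metadata = JVMLockfileMetadata.from_lockfile(
        lockfile_bytes, lockfile_path="3rdparty/jvm/default.lock", resolve_name="jvm-default"
    )
    # The validation object is documented above as truthy when the lockfile can be used.
    return bool(metadata.is_valid_for(requirements))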
| 2.1875 | 2 |
generator/generator.py | GregorKikelj/opendbc | 1,059 | 2917 | #!/usr/bin/env python3
import os
import re
cur_path = os.path.dirname(os.path.realpath(__file__))
opendbc_root = os.path.join(cur_path, '../')
include_pattern = re.compile(r'CM_ "IMPORT (.*?)";')
def read_dbc(src_dir, filename):
with open(os.path.join(src_dir, filename)) as file_in:
return file_in.read()
def create_dbc(src_dir, filename, output_path):
dbc_file_in = read_dbc(src_dir, filename)
includes = include_pattern.findall(dbc_file_in)
output_filename = filename.replace('.dbc', '_generated.dbc')
output_file_location = os.path.join(output_path, output_filename)
with open(output_file_location, 'w') as dbc_file_out:
dbc_file_out.write('CM_ "AUTOGENERATED FILE, DO NOT EDIT";\n')
for include_filename in includes:
include_file_header = '\n\nCM_ "Imported file %s starts here";\n' % include_filename
dbc_file_out.write(include_file_header)
include_file = read_dbc(src_dir, include_filename)
dbc_file_out.write(include_file)
dbc_file_out.write('\nCM_ "%s starts here";\n' % filename)
core_dbc = include_pattern.sub('', dbc_file_in)
dbc_file_out.write(core_dbc)
def create_all(output_path):
for src_dir, _, filenames in os.walk(cur_path):
if src_dir == cur_path:
continue
#print(src_dir)
for filename in filenames:
if filename.startswith('_') or not filename.endswith('.dbc'):
continue
#print(filename)
create_dbc(src_dir, filename, output_path)
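# A self-contained sketch (not part of the generator) of the CM_ "IMPORT ...";
# convention expanded by create_dbc above. The file names and DBC lines are made up
# purely for illustration; see the usage note in the trailing comment.
def _demo_import_expansion():
  import tempfile
  src = tempfile.mkdtemp()
  out = tempfile.mkdtemp()
  with open(os.path.join(src, '_common.dbc'), 'w') as f:
    f.write('BO_ 1 EXAMPLE_MSG: 8 XXX\n')
  with open(os.path.join(src, 'example_car.dbc'), 'w') as f:
    f.write('CM_ "IMPORT _common.dbc";\nBO_ 2 CAR_MSG: 8 XXX\n')
  create_dbc(src, 'example_car.dbc', out)
  # The output example_car_generated.dbc starts with the AUTOGENERATED banner, then the
  # contents of _common.dbc, then the car-specific lines with the IMPORT comment removed.
  with open(os.path.join(out, 'example_car_generated.dbc')) as f:
    print(f.read())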
if __name__ == "__main__":
create_all(opendbc_root)
| 2.71875 | 3 |
customer/admin.py | matheusdemicheli/dogtel | 0 | 2918 | from django.contrib import admin
from django.utils.safestring import mark_safe
from customer.models import Owner, Dog, Breed, SubBreed
class OwnerAdmin(admin.ModelAdmin):
"""
Owner ModelAdmin.
"""
search_fields = ['name']
class BreedAdmin(admin.ModelAdmin):
"""
Breed ModelAdmin.
"""
search_fields = ['name']
class SubBreedAdmin(admin.ModelAdmin):
"""
SubBreed ModelAdmin.
"""
search_fields = ['name', 'breed__name']
autocomplete_fields = ['breed']
list_display = ['name', 'breed']
class DogAdmin(admin.ModelAdmin):
"""
Dog ModelAdmin.
"""
search_fields = ['name', 'owner__name']
autocomplete_fields = ['owner', 'breed', 'sub_breed']
list_display = ['name', 'owner', 'breed', 'sub_breed', 'img_photo']
def img_photo(self, obj):
"""
Render the dog's photo.
"""
return mark_safe('<img src="%s" width="70">' % obj.photo.url)
admin.site.register(Dog, DogAdmin)
admin.site.register(Owner, OwnerAdmin)
admin.site.register(Breed, BreedAdmin)
admin.site.register(SubBreed, SubBreedAdmin) | 2.078125 | 2 |
kaneda/tasks/rq.py | APSL/kaneda | 59 | 2919 | from __future__ import absolute_import
from redis import Redis
from rq.decorators import job
from kaneda.utils import get_backend
backend = get_backend()
@job(queue='kaneda', connection=Redis())
def report(name, metric, value, tags, id_):
"""
RQ job to report metrics to the configured backend in kanedasettings.py
To run the worker execute this command:
rqworker [queue]
"""
return backend.report(name, metric, value, tags, id_)
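# A usage sketch, not part of the module: rq's @job decorator adds .delay() to `report`,
# which enqueues the call on the 'kaneda' queue declared above instead of running it
# inline. The metric name, value and tags below are illustrative.
def enqueue_example_metric():
    report.delay(
        name="tasks.example",
        metric="timing",
        value=0.42,  # made-up timing value in seconds
        tags={"service": "demo"},
        id_=None,
    )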
| 2.421875 | 2 |
src/ripper.py | jg-rivera/cert-ripper | 0 | 2920 | from dotenv import load_dotenv
from PyPDF2 import PdfFileReader, PdfFileWriter
import os
import json
class CertRipper:
def __init__(
self,
start_page_index=0,
master_pdf_path=None,
json_points_path=None,
ripped_certs_path=None,
ripped_cert_file_name=None,
):
self.start_page_index = start_page_index
self.master_pdf_path = master_pdf_path
self.pdf = PdfFileReader(master_pdf_path)
self.pdf_length = self.pdf.getNumPages()
self.json_points_path = json_points_path
self.ripped_certs_path = ripped_certs_path
self.ripped_cert_file_name = ripped_cert_file_name
def process(self):
recipient_groups = self.get_recipient_groups_from_points()
self.extract_pdf_from_master(recipient_groups)
def extract_pdf_from_master(self, recipient_groups):
current_page_index = self.start_page_index
process_index = 0
for recipient_group in recipient_groups:
recipient_group_name = recipient_group["name"]
recipient_group_tag = recipient_group["tag"]
recipient_slugs = recipient_group["recipient_slugs"]
print(
f"[*] Ripping \x1b[93m{recipient_group_name}\x1b[0m group ...")
for recipient_slug in recipient_slugs:
page = self.pdf.getPage(current_page_index)
file_name = self.ripped_cert_file_name.format(
index=current_page_index + 1,
tag=recipient_group_tag,
recipient=recipient_slug
)
pdf_writer = PdfFileWriter()
pdf_writer.addPage(page)
output_file_name = f"{self.ripped_certs_path}\\{file_name}.pdf"
with open(output_file_name, "wb") as out:
pdf_writer.write(out)
print(
f"\x1b[95m[{process_index}]\x1b[0m Ripped \x1b[92m[{file_name}]\x1b[0m from \x1b[94mpage {current_page_index + 1}\x1b[0m of master")
current_page_index += 1
process_index += 1
def get_recipient_groups_from_points(self):
recipient_groups = []
total_recipients = 0
with open(self.json_points_path, "r") as json_file:
points = json.load(json_file)
for point in points:
point_name = point["name"]
point_tag = point["tag"]
point_recipients = point["recipients"]
point_recipient_slugs = []
for point_recipient in point_recipients:
recipient_name = point_recipient["name"]
recipient_name_slug = "_".join(recipient_name.split())
point_recipient_slugs.append(recipient_name_slug)
total_recipients += 1
recipient_groups.append({
"name": point_name,
"tag": point_tag,
"recipient_slugs": point_recipient_slugs
})
total_groups = len(recipient_groups)
self.__check_pdf_length(total_recipients)
print(
f"Read \x1b[95m{total_groups} groups(s)\x1b[0m and \x1b[95m{total_recipients} recipient(s)\x1b[0m from JSON points")
return recipient_groups
def __check_pdf_length(self, recipients_length):
pdf_length = self.pdf_length - (self.start_page_index)
if pdf_length != recipients_length:
raise ValueError(
f"Number of recipients ({recipients_length}) does not match with PDF length ({pdf_length})"
)
if __name__ == "__main__":
load_dotenv()
ripper = CertRipper(
        start_page_index=int(os.getenv("START_PAGE_INDEX", 0)),
master_pdf_path=os.getenv("MASTER_PDF_PATH"),
json_points_path=os.getenv("JSON_POINTS_PATH"),
ripped_certs_path=os.getenv("RIPPED_CERTS_PATH"),
ripped_cert_file_name=os.getenv("RIPPED_CERT_FILE_NAME"),
)
ripper.process()
| 2.8125 | 3 |
venv/Lib/site-packages/tests/test_111_FieldNumAddCol.py | shehzadulislam/Assignment4 | 0 | 2921 | #
# Licensed Materials - Property of IBM
#
# (c) Copyright IBM Corp. 2007-2008
#
import unittest, sys
import ibm_db
import config
from testfunctions import IbmDbTestFunctions
class IbmDbTestCase(unittest.TestCase):
def test_111_FieldNumAddCol(self):
obj = IbmDbTestFunctions()
obj.assert_expect(self.run_test_111)
def run_test_111(self):
conn = ibm_db.connect(config.database, config.user, config.password)
server = ibm_db.server_info( conn )
if conn:
ibm_db.autocommit(conn, ibm_db.SQL_AUTOCOMMIT_OFF)
insert = "INSERT INTO animals values (7, 'cat', 'Benji', 5.1)"
ibm_db.exec_immediate(conn, insert)
stmt = ibm_db.exec_immediate(conn, "SELECT breed, COUNT(breed) AS number FROM animals GROUP BY breed ORDER BY breed")
if (server.DBMS_NAME[0:3] == 'IDS'):
num1 = ibm_db.field_num(stmt, "id")
num2 = ibm_db.field_num(stmt, "breed")
num3 = ibm_db.field_num(stmt, "number")
num4 = ibm_db.field_num(stmt, "NUMBER")
num5 = ibm_db.field_num(stmt, "bREED")
num6 = ibm_db.field_num(stmt, 8)
num7 = ibm_db.field_num(stmt, 1)
num8 = ibm_db.field_num(stmt, "WEIGHT")
else:
num1 = ibm_db.field_num(stmt, "ID")
num2 = ibm_db.field_num(stmt, "BREED")
num3 = ibm_db.field_num(stmt, "NUMBER")
num4 = ibm_db.field_num(stmt, "number")
num5 = ibm_db.field_num(stmt, "Breed")
num6 = ibm_db.field_num(stmt, 8)
num7 = ibm_db.field_num(stmt, 1)
num8 = ibm_db.field_num(stmt, "weight")
print("%s" % num1)
print("int(%d)" % num2)
print("int(%d)" % num3)
print("%s" % num4)
print("%s" % num5)
print("%s" % num6)
print("int(%d)" % num7)
print("%s" % num8)
ibm_db.rollback(conn)
else:
print("Connection failed.")
#__END__
#__LUW_EXPECTED__
#False
#int(0)
#int(1)
#False
#False
#False
#int(1)
#False
#__ZOS_EXPECTED__
#False
#int(0)
#int(1)
#False
#False
#False
#int(1)
#False
#__SYSTEMI_EXPECTED__
#False
#int(0)
#int(1)
#False
#False
#False
#int(1)
#False
#__IDS_EXPECTED__
#False
#int(0)
#int(1)
#False
#False
#False
#int(1)
#False
| 2.484375 | 2 |
foundation/djangocms_pagebanner/cms_toolbar.py | Mindelirium/foundation | 0 | 2922 | <reponame>Mindelirium/foundation<gh_stars>0
from cms.api import get_page_draft
from cms.toolbar_pool import toolbar_pool
from cms.toolbar_base import CMSToolbar
from cms.utils import get_cms_setting
from cms.utils.permissions import has_page_change_permission
from django.core.urlresolvers import reverse, NoReverseMatch
from django.utils.translation import ugettext_lazy as _
from .models import PageBannerExtension
_banner_change_url = 'admin:djangocms_pagebanner_pagebannerextension_change'
_banner_add_url = 'admin:djangocms_pagebanner_pagebannerextension_add'
@toolbar_pool.register
class PageBannerExtensionToolbar(CMSToolbar):
def populate(self):
# always use draft if we have a page
self.page = get_page_draft(self.request.current_page)
if not self.page:
# Nothing to do
return
# check global permissions if CMS_PERMISSIONS is active
if get_cms_setting('PERMISSION'):
has_global_current_page_change_permission = \
has_page_change_permission(self.request)
else:
has_global_current_page_change_permission = False
# check if user has page edit permission
can_change = (self.request.current_page and
self.request.current_page.has_change_permission(
self.request))
if has_global_current_page_change_permission or can_change:
try:
page_banner_extension = PageBannerExtension.objects.get(
extended_object_id=self.page.id)
except PageBannerExtension.DoesNotExist:
page_banner_extension = None
try:
if page_banner_extension:
url = reverse(_banner_change_url,
args=(page_banner_extension.pk,))
else:
url = (reverse(_banner_add_url) +
'?extended_object=%s' % self.page.pk)
except NoReverseMatch:
# not in urls
pass
else:
not_edit_mode = not self.toolbar.edit_mode
current_page_menu = self.toolbar.get_or_create_menu('page')
current_page_menu.add_modal_item(_('Page banner'),
url=url,
disabled=not_edit_mode)
| 1.953125 | 2 |
tasks/lm/models/lm.py | etri-edgeai/nn-comp-discblock | 10 | 2923 | <gh_stars>1-10
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class RNNModel(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False, encoder=None, decoder=None):
super(RNNModel, self).__init__()
self.ntoken = ntoken
self.drop = nn.Dropout(dropout)
if encoder is None:
self.encoder = nn.Embedding(ntoken, ninp)
else:
self.encoder = encoder
if rnn_type in ['LSTM', 'GRU']:
self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=dropout)
else:
try:
nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[rnn_type]
except KeyError:
raise ValueError( """An invalid option for `--model` was supplied,
options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']""")
self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=nonlinearity, dropout=dropout)
if decoder is None:
self.decoder = nn.Linear(nhid, ntoken)
else:
self.decoder = decoder
# Optionally tie weights as in:
# "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
# https://arxiv.org/abs/1608.05859
# and
# "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
# https://arxiv.org/abs/1611.01462
if tie_weights:
if nhid != ninp:
raise ValueError('When using the tied flag, nhid must be equal to emsize')
self.decoder.weight = self.encoder.weight
self.rnn_type = rnn_type
self.nhid = nhid
self.nlayers = nlayers
def init_weights(self):
initrange = 0.1
if self.encoder.__class__.__name__ == "Embedding":
self.encoder.weight.data.uniform_(-initrange, initrange)
else:
self.encoder.init_weights()
if self.decoder.__class__.__name__ == "Linear":
self.decoder.bias.data.zero_()
self.decoder.weight.data.uniform_(-initrange, initrange)
else:
self.decoder.init_weights()
def forward(self, input, hidden):
emb = self.drop(self.encoder(input))
hidden_ = []
for h in hidden:
if isinstance(h, torch.LongTensor) or isinstance(h, torch.cuda.LongTensor):
h = h.to(torch.float)
hidden_.append(h)
output, hidden = self.rnn(emb, tuple(hidden_))
output = self.drop(output)
decoded = self.decoder(output)
decoded = decoded.view(-1, self.ntoken)
return F.log_softmax(decoded, dim=1), hidden
def init_hidden(self, bsz):
weight = next(self.parameters())
if self.rnn_type == 'LSTM':
return (weight.new_zeros(self.nlayers, bsz, self.nhid),
weight.new_zeros(self.nlayers, bsz, self.nhid))
else:
return weight.new_zeros(self.nlayers, bsz, self.nhid)
# Temporarily leave PositionalEncoding module here. Will be moved somewhere else.
class PositionalEncoding(nn.Module):
r"""Inject some information about the relative or absolute position of the tokens
in the sequence. The positional encodings have the same dimension as
the embeddings, so that the two can be summed. Here, we use sine and cosine
functions of different frequencies.
.. math::
\text{PosEncoder}(pos, 2i) = sin(pos/10000^(2i/d_model))
\text{PosEncoder}(pos, 2i+1) = cos(pos/10000^(2i/d_model))
    \text{where pos is the word position and i is the embed idx}
Args:
d_model: the embed dim (required).
dropout: the dropout value (default=0.1).
max_len: the max. length of the incoming sequence (default=5000).
Examples:
>>> pos_encoder = PositionalEncoding(d_model)
"""
def __init__(self, d_model, dropout=0.1, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer('pe', pe)
def forward(self, x):
r"""Inputs of forward function
Args:
x: the sequence fed to the positional encoder model (required).
Shape:
x: [sequence length, batch size, embed dim]
output: [sequence length, batch size, embed dim]
Examples:
>>> output = pos_encoder(x)
"""
x = x + self.pe[:x.size(0), :]
return self.dropout(x)
class TransformerModel(nn.Module):
"""Container module with an encoder, a recurrent or transformer module, and a decoder."""
def __init__(self, ntoken, ninp, nhead, nhid, nlayers, dropout=0.5, encoder=None, decoder=None):
super(TransformerModel, self).__init__()
try:
from torch.nn import TransformerEncoder, TransformerEncoderLayer
except:
raise ImportError('TransformerEncoder module does not exist in PyTorch 1.1 or lower.')
self.model_type = 'Transformer'
self.src_mask = None
self.pos_encoder = PositionalEncoding(ninp, dropout)
encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout)
self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)
if encoder is None:
self.encoder = nn.Embedding(ntoken, ninp)
else:
self.encoder = encoder
self.ninp = ninp
if decoder is None:
            self.decoder = nn.Linear(ninp, ntoken)
else:
self.decoder = decoder
def _generate_square_subsequent_mask(self, sz):
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
return mask
def init_weights(self):
initrange = 0.1
if self.encoder.__class__.__name__ == "Embedding":
self.encoder.weight.data.uniform_(-initrange, initrange)
else:
self.encoder.init_weights()
if self.decoder.__class__.__name__ == "Linear":
self.decoder.bias.data.zero_()
self.decoder.weight.data.uniform_(-initrange, initrange)
else:
self.decoder.init_weights()
def forward(self, src, has_mask=True):
if has_mask:
device = src.device
if self.src_mask is None or self.src_mask.size(0) != len(src):
mask = self._generate_square_subsequent_mask(len(src)).to(device)
self.src_mask = mask
else:
self.src_mask = None
src = self.encoder(src) * math.sqrt(self.ninp)
src = self.pos_encoder(src)
output = self.transformer_encoder(src, self.src_mask)
output = self.decoder(output)
return F.log_softmax(output, dim=-1)
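# A minimal usage sketch of TransformerModel, assuming the torch/nn/F/math imports made
# earlier in this module; the vocabulary size and tensor shapes below are arbitrary.
if __name__ == '__main__':
    ntoken, ninp, nhead, nhid, nlayers = 1000, 128, 4, 128, 2
    model = TransformerModel(ntoken, ninp, nhead, nhid, nlayers, dropout=0.2)
    model.eval()  # disable dropout for the shape check
    src = torch.randint(0, ntoken, (35, 8))  # [sequence length, batch size] of token ids
    log_probs = model(src)                   # [35, 8, ntoken] log-probabilities
    print(log_probs.shape)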
| 2.8125 | 3 |
fibo.py | Baibhabswain/pythonPrograms | 1 | 2924 | <reponame>Baibhabswain/pythonPrograms
def main():
    a = int(input("Enter the first number: "))
    b = int(input("Enter the second number: "))
    terms = int(input("Please enter how many further terms to generate: "))
    count = 0
    print(a)
    print(b)
    # Generate the next `terms` Fibonacci-style terms from the two seed values.
    while count != terms:
        c = a + b
        count += 1
        print(c)
        a = b
        b = c
main()
scrapper/playstation/__init__.py | gghf-service/gghf-api | 1 | 2925 | from scrapper.playstation.spider import main | 1.085938 | 1 |
plugins/number.py | motakine/ILAS_slackbot | 0 | 2926 | import slackbot.bot
import random
answer = random.randint(1, 50)
max = 50
def number(num):
    '''Judge a guess in the number game.
    Args:
        num (int): the guessed number
    Returns:
        str: if num is larger than answer: 'Too large'
             if num is smaller than answer: 'Too small'
             if num equals answer: 'Correct!' and a new game is started
             anything else: 'Can I kick you?'
        0 is treated as a mysterious number.
        When max is 1, answering 2 or more gets you told that only 1 is allowed.
    '''
global answer
global max
    # Build the reply according to the input value; start a new game on a correct guess
if num == 0:
return ' is a mysterious number...'
elif num < max + 1:
if num > answer:
return ' is too large. The answer is more small.'
elif num < answer:
return ' is too small. The answer is more large.'
elif num == answer:
answer = random.randint(1, max)
return ' is correct! :tada: Now, start a new game.'
elif max == 1:
return '? Can I kick you? Only 1.'
return '? Can I kick you? 1 to %d.' % max
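# Example behaviour, assuming answer == 30 and max == 50:
#   number(10) -> ' is too small. The answer is more large.'
#   number(45) -> ' is too large. The answer is more small.'
#   number(30) -> ' is correct! :tada: Now, start a new game.' (and a new answer is drawn)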
def number_set(num):
    '''Handle "number set": change the maximum possible answer.
    Args:
        num (int): the new maximum
    Returns:
        str: changes max (the largest value the answer can take). Default is 50.
        Setting it to 1 just gets you asked "Really?". That is all.
        The mysterious number is 0.
    '''
global answer
global max
    # Build the reply according to the input value, update max, and start a new game
if num == 0:
return 'There is a mysterious number... It is '
elif num == 1:
max = 1
answer = random.randint(1, max)
return '1? Really? Then, the maximum of the answer is '
max = num
answer = random.randint(1, max)
return 'OK. Then, the maximum of the answer is '
@slackbot.bot.respond_to(r'^number\s+set\s+(\d+)')
def resp_set(message, digitstr):
    '''Reply to messages of the form "number set <digits>".
    Runs the "number set" handler on the <digits> part and replies.
    Args:
        message (slackbot.dispatcher.Message): slack message
        digitstr (str): digit string
    '''
    # Evaluate the "number set" command
nbs = number_set(int(digitstr))
    # Build the reply string
reply = '{0:s}{1:s}.'.format(nbs, digitstr)
message.reply(reply)
@slackbot.bot.respond_to(r'^number\s+(\d+)')
def resp_number(message, digitstr):
    '''Reply to messages of the form "number <digits>".
    Judges the <digits> part and replies with the judgement.
    Args:
        message (slackbot.dispatcher.Message): slack message
        digitstr (str): digit string
    '''
    # Judge the guess
nb = number(int(digitstr))
    # Build the reply string
reply = '{0:s}{1:s}'.format(digitstr, nb)
message.reply(reply)
@slackbot.bot.respond_to(r'^number\s+giveup')
def resp_giveup(message):
    '''Reply to "number giveup".
    Shows the correct answer, sets a new answer, and replies 'Start a new game.'
    Args:
        message (slackbot.dispatcher.Message): slack message
    '''
global answer
global max
    # Keep the answer to display, then draw the answer for the next game
showanswer = answer
answer = random.randint(1, max)
    # Build the reply string
message.reply('Hahaha! Failed! :ghost: The answer is %d. Start a new game.' % showanswer)
message.react('stuck_out_tongue_winking_eye')
| 3.90625 | 4 |
pytype/analyze.py | hatal175/pytype | 0 | 2927 | """Code for checking and inferring types."""
import collections
import logging
import re
import subprocess
from typing import Any, Dict, Union
from pytype import abstract
from pytype import abstract_utils
from pytype import convert_structural
from pytype import debug
from pytype import function
from pytype import metrics
from pytype import output
from pytype import special_builtins
from pytype import state as frame_state
from pytype import vm
from pytype.overlays import typing_overlay
from pytype.pytd import builtins
from pytype.pytd import escape
from pytype.pytd import optimize
from pytype.pytd import pytd
from pytype.pytd import pytd_utils
from pytype.pytd import visitors
from pytype.typegraph import cfg
log = logging.getLogger(__name__)
# Most interpreter functions (including lambdas) need to be analyzed as
# stand-alone functions. The exceptions are comprehensions and generators, which
# have names like "<listcomp>" and "<genexpr>".
_SKIP_FUNCTION_RE = re.compile("<(?!lambda).+>$")
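# e.g. this matches "<listcomp>" and "<genexpr>" but not "<lambda>" or ordinary names.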
CallRecord = collections.namedtuple(
"CallRecord", ["node", "function", "signatures", "positional_arguments",
"keyword_arguments", "return_value"])
# How deep to follow call chains:
INIT_MAXIMUM_DEPTH = 4 # during module loading
MAXIMUM_DEPTH = 3 # during non-quick analysis
QUICK_CHECK_MAXIMUM_DEPTH = 2 # during quick checking
QUICK_INFER_MAXIMUM_DEPTH = 1 # during quick inference
class _Initializing:
pass
class CallTracer(vm.VirtualMachine):
"""Virtual machine that records all function calls.
Attributes:
exitpoint: A CFG node representing the program exit. Needs to be set before
analyze_types.
"""
_CONSTRUCTORS = ("__new__", "__init__")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._unknowns = {}
self._calls = set()
self._method_calls = set()
# Used by init_class.
self._instance_cache: Dict[Any, Union[_Initializing, cfg.Variable]] = {}
# Used by call_init. Can differ from _instance_cache because we also call
# __init__ on classes not initialized via init_class.
self._initialized_instances = set()
self._interpreter_functions = []
self._interpreter_classes = []
self._analyzed_functions = set()
self._analyzed_classes = set()
self._generated_classes = {}
self.exitpoint = None
def create_varargs(self, node):
value = abstract.Instance(self.convert.tuple_type, self)
value.merge_instance_type_parameter(
node, abstract_utils.T, self.convert.create_new_unknown(node))
return value.to_variable(node)
def create_kwargs(self, node):
key_type = self.convert.primitive_class_instances[str].to_variable(node)
value_type = self.convert.create_new_unknown(node)
kwargs = abstract.Instance(self.convert.dict_type, self)
kwargs.merge_instance_type_parameter(node, abstract_utils.K, key_type)
kwargs.merge_instance_type_parameter(node, abstract_utils.V, value_type)
return kwargs.to_variable(node)
def create_method_arguments(self, node, method, use_defaults=False):
"""Create arguments for the given method.
Creates Unknown objects as arguments for the given method. Note that we
don't need to take parameter annotations into account as
InterpreterFunction.call() will take care of that.
Args:
node: The current node.
method: An abstract.InterpreterFunction.
use_defaults: Whether to use parameter defaults for arguments. When True,
unknown arguments are created with force=False, as it is fine to use
Unsolvable rather than Unknown objects for type-checking defaults.
Returns:
A tuple of a node and a function.Args object.
"""
args = []
num_posargs = method.argcount(node)
num_posargs_no_default = num_posargs - len(method.defaults)
for i in range(num_posargs):
default_idx = i - num_posargs_no_default
if use_defaults and default_idx >= 0:
arg = method.defaults[default_idx]
else:
arg = self.convert.create_new_unknown(node, force=not use_defaults)
args.append(arg)
kws = {}
for key in method.signature.kwonly_params:
if use_defaults and key in method.kw_defaults:
kws[key] = method.kw_defaults[key]
else:
kws[key] = self.convert.create_new_unknown(node, force=not use_defaults)
starargs = self.create_varargs(node) if method.has_varargs() else None
starstarargs = self.create_kwargs(node) if method.has_kwargs() else None
return node, function.Args(posargs=tuple(args),
namedargs=kws,
starargs=starargs,
starstarargs=starstarargs)
def call_function_with_args(self, node, val, args):
"""Call a function.
Args:
node: The given node.
val: A cfg.Binding containing the function.
args: A function.Args object.
Returns:
A tuple of (1) a node and (2) a cfg.Variable of the return value.
"""
fvar = val.AssignToNewVariable(node)
with val.data.record_calls():
new_node, ret = self.call_function_in_frame(node, fvar, *args)
return new_node, ret
def call_function_in_frame(self, node, var, args, kwargs,
starargs, starstarargs):
frame = frame_state.SimpleFrame(node=node)
self.push_frame(frame)
log.info("Analyzing %r", [v.name for v in var.data])
state = frame_state.FrameState.init(node, self)
state, ret = self.call_function_with_state(
state, var, args, kwargs, starargs, starstarargs)
self.pop_frame(frame)
return state.node, ret
def _maybe_fix_classmethod_cls_arg(self, node, cls, func, args):
sig = func.signature
if (args.posargs and sig.param_names and
(sig.param_names[0] not in sig.annotations)):
# fix "cls" parameter
return args._replace(
posargs=(cls.AssignToNewVariable(node),) + args.posargs[1:])
else:
return args
def maybe_analyze_method(self, node, val, cls=None):
method = val.data
fname = val.data.name
if isinstance(method, abstract.INTERPRETER_FUNCTION_TYPES):
self._analyzed_functions.add(method.get_first_opcode())
if (not self.options.analyze_annotated and
(method.signature.has_return_annotation or method.has_overloads) and
fname.rsplit(".", 1)[-1] not in self._CONSTRUCTORS):
log.info("%r has annotations, not analyzing further.", fname)
else:
for f in method.iter_signature_functions():
node, args = self.create_method_arguments(node, f)
if f.is_classmethod and cls:
args = self._maybe_fix_classmethod_cls_arg(node, cls, f, args)
node, _ = self.call_function_with_args(node, val, args)
return node
def _call_with_fake_args(self, node0, funcv):
"""Attempt to call the given function with made-up arguments."""
    # TODO(tsudol): If we expand this beyond __init__, we need to handle
    # DictKeyMissing.
nodes = []
rets = []
for funcb in funcv.bindings:
func = funcb.data
log.info("Trying %s with fake arguments", func)
if isinstance(func, abstract.INTERPRETER_FUNCTION_TYPES):
node1, args = self.create_method_arguments(node0, func)
# Once the args are generated, try calling the function.
# call_function will check fallback_to_unsolvable if a DictKeyMissing or
# FailedFunctionCall error is raised when the target function is called.
# DictKeyMissing doesn't trigger call_with_fake_args, so that shouldn't
# be raised again, and generating fake arguments should avoid any
# FailedFunctionCall errors. To prevent an infinite recursion loop, set
# fallback_to_unsolvable to False just in case.
# This means any additional errors that may be raised will be passed to
# the call_function that called this method in the first place.
node2, ret = self.call_function(node1,
funcb.AssignToNewVariable(),
args,
fallback_to_unsolvable=False)
nodes.append(node2)
rets.append(ret)
if nodes:
ret = self.join_variables(node0, rets)
node = self.join_cfg_nodes(nodes)
if ret.bindings:
return node, ret
else:
node = node0
log.info("Unable to generate fake arguments for %s", funcv)
return node, self.new_unsolvable(node)
def analyze_method_var(self, node0, name, var, cls=None):
log.info("Analyzing %s", name)
node1 = node0.ConnectNew(name)
for val in var.bindings:
node2 = self.maybe_analyze_method(node1, val, cls)
node2.ConnectTo(node0)
return node0
def bind_method(self, node, name, methodvar, instance_var):
bound = self.program.NewVariable()
for m in methodvar.Data(node):
if isinstance(m, special_builtins.ClassMethodInstance):
m = m.func.data[0]
is_cls = True
else:
is_cls = (m.isinstance_InterpreterFunction() and m.is_classmethod)
bound.AddBinding(m.property_get(instance_var, is_cls), [], node)
return bound
def _instantiate_binding(self, node0, cls, container):
"""Instantiate a class binding."""
node1, new = cls.data.get_own_new(node0, cls)
if not new or (
any(not isinstance(f, abstract.InterpreterFunction) for f in new.data)):
# This assumes that any inherited __new__ method defined in a pyi file
# returns an instance of the current class.
return node0, cls.data.instantiate(node0, container=container)
instance = self.program.NewVariable()
nodes = []
for b in new.bindings:
self._analyzed_functions.add(b.data.get_first_opcode())
node2, args = self.create_method_arguments(node1, b.data)
args = self._maybe_fix_classmethod_cls_arg(node0, cls, b.data, args)
node3 = node2.ConnectNew()
node4, ret = self.call_function_with_args(node3, b, args)
instance.PasteVariable(ret)
nodes.append(node4)
return self.join_cfg_nodes(nodes), instance
def _instantiate_var(self, node, clsv, container):
"""Build an (dummy) instance from a class, for analyzing it."""
n = self.program.NewVariable()
for cls in clsv.Bindings(node, strict=False):
node, var = self._instantiate_binding(node, cls, container)
n.PasteVariable(var)
return node, n
def _mark_maybe_missing_members(self, values):
"""Set maybe_missing_members to True on these values and their type params.
Args:
values: A list of BaseValue objects. On every instance among
the values, recursively set maybe_missing_members to True on the
instance and its type parameters.
"""
values = list(values)
seen = set()
while values:
v = values.pop(0)
if v not in seen:
seen.add(v)
if isinstance(v, abstract.SimpleValue):
v.maybe_missing_members = True
for child in v.instance_type_parameters.values():
values.extend(child.data)
def init_class(self, node, cls, container=None, extra_key=None):
"""Instantiate a class, and also call __init__.
Calling __init__ can be expensive, so this method caches its created
instances. If you don't need __init__ called, use cls.instantiate instead.
Args:
node: The current node.
cls: The class to instantiate.
container: Optionally, a container to pass to the class's instantiate()
method, so that type parameters in the container's template are
instantiated to TypeParameterInstance.
extra_key: Optionally, extra information about the location at which the
        instantiation occurs. By default, this method keys on the current opcode
and the class, which sometimes isn't enough to disambiguate callers
that shouldn't get back the same cached instance.
Returns:
A tuple of node and instance variable.
"""
key = (self.frame and self.frame.current_opcode, extra_key, cls)
instance = self._instance_cache.get(key)
if not instance or isinstance(instance, _Initializing):
clsvar = cls.to_variable(node)
node, instance = self._instantiate_var(node, clsvar, container)
if key in self._instance_cache:
# We've encountered a recursive pattern such as
# class A:
# def __init__(self, x: "A"): ...
# Calling __init__ again would lead to an infinite loop, so
# we instead create an incomplete instance that will be
# overwritten later. Note that we have to create a new
# instance rather than using the one that we're already in
# the process of initializing - otherwise, setting
# maybe_missing_members to True would cause pytype to ignore
# all attribute errors on self in __init__.
self._mark_maybe_missing_members(instance.data)
else:
self._instance_cache[key] = _Initializing()
node = self.call_init(node, instance)
self._instance_cache[key] = instance
return node, instance
def _call_method(self, node, binding, method_name):
node, method = self.attribute_handler.get_attribute(
node, binding.data.get_class(), method_name, binding)
if method:
bound_method = self.bind_method(
node, method_name, method, binding.AssignToNewVariable())
node = self.analyze_method_var(node, method_name, bound_method)
return node
def _call_init_on_binding(self, node, b):
if isinstance(b.data, abstract.SimpleValue):
for param in b.data.instance_type_parameters.values():
node = self.call_init(node, param)
node = self._call_method(node, b, "__init__")
cls = b.data.get_class()
if isinstance(cls, abstract.InterpreterClass):
      # Call any additional initializers the class has registered.
for method in cls.additional_init_methods:
node = self._call_method(node, b, method)
return node
def call_init(self, node, instance):
# Call __init__ on each binding.
for b in instance.bindings:
if b.data in self._initialized_instances:
continue
self._initialized_instances.add(b.data)
node = self._call_init_on_binding(node, b)
return node
def reinitialize_if_initialized(self, node, instance):
if instance in self._initialized_instances:
self._call_init_on_binding(node, instance.to_binding(node))
def analyze_class(self, node, val):
self._analyzed_classes.add(val.data)
node, instance = self.init_class(node, val.data)
good_instances = [b for b in instance.bindings if val.data == b.data.cls]
if not good_instances:
# __new__ returned something that's not an instance of our class.
instance = val.data.instantiate(node)
node = self.call_init(node, instance)
elif len(good_instances) != len(instance.bindings):
# __new__ returned some extra possibilities we don't need.
instance = self.join_bindings(node, good_instances)
for instance_value in instance.data:
val.data.register_canonical_instance(instance_value)
for name, methodvar in sorted(val.data.members.items()):
if name in self._CONSTRUCTORS:
continue # We already called this method during initialization.
b = self.bind_method(node, name, methodvar, instance)
node = self.analyze_method_var(node, name, b, val)
return node
def analyze_function(self, node0, val):
if val.data.is_attribute_of_class:
# We'll analyze this function as part of a class.
log.info("Analyze functions: Skipping class method %s", val.data.name)
else:
node1 = node0.ConnectNew(val.data.name)
node2 = self.maybe_analyze_method(node1, val)
node2.ConnectTo(node0)
return node0
def _should_analyze_as_interpreter_function(self, data):
# We record analyzed functions by opcode rather than function object. The
# two ways of recording are equivalent except for closures, which are
# re-generated when the variables they close over change, but we don't want
# to re-analyze them.
return (isinstance(data, abstract.InterpreterFunction) and
not data.is_overload and
not data.is_class_builder and
data.get_first_opcode() not in self._analyzed_functions and
not _SKIP_FUNCTION_RE.search(data.name))
def analyze_toplevel(self, node, defs):
    for name, var in sorted(defs.items()): # sort, for determinism
if not self._is_typing_member(name, var):
for value in var.bindings:
if isinstance(value.data, abstract.InterpreterClass):
new_node = self.analyze_class(node, value)
elif (isinstance(value.data, abstract.INTERPRETER_FUNCTION_TYPES) and
not value.data.is_overload):
new_node = self.analyze_function(node, value)
else:
continue
if new_node is not node:
new_node.ConnectTo(node)
# Now go through all functions and classes we haven't analyzed yet.
# These are typically hidden under a decorator.
# Go through classes first so that the `is_attribute_of_class` will
# be set for all functions in class.
for c in self._interpreter_classes:
for value in c.bindings:
if (isinstance(value.data, abstract.InterpreterClass) and
value.data not in self._analyzed_classes):
node = self.analyze_class(node, value)
for f in self._interpreter_functions:
for value in f.bindings:
if self._should_analyze_as_interpreter_function(value.data):
node = self.analyze_function(node, value)
return node
def analyze(self, node, defs, maximum_depth):
assert not self.frame
self.maximum_depth = maximum_depth
self._analyzing = True
node = node.ConnectNew(name="Analyze")
return self.analyze_toplevel(node, defs)
def trace_unknown(self, name, unknown_binding):
self._unknowns[name] = unknown_binding
def trace_call(self, node, func, sigs, posargs, namedargs, result):
"""Add an entry into the call trace.
Args:
node: The CFG node right after this function call.
func: A cfg.Binding of a function that was called.
sigs: The signatures that the function might have been called with.
posargs: The positional arguments, an iterable over cfg.Value.
namedargs: The keyword arguments, a dict mapping str to cfg.Value.
result: A Variable of the possible result values.
"""
log.debug("Logging call to %r with %d args, return %r",
func, len(posargs), result)
args = tuple(posargs)
kwargs = tuple((namedargs or {}).items())
record = CallRecord(node, func, sigs, args, kwargs, result)
if isinstance(func.data, abstract.BoundPyTDFunction):
self._method_calls.add(record)
elif isinstance(func.data, abstract.PyTDFunction):
self._calls.add(record)
def trace_functiondef(self, f):
self._interpreter_functions.append(f)
def trace_classdef(self, c):
self._interpreter_classes.append(c)
def trace_namedtuple(self, nt):
# All namedtuple instances with the same name are equal, so it's fine to
# overwrite previous instances.
self._generated_classes[nt.name] = nt
def pytd_classes_for_unknowns(self):
classes = []
for name, val in self._unknowns.items():
if val in val.variable.Filter(self.exitpoint, strict=False):
classes.append(val.data.to_structural_def(self.exitpoint, name))
return classes
def pytd_for_types(self, defs):
# If a variable is annotated, we'll always output that type.
annotated_names = set()
data = []
pytd_convert = self.convert.pytd_convert
annots = abstract_utils.get_annotations_dict(defs)
for name, t in pytd_convert.annotations_to_instance_types(
self.exitpoint, annots):
annotated_names.add(name)
data.append(pytd.Constant(name, t))
for name, var in defs.items():
if (name in output.TOP_LEVEL_IGNORE or name in annotated_names or
self._is_typing_member(name, var)):
continue
options = var.FilteredData(self.exitpoint, strict=False)
if (len(options) > 1 and
not all(isinstance(o, abstract.FUNCTION_TYPES) for o in options)):
# It's ambiguous whether this is a type, a function or something
# else, so encode it as a constant.
combined_types = pytd_utils.JoinTypes(t.to_type(self.exitpoint)
for t in options)
data.append(pytd.Constant(name, combined_types))
elif options:
for option in options:
try:
d = option.to_pytd_def(self.exitpoint, name) # Deep definition
except NotImplementedError:
d = option.to_type(self.exitpoint) # Type only
if isinstance(d, pytd.NothingType):
if isinstance(option, abstract.Empty):
d = pytd.AnythingType()
else:
assert isinstance(option, typing_overlay.NoReturn)
if isinstance(d, pytd.Type) and not isinstance(d, pytd.TypeParameter):
data.append(pytd.Constant(name, d))
else:
data.append(d)
else:
log.error("No visible options for %s", name)
data.append(pytd.Constant(name, pytd.AnythingType()))
return pytd_utils.WrapTypeDeclUnit("inferred", data)
@staticmethod
def _call_traces_to_function(call_traces, name_transform=lambda x: x):
funcs = collections.defaultdict(pytd_utils.OrderedSet)
for node, func, sigs, args, kws, retvar in call_traces:
# The lengths may be different in the presence of optional and kw args.
arg_names = max((sig.get_positional_names() for sig in sigs), key=len)
for i in range(len(arg_names)):
if not isinstance(func.data, abstract.BoundFunction) or i > 0:
arg_names[i] = function.argname(i)
arg_types = (a.data.to_type(node) for a in args)
ret = pytd_utils.JoinTypes(t.to_type(node) for t in retvar.data)
starargs = None
starstarargs = None
funcs[func.data.name].add(pytd.Signature(
tuple(pytd.Parameter(n, t, False, False, None)
for n, t in zip(arg_names, arg_types)) +
tuple(pytd.Parameter(name, a.data.to_type(node), False, False, None)
for name, a in kws),
starargs, starstarargs,
ret, exceptions=(), template=()))
functions = []
for name, signatures in funcs.items():
functions.append(pytd.Function(name_transform(name), tuple(signatures),
pytd.MethodTypes.METHOD))
return functions
def _is_typing_member(self, name, var):
for module_name in ("typing", "typing_extensions"):
if module_name not in self.loaded_overlays:
continue
module = self.loaded_overlays[module_name].get_module(name)
if name in module.members and module.members[name].data == var.data:
return True
return False
def pytd_functions_for_call_traces(self):
return self._call_traces_to_function(self._calls, escape.pack_partial)
def pytd_classes_for_call_traces(self):
class_to_records = collections.defaultdict(list)
for call_record in self._method_calls:
args = call_record.positional_arguments
if not any(isinstance(a.data, abstract.Unknown) for a in args):
# We don't need to record call signatures that don't involve
# unknowns - there's nothing to solve for.
continue
cls = args[0].data.get_class()
if isinstance(cls, abstract.PyTDClass):
class_to_records[cls].append(call_record)
classes = []
for cls, call_records in class_to_records.items():
full_name = cls.module + "." + cls.name if cls.module else cls.name
classes.append(pytd.Class(
name=escape.pack_partial(full_name),
metaclass=None,
parents=(pytd.NamedType("builtins.object"),), # not used in solver
methods=tuple(self._call_traces_to_function(call_records)),
constants=(),
classes=(),
decorators=(),
slots=None,
template=(),
))
return classes
def pytd_classes_for_namedtuple_instances(self):
return tuple(v.generate_ast() for v in self._generated_classes.values())
def compute_types(self, defs):
classes = (tuple(self.pytd_classes_for_unknowns()) +
tuple(self.pytd_classes_for_call_traces()) +
self.pytd_classes_for_namedtuple_instances())
functions = tuple(self.pytd_functions_for_call_traces())
aliases = () # aliases are instead recorded as constants
ty = pytd_utils.Concat(
self.pytd_for_types(defs),
pytd_utils.CreateModule("unknowns", classes=classes,
functions=functions, aliases=aliases))
ty = ty.Visit(optimize.CombineReturnsAndExceptions())
ty = ty.Visit(optimize.PullInMethodClasses())
ty = ty.Visit(visitors.DefaceUnresolved(
[ty, self.loader.concat_all()], escape.UNKNOWN))
return ty.Visit(visitors.AdjustTypeParameters())
def _check_return(self, node, actual, formal):
if not self.options.report_errors:
return True
views = abstract_utils.get_views([actual], node)
# Check for typevars in the return value first, since bad_matches
# expects not to get any.
bad = [view for view in views
if actual in view and view[actual].data.formal]
if not bad:
bad = self.matcher.bad_matches(actual, formal, node)
if bad:
self.errorlog.bad_return_type(
self.frames, node, formal, actual, bad)
return not bad
def check_types(src, filename, errorlog, options, loader,
deep=True, init_maximum_depth=INIT_MAXIMUM_DEPTH,
maximum_depth=None, **kwargs):
"""Verify the Python code."""
tracer = CallTracer(errorlog=errorlog, options=options,
generate_unknowns=False, loader=loader, **kwargs)
loc, defs = tracer.run_program(src, filename, init_maximum_depth)
snapshotter = metrics.get_metric("memory", metrics.Snapshot)
snapshotter.take_snapshot("analyze:check_types:tracer")
if deep:
if maximum_depth is None:
maximum_depth = (
QUICK_CHECK_MAXIMUM_DEPTH if options.quick else MAXIMUM_DEPTH)
tracer.analyze(loc, defs, maximum_depth=maximum_depth)
snapshotter.take_snapshot("analyze:check_types:post")
_maybe_output_debug(options, tracer.program)
def infer_types(src, errorlog, options, loader,
filename=None, deep=True, init_maximum_depth=INIT_MAXIMUM_DEPTH,
show_library_calls=False, maximum_depth=None, tracer_vm=None,
**kwargs):
"""Given Python source return its types.
Args:
src: A string containing Python source code.
errorlog: Where error messages go. Instance of errors.ErrorLog.
options: config.Options object
loader: A load_pytd.Loader instance to load PYI information.
filename: Filename of the program we're parsing.
deep: If True, analyze all functions, even the ones not called by the main
execution flow.
init_maximum_depth: Depth of analysis during module loading.
show_library_calls: If True, call traces are kept in the output.
maximum_depth: Depth of the analysis. Default: unlimited.
tracer_vm: An instance of CallTracer, in case the caller wants to
instantiate and retain the vm used for type inference.
**kwargs: Additional parameters to pass to vm.VirtualMachine
Returns:
A tuple of (ast: TypeDeclUnit, builtins: TypeDeclUnit)
Raises:
AssertionError: In case of a bad parameter combination.
"""
# If the caller has passed in a vm, use that.
if tracer_vm:
assert isinstance(tracer_vm, CallTracer)
tracer = tracer_vm
else:
tracer = CallTracer(errorlog=errorlog, options=options,
generate_unknowns=options.protocols,
store_all_calls=not deep, loader=loader, **kwargs)
loc, defs = tracer.run_program(src, filename, init_maximum_depth)
log.info("===Done running definitions and module-level code===")
snapshotter = metrics.get_metric("memory", metrics.Snapshot)
snapshotter.take_snapshot("analyze:infer_types:tracer")
if deep:
if maximum_depth is None:
if not options.quick:
maximum_depth = MAXIMUM_DEPTH
elif options.analyze_annotated:
# Since there's no point in analyzing annotated functions for inference,
# the presence of this option means that the user wants checking, too.
maximum_depth = QUICK_CHECK_MAXIMUM_DEPTH
else:
maximum_depth = QUICK_INFER_MAXIMUM_DEPTH
tracer.exitpoint = tracer.analyze(loc, defs, maximum_depth)
else:
tracer.exitpoint = loc
snapshotter.take_snapshot("analyze:infer_types:post")
ast = tracer.compute_types(defs)
ast = tracer.loader.resolve_ast(ast)
if tracer.has_unknown_wildcard_imports or any(
a in defs for a in abstract_utils.DYNAMIC_ATTRIBUTE_MARKERS):
if "__getattr__" not in ast:
ast = pytd_utils.Concat(
ast, builtins.GetDefaultAst(options.python_version))
  # If merged with the other `if options.protocols` block, this triggers a
  # "ValueError: Unresolved class" when it attempts to load from the protocols file.
if options.protocols:
protocols_pytd = tracer.loader.import_name("protocols")
else:
protocols_pytd = None
builtins_pytd = tracer.loader.concat_all()
# Insert type parameters, where appropriate
ast = ast.Visit(visitors.CreateTypeParametersForSignatures())
if options.protocols:
log.info("=========== PyTD to solve =============\n%s",
pytd_utils.Print(ast))
ast = convert_structural.convert_pytd(ast, builtins_pytd, protocols_pytd)
elif not show_library_calls:
log.info("Solving is turned off. Discarding call traces.")
# Rename remaining "~unknown" to "?"
ast = ast.Visit(visitors.RemoveUnknownClasses())
# Remove "~list" etc.:
ast = convert_structural.extract_local(ast)
_maybe_output_debug(options, tracer.program)
return ast, builtins_pytd
def _maybe_output_debug(options, program):
"""Maybe emit debugging output."""
if options.output_cfg or options.output_typegraph:
dot = debug.program_to_dot(program, set([]), bool(options.output_cfg))
svg_file = options.output_cfg or options.output_typegraph
proc = subprocess.Popen(["/usr/bin/dot", "-T", "svg", "-o", svg_file],
stdin=subprocess.PIPE, universal_newlines=True)
(_, stderr) = proc.communicate(dot)
if stderr:
log.info("Failed to create %s: %s", svg_file, stderr)
if options.output_debug:
text = debug.program_to_text(program)
if options.output_debug == "-":
log.info("=========== Program Dump =============\n%s", text)
else:
with options.open_function(options.output_debug, "w") as fi:
fi.write(text)
| 2.25 | 2 |
src/cupcake/post_isoseq_cluster/demux_by_barcode_groups.py | milescsmith/cDNA_Cupcake | 0 | 2928 | #!/usr/bin/env python
__author__ = "<EMAIL>"
"""
Given a pooled input GFF + demux CSV file, write out per-{barcode group} GFFs
If input fasta/fastq is given, optionally also output per-{barcode group} FASTA/FASTQ
"""
import re
from collections import defaultdict
from csv import DictReader
from typing import Optional
import typer
from Bio import SeqIO
import cupcake.sequence.GFF as GFF
from cupcake import version_callback
from cupcake import cupcake_logger as logger
rex_pbid = re.compile(r"(PB.\d+.\d+)(|\S+)")
app = typer.Typer(name="cupcake.post_isoseq_cluster.demux_by_barcode_groups")
def get_type_fafq(in_filename):
in_filename = in_filename.upper()
if in_filename.endswith(".FA") or in_filename.endswith("FASTA"):
return "fasta"
elif in_filename.endswith(".FQ") or in_filename.endswith("FASTQ"):
return "fastq"
else:
raise Exception(
f"Unrecognized file suffix .{in_filename[in_filename.find('.'):]}! Must end with .fasta or .fastq!"
)
def regroup_gff(
pooled_gff, demux_count_file, output_prefix, out_group_dict, in_fafq=None
):
"""
    :param pooled_gff: pooled GFF file
    :param demux_count_file: comma-delimited per-barcode count file
    :param output_prefix: output prefix for GFF
    :param out_group_dict: dict of barcode name --> group it belongs to (ex: {'EM1':'EM', 'EM2':'EM'})
    :param in_fafq: optional fasta/fastq that was the input to the pooled GFF
"""
if in_fafq is not None:
type_fafq = get_type_fafq(in_fafq)
in_tissue = defaultdict(
lambda: set()
) # pbid --> list of tissue it is in (EM, END, R)
for r in DictReader(open(demux_count_file), delimiter=","):
for k, v in r.items():
if k != "id" and int(v) > 0:
in_tissue[r["id"]].add(k)
# in_tissue = dict(in_tissue)
handles = {}
handles_fafq = {}
for g in out_group_dict.values():
handles[g] = open(f"{output_prefix}_{g}_only.gff", "w")
if in_fafq is not None:
handles_fafq[g] = open(f"{output_prefix}_{g}_only.{type_fafq}", "w")
if in_fafq is not None:
fafq_dict = SeqIO.to_dict(SeqIO.parse(open(in_fafq), type_fafq))
fafq_dict_keys = list(fafq_dict.keys())
for k in fafq_dict_keys:
m = rex_pbid.match(k)
if m is not None:
fafq_dict[m.group(1)] = fafq_dict[k]
reader = GFF.collapseGFFReader(pooled_gff)
for r in reader:
groups_to_write_in = set()
pbid = r.seqid
if pbid not in in_tissue:
logger.info(
f"WARNING: {pbid} does not belong to any group indicated by outgroup_dict"
)
for tissue in in_tissue[pbid]:
groups_to_write_in.add(out_group_dict[tissue])
for g in groups_to_write_in:
GFF.write_collapseGFF_format(handles[g], r)
if in_fafq is not None:
SeqIO.write(fafq_dict[pbid], handles_fafq[g], type_fafq)
@app.command(name="")
def main(
pooled_gff: str = typer.Argument(..., help="Pooled GFF file"),
demux_count_file: str = typer.Argument(..., help="Demux count file"),
output_prefix: str = typer.Argument(..., help="Output prefix for GFF outputs"),
outgroup_dict: str = typer.Argument(..., help="Tuples indicating barcode grouping"),
pooled_fastx: Optional[str] = typer.Option(
None,
help="Pooled FASTA/FASTQ (optional, if given, will also output demux fa/fq)",
),
version: bool = typer.Option(
None,
"--version",
callback=version_callback,
is_eager=True,
help="Prints the version of the SQANTI3 package.",
),
) -> None:
tmp = eval(outgroup_dict)
out_group_dict = dict([tmp]) if len(tmp) == 1 else dict(tmp)
regroup_gff(
pooled_gff,
demux_count_file,
output_prefix,
out_group_dict,
pooled_fastx,
)
if __name__ == "__main__":
typer.run(main)
| 2.8125 | 3 |
vll/data/circle_dataset.py | paulhfu/3dcv-students | 4 | 2929 | import random
import numpy as np
import math
from skimage.draw import line, line_aa, circle, set_color, circle_perimeter_aa
from skimage.io import imsave
from skimage.util import random_noise
maxSlope = 10 # restrict the maximum slope of generated lines for stability
minLength = 20 # restrict the minimum length of line segments
class ICircleDataset:
'''
Generator of circle segment images.
Images will have 1 random circle each, filled with noise and distractor lines.
Class also offers functionality for drawing line parameters, hypotheses and point predictions.
'''
def __init__(self, imgW = 64, imgH = 64, margin = -5, bg_clr = 0.5):
'''
Constructor.
imgW -- image width (default 64)
imgH -- image height (default 64)
        margin -- circles are sampled within this margin; a negative value means that a circle can extend beyond the image border (default -5)
bg_clr -- background intensity (default 0.5)
'''
self.imgW = imgW
self.imgH = imgH
self.margin = margin
self.bg_clr = bg_clr
def draw_circle(self, data, cX, cY, r, clr, alpha=1.0):
'''
Draw a circle with the given color and opacity.
data -- image to draw to
cX -- x value of circle center
cY -- y value of circle center
r -- radius of circle
clr -- line color, triple of values
alpha -- opacity (default 1.0)
'''
cY = int(cY * self.imgH)
cX = int(cX * self.imgW)
r = int(r * self.imgW)
rr, cc, val = circle_perimeter_aa(cY, cX, r)
set_color(data, (rr, cc), clr, val)
def draw_hyps(self, labels, scores, data=None):
'''
Draw a set of line hypothesis for a batch of images.
labels -- line parameters, array shape (NxMx2) where
N is the number of images in the batch
M is the number of hypotheses per image
2 is the number of line parameters (intercept, slope)
scores -- hypotheses scores, array shape (NxM), see above, higher score will be drawn with higher opacity
data -- batch of images to draw to, if empty a new batch wil be created according to the shape of labels
'''
n = labels.shape[0] # number of images
m = labels.shape[1] # number of hypotheses
if data is None: # create new batch of images
data = np.zeros((n, self.imgH, self.imgW, 3), dtype=np.float32)
data.fill(self.bg_clr)
clr = (0, 0, 1)
for i in range (0, n):
for j in range (0, m):
lY1 = int(labels[i, j, 0] * self.imgH)
lY2 = int(labels[i, j, 1] * self.imgW + labels[i, j, 0] * self.imgH)
                self.draw_line(data[i], 0, lY1, self.imgW, lY2, clr, scores[i, j])  # note: draw_line is not defined in this class; it presumably carries over from the line-segment version of this dataset
return data
def draw_models(self, labels, data=None, correct=None):
'''
Draw circles for a batch of images.
labels -- circle parameters, array shape (Nx3) where
N is the number of images in the batch
3 is the number of circles parameters (center x, center y, radius)
        data -- batch of images to draw to; if empty, a new batch will be created according to the shape of labels
                and circles will be drawn green, otherwise circles will be drawn blue
correct -- array of shape (N) indicating whether a circle estimate is correct
'''
n = labels.shape[0]
if data is None:
data = np.zeros((n, self.imgH, self.imgW, 3), dtype=np.float32)
data.fill(self.bg_clr)
clr = (0, 1, 0)
else:
clr = (0, 0, 1)
for i in range (0, n):
self.draw_circle(data[i], labels[i, 0], labels[i, 1], labels[i, 2], clr)
if correct is not None:
            # draw border green if estimate is correct, red otherwise
if correct[i]: borderclr = (0, 1, 0)
else: borderclr = (1, 0, 0)
set_color(data[i], line(0, 0, 0, self.imgW-1), borderclr)
set_color(data[i], line(0, 0, self.imgH-1, 0), borderclr)
set_color(data[i], line(self.imgH-1, 0, self.imgH-1, self.imgW-1), borderclr)
set_color(data[i], line(0, self.imgW-1, self.imgH-1, self.imgW-1), borderclr)
return data
def draw_points(self, points, data, inliers=None):
'''
Draw 2D points for a batch of images.
points -- 2D points, array shape (Nx2xM) where
N is the number of images in the batch
2 is the number of point dimensions (x, y)
M is the number of points
data -- batch of images to draw to
inliers -- soft inlier score for each point,
                   if given and the score is > 0.5 the point will be drawn as a light circle (inlier), dark otherwise
'''
n = points.shape[0] # number of images
m = points.shape[2] # number of points
for i in range (0, n):
for j in range(0, m):
clr = (0.2, 0.2, 0.2) # draw predicted points as dark circles
if inliers is not None and inliers[i, j] > 0.5:
clr = (0.7, 0.7, 0.7) # draw inliers as light circles
r = int(points[i, 0, j] * self.imgH)
c = int(points[i, 1, j] * self.imgW)
rr, cc = circle(r, c, 2)
set_color(data[i], (rr, cc), clr)
return data
def samples(self, n):
'''
        Create new input images of random circles and distractors along with ground truth parameters.
n -- number of images to create
'''
data = np.zeros((n, self.imgH, self.imgW, 3), dtype=np.float32)
data.fill(self.bg_clr)
labels = np.zeros((n, 3), dtype=np.float32)
for i in range (0, n):
data[i] = random_noise(data[i], mode='speckle')
return data, labels
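    # A minimal sketch (not used above) of how the labeled circle sampling described in the
    # class docstring could look; the center/radius ranges are arbitrary choices that keep
    # the circle inside the default 64x64 canvas.
    def samples_sketch(self, n):
        '''
        Create n images with one random circle each and return (data, labels).
        n -- number of images to create
        '''
        data = np.zeros((n, self.imgH, self.imgW, 3), dtype=np.float32)
        data.fill(self.bg_clr)
        labels = np.zeros((n, 3), dtype=np.float32)
        for i in range(0, n):
            cX = random.uniform(0.3, 0.7)  # normalized center x
            cY = random.uniform(0.3, 0.7)  # normalized center y
            r = random.uniform(0.1, 0.2)   # normalized radius
            self.draw_circle(data[i], cX, cY, r, (random.random(), random.random(), random.random()))
            labels[i] = (cX, cY, r)
            data[i] = random_noise(data[i], mode='speckle')
        return data, labels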
| 2.921875 | 3 |
VAE/reduced_model/nesm_generator.py | youngmg1995/NES-Music-Maker | 3 | 2930 | <reponame>youngmg1995/NES-Music-Maker
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 1 17:14:19 2020
@author: Mitchell
nesm_generator.py
~~~~~~~~~~~~~~~~~
This file serves as a script for using our pre-trained VAE model to generate
brand new NES music soundtracks. NOTE - using the reduced model we only
generate the first melodic voice for each track rather than each of the four
voices present in an NESM track. To do so we first reconstruct our model using
the file VAE class defined in `VAE.py` and the same parameters used in
`model_training`. Then we use functions from the file `generation_utils` to
have our trained model create entirely new and original NES music.
"""
# Imports
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# NOTE - nesmdb folder manually added to environment libraries
from dataset_utils import load_training
from VAE import VAE
from generation_utils import generate_seprsco, latent_SVD, get_latent_vecs,\
plot_track, filter_tracks
import nesmdb
from nesmdb.vgm.vgm_to_wav import save_vgmwav
import tensorflow as tf
import numpy as np
import os, json
### Load Mappings
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Parameters for shape of dataset (note these are also used for model def.)
measures = 8
measure_len = 96
# load data
training_foldername = '../../nesmdb24_seprsco/train/'
train_save_filename = 'transformed_dataset.json'
dataset , labels2int_map , int2labels_map = \
load_training(training_foldername, train_save_filename,
measures = measures, measure_len = measure_len)
### Reinitiate Model
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
### Model Parameters
latent_dim = 124
input_dim = len(int2labels_map) - 1
dropout = .1
maxnorm = None
vae_b1 , vae_b2 = .02 , .1
print('Reinitiating VAE Model')
# Build Model
model = VAE(latent_dim, input_dim, measures, measure_len, dropout,
maxnorm, vae_b1 , vae_b2)
# Reload Saved Weights
checkpoint_dir = './training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, "model_ckpt")
model.load_weights(checkpoint_prefix)
model.build(tf.TensorShape([None, measures, measure_len, ]))
# Print Summary of Model
model.summary()
### Sample Latent Variable Distributions
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Here we use SVD to more effectively sample from the orthogonal components
# of our latent space
# Parameters for sampling
num_songs = 10
print('Generating Latent Samples to Generate {} New Tracks'.format(num_songs))
# Grab distributions of dataset over latent space
# Have to run in batches due to size of the dataset
batch_size = 300
latent_vecs = get_latent_vecs(model, dataset, batch_size)
# Sample from normal distribution
rand_vecs = np.random.normal(0.0, 1.0, (num_songs, latent_dim))
# perform SVD
plot_eigenvalues = True
sample_vecs = latent_SVD(latent_vecs, rand_vecs, plot_eigenvalues)
### Generate New Tracks
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Create new seprsco tracks using our model and the random samples
# Seprsco files can later be converted to valid NES music format
# Parameters for track generation (specifically filtering)
p_min = .5
print('Generating New Tracks from Latent Samples')
# Decode samples using VAE
decoded_tracks = model.decoder(sample_vecs)
# Plot first decoded track
print("Example Model Generated Track")
plot_track(decoded_tracks[0])
# Filter Track
decoded_tracks = filter_tracks(decoded_tracks, p_min)
# Plot first filtered track
print("Example Filtered Track")
plot_track(decoded_tracks[0])
# Convert tracks to seprsco format
print('Converting Model Output to Seprsco')
seprsco_tracks = generate_seprsco(decoded_tracks, int2labels_map)
### Convert to WAV
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Convert seprsco tracks to WAV files so we can listen!!!
print('Converting Seprsco to WAV Audio')
wav_tracks = []
for track in seprsco_tracks:
wav = nesmdb.convert.seprsco_to_wav(track)
wav_tracks.append(wav)
### Save WAV Files
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Save our wav tracks to appropriate files (be sure not to overwrite existing)
# Also save latent variables so we can reproduce songs we like
# Save WAV tracks
save_wav = False
if save_wav:
print('Saving Generated WAV Audio Tracks')
wav_folder = 'model_gen_files/'
for i in range(len(wav_tracks)):
wav_file = wav_folder+'VAE_NESM_{}.wav'.format(i)
save_vgmwav(wav_file, wav_tracks[i])
# Save Latent Variables
save_latent_var = False
if save_latent_var:
print('Saving Latent Variables for Generated Tracks')
latent_filename = os.path.join(wav_folder, "latent_variables.json")
with open(latent_filename, 'w') as f:
json.dump({
'VAE_NESM_{}.wav'.format(i): sample_vecs[i].tolist()
for i in range(sample_vecs.shape[0])
}, f)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#----------------------------------END FILE------------------------------------
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | 2.109375 | 2 |
anlogger/logger.py | anttin/anlogger | 0 | 2931 | import logging
import logging.handlers
import os
class Logger(object):
def __init__(self, name, default_loglevel='INFO', fmt=None, syslog=None):
self.name = name
self.syslog = syslog
self.fmt = fmt if fmt is not None else "%(asctime)-15s %(name)s %(levelname)s %(message)s"
if 'LOGLEVEL' in os.environ:
self.level = os.environ['LOGLEVEL'].upper()
else:
self.level = default_loglevel.upper()
logging.basicConfig(format=self.fmt)
self.logger = logging.getLogger(self.name)
self.logger.setLevel(self.level)
if self.syslog is not None and self.syslog not in (False, 0):
if isinstance(self.syslog, (list, tuple)):
_addr = tuple(self.syslog)
elif isinstance(self.syslog, str):
_addr = self.syslog
else:
_addr = "/dev/log" if os.path.exists("/dev/log") else None
if _addr is not None:
handler = logging.handlers.SysLogHandler(address=_addr)
self.logger.addHandler(handler)
def get(self):
return self.logger
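# A minimal usage sketch; the logger name, level and message below are arbitrary examples.
if __name__ == "__main__":
    log = Logger("demo", default_loglevel="DEBUG").get()
    log.debug("anlogger is configured")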
| 2.75 | 3 |
Python/hello_world-theopaid.py | saurabhcommand/Hello-world | 1,428 | 2932 | #Author <NAME>
print("Hello World")
hello_list = ["Hello World"]
print(hello_list[0])
for i in hello_list:
print(i) | 3.3125 | 3 |
question_answering/stubs.py | uliang/NaturalLanguageQueryingSystem | 0 | 2933 | <filename>question_answering/stubs.py
from collections import namedtuple
from unittest.mock import MagicMock
_fake_ext = namedtuple('_', ['qtype', 'kb_ident'])
class FakeDoc:
def __init__(self, text, qtype, kb_ident):
self._ = _fake_ext(qtype, kb_ident)
self.text = text
def __str__(self):
return f"<[MOCKED NLP]{self.text}>"
| 2.71875 | 3 |
lib/env/trade/BaseTradeStrategy.py | devas123/Bitcoin-Trader-RL | 0 | 2934 | from abc import ABCMeta, abstractmethod
from typing import Tuple, Callable
class BaseTradeStrategy(object, metaclass=ABCMeta):
@abstractmethod
def __init__(self,
commissionPercent: float,
maxSlippagePercent: float,
base_precision: int,
asset_precision: int,
min_cost_limit: float,
min_amount_limit: float):
pass
@abstractmethod
def trade(self,
action: int,
n_discrete_actions: int,
balance: float,
asset_held: float,
current_price: Callable[[str], float]) -> Tuple[float, float, float, float]:
raise NotImplementedError()
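# A minimal illustrative subclass of the abstract base; the zeros returned by trade() are
# placeholders only, since the meaning of the four floats is defined by the concrete
# strategies elsewhere in this package.
class NoopTradeStrategy(BaseTradeStrategy):
    def __init__(self,
                 commissionPercent: float,
                 maxSlippagePercent: float,
                 base_precision: int,
                 asset_precision: int,
                 min_cost_limit: float,
                 min_amount_limit: float):
        self.commissionPercent = commissionPercent
        self.maxSlippagePercent = maxSlippagePercent
        self.base_precision = base_precision
        self.asset_precision = asset_precision
        self.min_cost_limit = min_cost_limit
        self.min_amount_limit = min_amount_limit
    def trade(self,
              action: int,
              n_discrete_actions: int,
              balance: float,
              asset_held: float,
              current_price: Callable[[str], float]) -> Tuple[float, float, float, float]:
        # No-op: nothing is bought or sold.
        return balance, 0.0, asset_held, 0.0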
| 3.265625 | 3 |
4day/Book04_1.py | jsjang93/joony | 0 | 2935 | <reponame>jsjang93/joony<gh_stars>0
# Book04_1.py
class Book:
    category = '소설'  # class attribute ('소설' means "novel"), shared by all instances
b1 = Book(); print(b1.category)
b2 = b1; print(b2.category)  # b2 refers to the same object as b1
print(Book.category)
Book.category = '수필'  # rebinding the class attribute ('수필' means "essay") is seen through both names
print(b2.category); print(b1.category); print(Book.category)
b2.category = 'IT'  # creates an instance attribute that shadows the class attribute on the shared object
print(b2.category); print(b1.category); print(Book.category) | 2.9375 | 3 |
wrappers/python/virgil_crypto_lib/foundation/kdf1.py | odidev/virgil-crypto-c | 26 | 2936 | <filename>wrappers/python/virgil_crypto_lib/foundation/kdf1.py
# Copyright (C) 2015-2021 Virgil Security, Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# (1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# (3) Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Lead Maintainer: <NAME> Inc. <<EMAIL>>
from ctypes import *
from ._c_bridge import VscfKdf1
from ._c_bridge import VscfImplTag
from ._c_bridge import VscfStatus
from virgil_crypto_lib.common._c_bridge import Data
from virgil_crypto_lib.common._c_bridge import Buffer
from .alg import Alg
from .kdf import Kdf
class Kdf1(Alg, Kdf):
"""Virgil Security implementation of the KDF1 (ISO-18033-2) algorithm."""
def __init__(self):
"""Create underlying C context."""
self._lib_vscf_kdf1 = VscfKdf1()
self._c_impl = None
self._ctx = None
self.ctx = self._lib_vscf_kdf1.vscf_kdf1_new()
def __delete__(self, instance):
"""Destroy underlying C context."""
self._lib_vscf_kdf1.vscf_kdf1_delete(self.ctx)
def set_hash(self, hash):
self._lib_vscf_kdf1.vscf_kdf1_use_hash(self.ctx, hash.c_impl)
def alg_id(self):
"""Provide algorithm identificator."""
result = self._lib_vscf_kdf1.vscf_kdf1_alg_id(self.ctx)
return result
def produce_alg_info(self):
"""Produce object with algorithm information and configuration parameters."""
result = self._lib_vscf_kdf1.vscf_kdf1_produce_alg_info(self.ctx)
instance = VscfImplTag.get_type(result)[0].take_c_ctx(cast(result, POINTER(VscfImplTag.get_type(result)[1])))
return instance
def restore_alg_info(self, alg_info):
"""Restore algorithm configuration from the given object."""
status = self._lib_vscf_kdf1.vscf_kdf1_restore_alg_info(self.ctx, alg_info.c_impl)
VscfStatus.handle_status(status)
def derive(self, data, key_len):
"""Derive key of the requested length from the given data."""
d_data = Data(data)
key = Buffer(key_len)
self._lib_vscf_kdf1.vscf_kdf1_derive(self.ctx, d_data.data, key_len, key.c_buffer)
return key.get_bytes()
@classmethod
def take_c_ctx(cls, c_ctx):
inst = cls.__new__(cls)
inst._lib_vscf_kdf1 = VscfKdf1()
inst.ctx = c_ctx
return inst
@classmethod
def use_c_ctx(cls, c_ctx):
inst = cls.__new__(cls)
inst._lib_vscf_kdf1 = VscfKdf1()
inst.ctx = inst._lib_vscf_kdf1.vscf_kdf1_shallow_copy(c_ctx)
return inst
@property
def c_impl(self):
return self._c_impl
@property
def ctx(self):
return self._ctx
@ctx.setter
def ctx(self, value):
self._ctx = self._lib_vscf_kdf1.vscf_kdf1_shallow_copy(value)
self._c_impl = self._lib_vscf_kdf1.vscf_kdf1_impl(self.ctx)
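# A minimal usage sketch; it assumes the Sha256 hash class exported by this same package
# (virgil_crypto_lib.foundation) and an arbitrary 32-byte output length.
if __name__ == "__main__":
    from virgil_crypto_lib.foundation import Sha256
    kdf = Kdf1()
    kdf.set_hash(Sha256())
    key = kdf.derive(b"shared secret", 32)
    print(len(key))  # 32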
| 1.132813 | 1 |
mysite/zoo/tests.py | leixiayang/django-python | 54 | 2937 | #!/usr/bin/env python
# encoding: utf-8
from django.test import TestCase
from zoo import models
class AnimalTestCase(TestCase):
"""Test animals' sound """
def test_dog_says(self):
"""test dog says woof or not
"""
dog = models.Dog(name='Snoopy')
self.assertEqual(dog.says(), 'woof')
def test_cat_says(self):
"""test cat says meow of not
"""
cat = models.Cat(name='Garfield')
self.assertEqual(cat.says(), 'meow')
| 2.71875 | 3 |
EX025.py | gjaosdij/PythonProject | 0 | 2938 | print('Type your full name: ')
nome = input().strip().upper()
print('Does your name contain "Silva"?')
print('SILVA' in nome)
| 3.734375 | 4 |
configs/_base_/datasets/stvqa_dataset.py | linxi1158/iMIX | 23 | 2939 | dataset_type = 'STVQADATASET'
data_root = '/home/datasets/mix_data/iMIX/'
feature_path = 'data/datasets/stvqa/defaults/features/'
ocr_feature_path = 'data/datasets/stvqa/defaults/ocr_features/'
annotation_path = 'data/datasets/stvqa/defaults/annotations/'
vocab_path = 'data/datasets/stvqa/defaults/extras/vocabs/'
train_datasets = ['train']
test_datasets = ['val']
reader_train_cfg = dict(
type='STVQAREADER',
card='default',
mix_features=dict(
train=data_root + feature_path + 'detectron.lmdb',
val=data_root + feature_path + 'detectron.lmdb',
test=data_root + feature_path + 'detectron.lmdb',
),
mix_ocr_features=dict(
train=data_root + ocr_feature_path + 'ocr_en_frcn_features.lmdb',
val=data_root + ocr_feature_path + 'ocr_en_frcn_features.lmdb',
test=data_root + ocr_feature_path + 'ocr_en_frcn_features.lmdb',
),
mix_annotations=dict(
train=data_root + annotation_path + 'imdb_subtrain.npy',
val=data_root + annotation_path + 'imdb_subval.npy',
test=data_root + annotation_path + 'imdb_test_task3.npy',
),
datasets=train_datasets)
reader_test_cfg = dict(
type='STVQAREADER',
card='default',
mix_features=dict(
train=data_root + feature_path + 'detectron.lmdb',
val=data_root + feature_path + 'detectron.lmdb',
test=data_root + feature_path + 'detectron.lmdb',
),
mix_ocr_features=dict(
train=data_root + ocr_feature_path + 'ocr_en_frcn_features.lmdb',
val=data_root + ocr_feature_path + 'ocr_en_frcn_features.lmdb',
test=data_root + ocr_feature_path + 'ocr_en_frcn_features.lmdb',
),
mix_annotations=dict(
train=data_root + annotation_path + 'imdb_subtrain.npy',
val=data_root + annotation_path + 'imdb_subval.npy',
test=data_root + annotation_path + 'imdb_test_task3.npy',
),
datasets=train_datasets)
info_cpler_cfg = dict(
type='STVQAInfoCpler',
glove_weights=dict(
glove6b50d=data_root + 'glove/glove.6B.50d.txt.pt',
glove6b100d=data_root + 'glove/glove.6B.100d.txt.pt',
glove6b200d=data_root + 'glove/glove.6B.200d.txt.pt',
glove6b300d=data_root + 'glove/glove.6B.300d.txt.pt',
),
fasttext_weights=dict(
wiki300d1m=data_root + 'fasttext/wiki-news-300d-1M.vec',
wiki300d1msub=data_root + 'fasttext/wiki-news-300d-1M-subword.vec',
wiki_bin=data_root + 'fasttext/wiki.en.bin',
),
tokenizer='/home/datasets/VQA/bert/' + 'bert-base-uncased-vocab.txt',
mix_vocab=dict(
answers_st_5k=data_root + vocab_path + 'fixed_answer_vocab_stvqa_5k.txt',
vocabulary_100k=data_root + vocab_path + 'vocabulary_100k.txt',
),
max_seg_lenth=20,
max_ocr_lenth=10,
word_mask_ratio=0.0,
vocab_name='vocabulary_100k',
vocab_answer_name='answers_st_5k',
glove_name='glove6b300d',
fasttext_name='wiki_bin',
if_bert=True,
)
train_data = dict(
samples_per_gpu=16,
workers_per_gpu=1,
data=dict(type=dataset_type, reader=reader_train_cfg, info_cpler=info_cpler_cfg, limit_nums=800))
test_data = dict(
samples_per_gpu=16,
workers_per_gpu=1,
data=dict(type=dataset_type, reader=reader_test_cfg, info_cpler=info_cpler_cfg),
)
| 1.734375 | 2 |
src/rpocore/migrations/0007_auto_20160927_1517.py | 2martens/rpo-website | 0 | 2940 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-27 13:17
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import mezzanine.core.fields
class Migration(migrations.Migration):
dependencies = [
('rpocore', '0006_auto_20160921_1924'),
]
operations = [
migrations.CreateModel(
name='SupportingOrganization',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('_order', mezzanine.core.fields.OrderField(null=True, verbose_name='Order')),
('name', models.CharField(max_length=100, verbose_name='Name')),
('logo', models.ImageField(upload_to='', verbose_name='Logo of organization')),
('url', models.CharField(max_length=200, verbose_name='URL')),
],
options={
'verbose_name_plural': 'Supporting organizations',
'ordering': ('_order',),
'verbose_name': 'Supporting organization',
},
),
migrations.AlterField(
model_name='carouselitem',
name='homepage',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='carousel_items', to='rpocore.HomepagePage', verbose_name='Homepage'),
),
migrations.AlterField(
model_name='homepagepage',
name='process',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='rpocore.Process', verbose_name='Process'),
),
migrations.AlterField(
model_name='notablesupporter',
name='supporter_page',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='notable_supporters', to='rpocore.SupporterPage', verbose_name='Supporter page'),
),
migrations.AlterField(
model_name='phase',
name='process',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='rpocore.Process', verbose_name='Process'),
),
migrations.AlterField(
model_name='statementpage',
name='formal_statements',
field=models.ManyToManyField(blank=True, to='rpocore.FormalStatement', verbose_name='Formal statements'),
),
migrations.AlterField(
model_name='statementpage',
name='informal_statements',
field=models.ManyToManyField(blank=True, to='rpocore.InformalStatement', verbose_name='Informal statements'),
),
migrations.AlterField(
model_name='supporter',
name='support_group',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='rpocore.SupportGroup', verbose_name='Support group'),
),
]
| 1.578125 | 2 |
code/Line.py | manno-xx/FutureLearnRobotBuggy | 0 | 2941 | #LineSensor test
from gpiozero import LineSensor
from time import sleep
from signal import pause
def lineDetected():
print('line detected')
def noLineDetected():
print('no line detected')
sensor = LineSensor(14)
sensor.when_line = lineDetected
sensor.when_no_line = noLineDetected
try:
    pause()  # block the main thread; the callbacks above fire on sensor edges
finally:
    sensor.close()  # release the GPIO pin even when interrupted with Ctrl+C
| 2.859375 | 3 |
litex_boards/platforms/myminieye_runber.py | chmousset/litex-boards | 0 | 2942 | #
# This file is part of LiteX-Boards.
#
# Copyright (c) 2021 <NAME> <<EMAIL>>
# SPDX-License-Identifier: BSD-2-Clause
from migen import *
from litex.build.generic_platform import *
from litex.build.gowin.platform import GowinPlatform
from litex.build.openfpgaloader import OpenFPGALoader
# IOs ----------------------------------------------------------------------------------------------
_io = [
# Clk / Rst
("clk12", 0, Pins("4"), IOStandard("LVCMOS33")),
# Leds
("user_led", 0, Pins("23"), IOStandard("LVCMOS33")),
("user_led", 1, Pins("24"), IOStandard("LVCMOS33")),
("user_led", 2, Pins("25"), IOStandard("LVCMOS33")),
("user_led", 3, Pins("26"), IOStandard("LVCMOS33")),
("user_led", 4, Pins("27"), IOStandard("LVCMOS33")),
("user_led", 5, Pins("28"), IOStandard("LVCMOS33")),
("user_led", 6, Pins("29"), IOStandard("LVCMOS33")),
("user_led", 7, Pins("30"), IOStandard("LVCMOS33")),
# RGB led, active-low
("rgb_led", 0,
Subsignal("r", Pins("112")),
Subsignal("g", Pins("114")),
Subsignal("b", Pins("113")),
IOStandard("LVCMOS33"),
),
("rgb_led", 1,
Subsignal("r", Pins("106")),
Subsignal("g", Pins("111")),
Subsignal("b", Pins("110")),
IOStandard("LVCMOS33"),
),
("rgb_led", 2,
Subsignal("r", Pins("101")),
Subsignal("g", Pins("104")),
Subsignal("b", Pins("102")),
IOStandard("LVCMOS33"),
),
("rgb_led", 3,
Subsignal("r", Pins("98")),
Subsignal("g", Pins("100")),
Subsignal("b", Pins("99")),
IOStandard("LVCMOS33"),
),
# Switches
("user_sw", 0, Pins("75"), IOStandard("LVCMOS33")),
("user_sw", 1, Pins("76"), IOStandard("LVCMOS33")),
("user_sw", 2, Pins("78"), IOStandard("LVCMOS33")),
("user_sw", 3, Pins("79"), IOStandard("LVCMOS33")),
("user_sw", 4, Pins("80"), IOStandard("LVCMOS33")),
("user_sw", 5, Pins("81"), IOStandard("LVCMOS33")),
("user_sw", 6, Pins("82"), IOStandard("LVCMOS33")),
("user_sw", 7, Pins("83"), IOStandard("LVCMOS33")),
# Buttons.
("user_btn", 0, Pins("58"), IOStandard("LVCMOS33")),
("user_btn", 1, Pins("59"), IOStandard("LVCMOS33")),
("user_btn", 2, Pins("60"), IOStandard("LVCMOS33")),
("user_btn", 3, Pins("61"), IOStandard("LVCMOS33")),
("user_btn", 4, Pins("62"), IOStandard("LVCMOS33")),
("user_btn", 5, Pins("63"), IOStandard("LVCMOS33")),
("user_btn", 6, Pins("64"), IOStandard("LVCMOS33")),
("user_btn", 7, Pins("65"), IOStandard("LVCMOS33")),
# Serial.
# FT232H has only one interface -> use (arbitrary) two pins from J2 to
# connect an external USB<->serial adapter
("serial", 0,
Subsignal("tx", Pins("116")), # J2.17
Subsignal("rx", Pins("115")), # J2.18
IOStandard("LVCMOS33")
),
# Seven Segment
("seven_seg_dig", 0, Pins("137"), IOStandard("LVCMOS33")),
("seven_seg_dig", 1, Pins("140"), IOStandard("LVCMOS33")),
("seven_seg_dig", 2, Pins("141"), IOStandard("LVCMOS33")),
("seven_seg_dig", 3, Pins("7"), IOStandard("LVCMOS33")),
("seven_seg", 0, Pins("138 142 9 11 12 139 8 10"), IOStandard("LVCMOS33")),
]
# Connectors ---------------------------------------------------------------------------------------
_connectors = [
["J1", "- 38 39 40 41 42 43 44 66 67 68 69 70 71 72 96 95 94 93 -"],
["J2", "- 136 135 134 133 132 131 130 129 128 123 122 121 120 119 118 117 116 115 -"],
]
# Platform -----------------------------------------------------------------------------------------
class Platform(GowinPlatform):
default_clk_name = "clk12"
default_clk_period = 1e9/12e6
def __init__(self, toolchain="gowin"):
GowinPlatform.__init__(self, "GW1N-UV4LQ144C6/I5", _io, _connectors, toolchain=toolchain, devicename="GW1N-4")
self.toolchain.options["use_mspi_as_gpio"] = 1
def create_programmer(self):
return OpenFPGALoader("runber")
def do_finalize(self, fragment):
GowinPlatform.do_finalize(self, fragment)
self.add_period_constraint(self.lookup_request("clk12", loose=True), 1e9/12e6)
| 1.195313 | 1 |
combo/search/discrete/policy.py | zhangkunliang/BayesOptimization | 139 | 2943 | import numpy as np
import copy
import combo.misc
import cPickle as pickle
from results import history
from .. import utility
from ...variable import variable
from ..call_simulator import call_simulator
from ... import predictor
from ...gp import predictor as gp_predictor
from ...blm import predictor as blm_predictor
import combo.search.score
MAX_SEACH = int(20000)
class policy:
def __init__(self, test_X, config=None):
self.predictor = None
self.training = variable()
self.test = self._set_test(test_X)
self.actions = np.arange(0, self.test.X.shape[0])
self.history = history()
self.config = self._set_config(config)
def set_seed(self, seed):
self.seed = seed
np.random.seed(self.seed)
def delete_actions(self, index, actions=None):
actions = self._set_unchosed_actions(actions)
return np.delete(actions, index)
def write(self, action, t, X=None):
if X is None:
X = self.test.X[action, :]
Z = self.test.Z[action, :] if self.test.Z is not None else None
else:
Z = self.predictor.get_basis(X) \
if self.predictor is not None else None
self.new_data = variable(X, t, Z)
self.history.write(t, action)
self.training.add(X=X, t=t, Z=Z)
def random_search(self, max_num_probes, num_search_each_probe=1,
simulator=None, is_disp=True):
N = int(num_search_each_probe)
if int(max_num_probes) * N > len(self.actions):
raise ValueError('max_num_probes * num_search_each_probe must \
be smaller than the length of candidates')
if is_disp:
utility.show_interactive_mode(simulator, self.history)
for n in xrange(0, max_num_probes):
if is_disp and N > 1:
utility.show_start_message_multi_search(self.history.num_runs)
action = self.get_random_action(N)
if simulator is None:
return action
t, X = call_simulator(simulator, action)
self.write(action, t, X)
if is_disp:
utility.show_search_results(self.history, N)
return copy.deepcopy(self.history)
def bayes_search(self, training=None, max_num_probes=None,
num_search_each_probe=1,
predictor=None, is_disp=True,
simulator=None, score='TS', interval=0,
num_rand_basis=0):
if max_num_probes is None:
max_num_probes = 1
simulator = None
is_rand_expans = False if num_rand_basis == 0 else True
self.training = self._set_training(training)
if predictor is None:
self.predictor = self._init_predictor(is_rand_expans)
else:
self.predictor = predictor
N = int(num_search_each_probe)
for n in xrange(max_num_probes):
if utility.is_learning(n, interval):
self.predictor.fit(self.training, num_rand_basis)
self.test.Z = self.predictor.get_basis(self.test.X)
self.training.Z = self.predictor.get_basis(self.training.X)
self.predictor.prepare(self.training)
else:
try:
self.predictor.update(self.training, self.new_data)
except:
self.predictor.prepare(self.training)
if num_search_each_probe != 1:
utility.show_start_message_multi_search(self.history.num_runs,
score)
K = self.config.search.multi_probe_num_sampling
alpha = self.config.search.alpha
action = self.get_actions(score, N, K, alpha)
if simulator is None:
return action
t, X = call_simulator(simulator, action)
self.write(action, t, X)
if is_disp:
utility.show_search_results(self.history, N)
return copy.deepcopy(self.history)
def get_score(self, mode, predictor=None, training=None, alpha=1):
self._set_training(training)
self._set_predictor(predictor)
actions = self.actions
test = self.test.get_subset(actions)
if mode == 'EI':
f = combo.search.score.EI(predictor, training, test)
elif mode == 'PI':
f = combo.search.score.PI(predictor, training, test)
elif mode == 'TS':
f = combo.search.score.TS(predictor, training, test, alpha)
else:
raise NotImplementedError('mode must be EI, PI or TS.')
return f
def get_marginal_score(self, mode, chosed_actions, N, alpha):
f = np.zeros((N, len(self.actions)))
new_test = self.test.get_subset(chosed_actions)
virtual_t \
= self.predictor.get_predict_samples(self.training, new_test, N)
for n in xrange(N):
predictor = copy.deepcopy(self.predictor)
train = copy.deepcopy(self.training)
virtual_train = new_test
virtual_train.t = virtual_t[n, :]
if virtual_train.Z is None:
train.add(virtual_train.X, virtual_train.t)
else:
train.add(virtual_train.X, virtual_train.t, virtual_train.Z)
try:
predictor.update(train, virtual_train)
except:
predictor.prepare(train)
f[n, :] = self.get_score(mode, predictor, train)
return f
def get_actions(self, mode, N, K, alpha):
f = self.get_score(mode, self.predictor, self.training, alpha)
temp = np.argmax(f)
action = self.actions[temp]
self.actions = self.delete_actions(temp)
chosed_actions = np.zeros(N, dtype=int)
chosed_actions[0] = action
for n in xrange(1, N):
f = self.get_marginal_score(mode, chosed_actions[0:n], K, alpha)
temp = np.argmax(np.mean(f, 0))
chosed_actions[n] = self.actions[temp]
self.actions = self.delete_actions(temp)
return chosed_actions
def get_random_action(self, N):
random_index = np.random.permutation(xrange(self.actions.shape[0]))
index = random_index[0:N]
action = self.actions[index]
self.actions = self.delete_actions(index)
return action
def load(self, file_history, file_training=None, file_predictor=None):
self.history.load(file_history)
if file_training is None:
N = self.history.total_num_search
X = self.test.X[self.history.chosed_actions[0:N], :]
t = self.history.fx[0:N]
self.training = variable(X=X, t=t)
else:
self.training = variable()
self.training.load(file_training)
if file_predictor is not None:
with open(file_predictor) as f:
self.predictor = pickle.load(f)
def export_predictor(self):
return self.predictor
def export_training(self):
return self.training
def export_history(self):
return self.history
def _set_predictor(self, predictor=None):
if predictor is None:
predictor = self.predictor
return predictor
def _init_predictor(self, is_rand_expans, predictor=None):
self.predictor = self._set_predictor(predictor)
if self.predictor is None:
if is_rand_expans:
self.predictor = blm_predictor(self.config)
else:
self.predictor = gp_predictor(self.config)
return self.predictor
def _set_training(self, training=None):
if training is None:
training = self.training
return training
def _set_unchosed_actions(self, actions=None):
if actions is None:
actions = self.actions
return actions
def _set_test(self, test_X):
if isinstance(test_X, np.ndarray):
test = variable(X=test_X)
elif isinstance(test_X, variable):
test = test_X
else:
raise TypeError('The type of test_X must \
take ndarray or combo.variable')
return test
def _set_config(self, config=None):
if config is None:
config = combo.misc.set_config()
return config
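# A minimal usage sketch, not part of the library itself. It assumes, as in the
# package tutorials, that call_simulator() can consume an object whose
# __call__(action) returns the observed objective values for the selected rows
# of test_X; the toy objective and every name below are illustrative assumptions.
def _example_policy_usage():
    class _ToySimulator(object):
        def __init__(self, test_X):
            self.test_X = test_X
        def __call__(self, action):
            # toy objective: negative squared distance from the origin
            return -np.sum(self.test_X[action, :] ** 2, axis=1)
    test_X = np.random.rand(500, 3)
    simulator = _ToySimulator(test_X)
    search = policy(test_X=test_X)
    search.set_seed(0)
    # a handful of random probes to seed the model, then Bayesian search
    search.random_search(max_num_probes=5, simulator=simulator)
    res = search.bayes_search(max_num_probes=10, simulator=simulator,
                              score='TS', interval=5)
    return res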
| 2.21875 | 2 |
venv/lib/python3.9/site-packages/py2app/bootstrap/disable_linecache.py | dequeb/asmbattle | 193 | 2944 | <filename>venv/lib/python3.9/site-packages/py2app/bootstrap/disable_linecache.py
def _disable_linecache():
import linecache
def fake_getline(*args, **kwargs):
return ""
linecache.orig_getline = linecache.getline
linecache.getline = fake_getline
_disable_linecache()
| 2.265625 | 2 |
source.py | s403o/tw_bot | 0 | 2945 | <reponame>s403o/tw_bot
import requests
from bs4 import BeautifulSoup as bs
import os
#source
url = '' # the source you want the bot to take images from
# download the page
page = requests.get(url)
html = bs(page.text, 'html.parser')
#locate
image_loc = html.findAll('img')
#create folder for located imgs
if not os.path.exists('imgs'):
os.makedirs('imgs')
#open the new folder
os.chdir('imgs')
image0 = 0 #img name
#get images
for image in image_loc:
try:
url = image['src']
source = requests.get(url)
if source.status_code == 200:
      with open('img-' + str(image0) + '.jpg', 'wb') as mkimg:
        mkimg.write(source.content)  # reuse the response already fetched above
      image0 += 1
except:
pass
| 2.90625 | 3 |
lib/appController.py | QIAOANGeo/BZB_ydzw | 2 | 2946 | <filename>lib/appController.py
'''
1. Start the Appium server
   - via subprocess
   - using the settings from the config file
   1.1 Verify that the server has started
   1.2 Kill the previous server instance
2. Start the driver
'''
from lib.tools import Tool
import subprocess
from lib.path import SYSTEMPATH, ERRORPATH
import time
from appium import webdriver
import queue
# Declare a Python queue
driver_queue = queue.Queue()
class Controller(object):
def __init__(self):
        # Load the configuration info
self.config = Tool().get_config
self.tester = self.config.get('tester')
self.device_type = self.config.get('device_type')
        # Get the info for all configured devices
self.devices = self.config.get('devices')
self.device = self.devices.get(self.device_type)[0]
        # port is used to check whether the server is up
self.port = self.device.get('port')
self.name = self.device.get('name')
def kill_server(self):
mac = '''ps -ef|grep appium|grep -v grep|grep %s|awk '{print "kill -9 " $2}'|sh''' % self.port
win = 'taskkill /F /IM node.exe /t'
subprocess.getoutput(win)
def start_server(self):
self.kill_server()
command = 'appium -a {ip} -p {port} -U {deviceName}'.format(ip=self.device.get('ip'),
port=self.device.get('port'),
deviceName=self.device.get('deviceName'))
print('command : %s' % command)
subprocess.Popen(command, stdout=open(SYSTEMPATH, 'a+'), stderr=open(ERRORPATH, 'a+'), shell=True)
def test_server(self):
# mac = 'ps -ef|grep appium|grep -v grep|grep %s' % self.port
win = 'netstat -ano | findstr %s' % self.port
time.sleep(3)
while True:
data = subprocess.getoutput(win)
if data:
time.sleep(10)
                print('Port %s started successfully.' % self.port)
break
else:
                print('Port %s is not up yet. Retrying in 5 seconds.' % self.port)
time.sleep(5)
return True
def start_driver(self):
url = 'http://{ip}:{port}/wd/hub'.format(ip=self.device.get('ip'),
port=self.port)
        # Merge the device info with the app package/activity entry point
self.device.update(self.tester)
driver = webdriver.Remote(url, self.device)
driver_queue.put(driver)
if __name__ == '__main__':
controller = Controller()
controller.start_server()
if controller.test_server():
controller.start_driver() | 2.8125 | 3 |
pondSizes.py | passionzhan/LeetCode | 1 | 2947 | # -*- encoding: utf-8 -*-
'''
@project : LeetCode
@File : pondSizes.py
@Contact : <EMAIL>
@Desc :
You are given an integer matrix `land` representing a piece of terrain, where the value at each point is the altitude of that location. A value of 0 marks water. Water cells connected vertically, horizontally, or diagonally form a pond, and the size of a pond is the number of connected water cells. Write a method that computes the size of every pond in the matrix and returns the sizes sorted in ascending order.
Example:
Input:
[
  [0,2,1,0],
  [0,1,0,1],
  [1,1,0,1],
  [0,1,0,1]
]
Output: [1,2,4]
Constraints:
0 < len(land) <= 1000
0 < len(land[i]) <= 1000
Source: LeetCode (LeetCode China)
Link: https://leetcode-cn.com/problems/pond-sizes-lcci
@Modify Time      @Author    @Version    @Description
------------ ------- -------- -----------
2020-03-07 zhan 1.0 None
'''
from typing import List
from collections import deque
class Solution:
def pondSizes(self, land: List[List[int]]) -> List[int]:
def neighbors(iR,iC,flag):
ans = set()
if (iR-1,iC-1) in flag:
ans.add((iR-1,iC-1))
if (iR-1,iC) in flag:
ans.add((iR-1,iC))
if (iR-1,iC+1) in flag:
ans.add((iR-1,iC+1))
if (iR,iC-1) in flag:
ans.add((iR,iC-1))
if (iR, iC + 1) in flag:
ans.add((iR, iC + 1))
if (iR + 1, iC-1) in flag:
ans.add((iR + 1, iC-1))
if (iR + 1, iC) in flag:
ans.add((iR + 1, iC))
if (iR+1, iC + 1) in flag:
ans.add((iR+1, iC + 1))
return ans
flag = {(i,j) for j in range(len(land[0])) for i in range(len(land)) if land[i][j] == 0}
ans = []
while flag:
tmpArea = 0
mydueque = deque()
mydueque.append(flag.pop())
while mydueque:
curEle = mydueque.popleft()
tmpArea +=1
for neighbor in neighbors(curEle[0], curEle[1], flag):
mydueque.append(neighbor)
flag.remove(neighbor)
ans.append(tmpArea)
ans.sort()
return ans
if __name__ == '__main__':
a = [
[0,2,1,0],
[0,1,0,1],
[1,1,0,1],
[0,1,0,1]
]
ans = Solution().pondSizes(a)
print(ans)
| 3.375 | 3 |
geolucidate/functions.py | kurtraschke/geolucidate | 3 | 2948 | <filename>geolucidate/functions.py
# -*- coding: utf-8 -*-
from decimal import Decimal, setcontext, ExtendedContext
from geolucidate.links.google import google_maps_link
from geolucidate.links.tools import MapLink
from geolucidate.parser import parser_re
setcontext(ExtendedContext)
def _cleanup(parts):
"""
    Normalize the parts matched by :obj:`parser.parser_re` to
degrees, minutes, and seconds.
>>> _cleanup({'latdir': 'south', 'longdir': 'west',
... 'latdeg':'60','latmin':'30',
... 'longdeg':'50','longmin':'40'})
['S', '60', '30', '00', 'W', '50', '40', '00']
>>> _cleanup({'latdir': 'south', 'longdir': 'west',
... 'latdeg':'60','latmin':'30', 'latdecsec':'.50',
... 'longdeg':'50','longmin':'40','longdecsec':'.90'})
['S', '60', '30.50', '00', 'W', '50', '40.90', '00']
"""
latdir = (parts['latdir'] or parts['latdir2']).upper()[0]
longdir = (parts['longdir'] or parts['longdir2']).upper()[0]
latdeg = parts.get('latdeg')
longdeg = parts.get('longdeg')
latmin = parts.get('latmin', '00') or '00'
longmin = parts.get('longmin', '00') or '00'
latdecsec = parts.get('latdecsec', '')
longdecsec = parts.get('longdecsec', '')
if (latdecsec and longdecsec):
latmin += latdecsec
longmin += longdecsec
latsec = '00'
longsec = '00'
else:
latsec = parts.get('latsec', '') or '00'
longsec = parts.get('longsec', '') or '00'
return [latdir, latdeg, latmin, latsec, longdir, longdeg, longmin, longsec]
def _convert(latdir, latdeg, latmin, latsec,
longdir, longdeg, longmin, longsec):
"""
Convert normalized degrees, minutes, and seconds to decimal degrees.
Quantize the converted value based on the input precision and
return a 2-tuple of strings.
>>> _convert('S','50','30','30','W','50','30','30')
('-50.508333', '-50.508333')
>>> _convert('N','50','27','55','W','127','27','65')
('50.459167', '-127.460833')
"""
if (latsec != '00' or longsec != '00'):
precision = Decimal('0.000001')
elif (latmin != '00' or longmin != '00'):
precision = Decimal('0.001')
else:
precision = Decimal('1')
latitude = Decimal(latdeg)
latmin = Decimal(latmin)
latsec = Decimal(latsec)
longitude = Decimal(longdeg)
longmin = Decimal(longmin)
longsec = Decimal(longsec)
if latsec > 59 or longsec > 59:
# Assume that 'seconds' greater than 59 are actually a decimal
# fraction of minutes
latitude += (latmin +
(latsec / Decimal('100'))) / Decimal('60')
longitude += (longmin +
(longsec / Decimal('100'))) / Decimal('60')
else:
latitude += (latmin +
(latsec / Decimal('60'))) / Decimal('60')
longitude += (longmin +
(longsec / Decimal('60'))) / Decimal('60')
if latdir == 'S':
latitude *= Decimal('-1')
if longdir == 'W':
longitude *= Decimal('-1')
lat_str = str(latitude.quantize(precision))
long_str = str(longitude.quantize(precision))
return (lat_str, long_str)
def replace(string, sub_function=google_maps_link()):
"""
Replace detected coordinates with a map link, using the given substitution
function.
The substitution function will be passed a :class:`~.MapLink` instance, and
should return a string which will be substituted by :func:`re.sub` in place
of the detected coordinates.
>>> replace("58147N/07720W")
'<a href="http://maps.google.com/maps?q=58.235278%2C-77.333333+%2858147N%2F07720W%29&ll=58.235278%2C-77.333333&t=h" title="58147N/07720W (58.235278, -77.333333)">58147N/07720W</a>'
>>> replace("5814N/07720W", google_maps_link('satellite'))
'<a href="http://maps.google.com/maps?q=58.233%2C-77.333+%285814N%2F07720W%29&ll=58.233%2C-77.333&t=k" title="5814N/07720W (58.233, -77.333)">5814N/07720W</a>'
>>> from geolucidate.links.bing import bing_maps_link
>>> replace("58N/077W", bing_maps_link('map'))
'<a href="http://bing.com/maps/default.aspx?style=r&cp=58~-77&sp=Point.58_-77_58N%2F077W&v=2" title="58N/077W (58, -77)">58N/077W</a>'
"""
def do_replace(match):
original_string = match.group()
(latitude, longitude) = _convert(*_cleanup(match.groupdict()))
return sub_function(MapLink(original_string, latitude, longitude))
return parser_re.sub(do_replace, string)
def get_replacements(string, sub_function=google_maps_link()):
"""
Return a dict whose keys are instances of :class:`re.Match` and
whose values are the corresponding replacements. Use
:func:`get_replacements` when the replacement cannot be performed
through ordinary string substitution by :func:`re.sub`, as in
:func:`replace`.
>>> get_replacements("4630 NORTH 5705 WEST 58147N/07720W")
... #doctest: +ELLIPSIS
{<re.Match object...>: '<a href="..." title="...">4630 NORTH 5705 WEST</a>', <re.Match object...>: '<a href="..." title="...">58147N/07720W</a>'}
>>> test_string = "4630 NORTH 5705 WEST 58147N/07720W"
>>> replacements = get_replacements(test_string)
>>> offset = 0
>>> out = bytearray(test_string, encoding="ascii", errors="replace")
>>> for (match, link) in replacements.items():
... start = match.start() + offset
... end = match.end() + offset
... out[start:end] = bytearray(link, encoding="ascii", errors="replace")
... offset += (len(link) - len(match.group()))
>>> out.decode(encoding="ascii") == replace(test_string)
True
"""
substitutions = {}
matches = parser_re.finditer(string)
for match in matches:
(latitude, longitude) = _convert(*_cleanup(match.groupdict()))
substitutions[match] = sub_function(MapLink(match.group(),
latitude, longitude))
return substitutions
| 2.703125 | 3 |
setup.py | rluzuriaga/pokedex | 30 | 2949 | from setuptools import setup, find_packages
setup(
name='Pokedex',
version='0.1',
zip_safe=False,
packages=find_packages(),
package_data={
'pokedex': ['data/csv/*.csv']
},
install_requires=[
'SQLAlchemy>=1.0,<2.0',
'whoosh>=2.5,<2.7',
'markdown==2.4.1',
'construct==2.5.3',
'six>=1.9.0',
],
entry_points={
'console_scripts': [
'pokedex = pokedex.main:setuptools_entry',
],
},
classifiers=[
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.7",
]
)
| 1.296875 | 1 |
lingvo/tasks/asr/encoder.py | j-luo93/lingvo | 4 | 2950 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encoders for the speech model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from six.moves import range
from six.moves import zip
import tensorflow as tf
from tensorflow.python.ops import inplace_ops
from lingvo.core import base_encoder
from lingvo.core import base_layer
from lingvo.core import layers
from lingvo.core import plot
from lingvo.core import py_utils
from lingvo.core import rnn_cell
from lingvo.core import rnn_layers
from lingvo.core import summary_utils
from lingvo.core import model_helper
ConvLSTMBlock = collections.namedtuple('ConvLSTMBlock', ('rnn', 'cnn'))
class AsrEncoder(base_encoder.BaseEncoder):
"""Speech encoder version 1."""
@classmethod
def Params(cls):
"""Configs for AsrEncoder."""
p = super(AsrEncoder, cls).Params()
p.Define('lstm_tpl', rnn_cell.LSTMCellSimple.Params(),
'Configs template for the RNN layer.')
p.Define('cnn_tpl', layers.ConvLayer.Params(),
'Configs template for the conv layer.')
p.Define('proj_tpl', layers.ProjectionLayer.Params(),
'Configs template for the projection layer.')
p.Define(
'highway_skip', False,
'If set, residual connections from different layers are gated. '
'Will only be used if residual_start is enabled.')
p.Define('highway_skip_tpl', layers.HighwaySkipLayer.Params(),
'Configs template for the highway skip layer.')
p.Define('conv_lstm_tpl', rnn_cell.ConvLSTMCell.Params(),
'Configs template for ConvLSTMCell.')
p.Define(
'after_conv_lstm_cnn_tpl', layers.ConvLayer.Params(),
'Configs template for the cnn layer immediately follow the'
' convlstm layer.')
p.Define('conv_filter_shapes', None, 'Filter shapes for each conv layer.')
p.Define('conv_filter_strides', None, 'Filter strides for each conv layer.')
p.Define('input_shape', [None, None, None, None],
'Shape of the input. This should a TensorShape with rank 4.')
p.Define('lstm_cell_size', 256, 'LSTM cell size for the RNN layer.')
p.Define('num_cnn_layers', 2, 'Number of conv layers to create.')
p.Define('num_conv_lstm_layers', 1, 'Number of conv lstm layers to create.')
p.Define('num_lstm_layers', 3, 'Number of rnn layers to create')
p.Define('project_lstm_output', True,
'Include projection layer after each encoder LSTM layer.')
p.Define('pad_steps', 6,
'Extra zero-padded timesteps to add to the input sequence. ')
p.Define(
'residual_start', 0, 'Start residual connections from this lstm layer. '
'Disabled if 0 or greater than num_lstm_layers.')
p.Define('residual_stride', 1,
'Number of lstm layers to skip per residual connection.')
p.Define(
'bidi_rnn_type', 'func', 'Options: func, native_cudnn. '
'func: BidirectionalFRNN, '
'native_cudnn: BidirectionalNativeCuDNNLSTM.')
# TODO(yonghui): Maybe move those configs to a separate file.
# Set some reasonable default values.
#
# NOTE(yonghui): The default config below assumes the following encoder
# architecture:
#
# cnn/batch-norm/relu ->
# cnn/batch-norm/relu ->
# bidirectional conv-lstm ->
# cnn/batch-norm/relu
# bidirectional lstm ->
# projection/batch-norm/relu ->
# bidirectional lstm ->
# projection/batch-norm/relu ->
# bidirectional lstm
#
# Default config for the rnn layer.
p.lstm_tpl.params_init = py_utils.WeightInit.Uniform(0.1)
# Default config for the convolution layer.
p.input_shape = [None, None, 80, 3]
p.conv_filter_shapes = [(3, 3, 3, 32), (3, 3, 32, 32)]
p.conv_filter_strides = [(2, 2), (2, 2)]
p.cnn_tpl.params_init = py_utils.WeightInit.TruncatedGaussian(0.1)
# TODO(yonghui): Disable variational noise logic.
# NOTE(yonghui): Fortunately, variational noise logic is currently not
# implemented for ConvLayer yet (as of sep 22, 2016).
# Default config for the projection layer.
p.proj_tpl.params_init = py_utils.WeightInit.TruncatedGaussian(0.1)
# TODO(yonghui): Disable variational noise logic.
# NOTE(yonghui): Fortunately, variational noise logic is currently not
# implemented for ProjectionLayer yet (as of sep 22, 2016).
p.conv_lstm_tpl.filter_shape = [1, 3] # height (time), width (frequency)
p.conv_lstm_tpl.inputs_shape = [None, None, None, None]
p.conv_lstm_tpl.cell_shape = [None, None, None, None]
p.conv_lstm_tpl.params_init = py_utils.WeightInit.TruncatedGaussian(0.1)
p.after_conv_lstm_cnn_tpl.filter_shape = [3, 3, None, None]
p.after_conv_lstm_cnn_tpl.params_init = (
py_utils.WeightInit.TruncatedGaussian(0.1))
p.after_conv_lstm_cnn_tpl.filter_stride = [1, 1]
return p
@base_layer.initializer
def __init__(self, params):
super(AsrEncoder, self).__init__(params)
p = self.params
assert p.packed_input is False, ('Packed inputs are not yet supported for '
'AsrEncoder.')
name = p.name
with tf.variable_scope(name):
# First create the conv layers.
assert p.num_cnn_layers == len(p.conv_filter_shapes)
assert p.num_cnn_layers == len(p.conv_filter_strides)
params_conv_layers = []
for i in range(p.num_cnn_layers):
conv_p = p.cnn_tpl.Copy()
conv_p.name = 'conv_L%d' % (i)
conv_p.filter_shape = p.conv_filter_shapes[i]
conv_p.filter_stride = p.conv_filter_strides[i]
conv_p.is_eval = p.is_eval
params_conv_layers.append(conv_p)
self.CreateChildren('conv', params_conv_layers)
conv_output_shape = tf.TensorShape(p.input_shape)
for i in range(p.num_cnn_layers):
conv_output_shape = self.conv[i].OutShape(conv_output_shape)
conv_output_shape = conv_output_shape.as_list()
assert len(conv_output_shape) == 4 # batch, height, width, channel.
params_conv_lstm_rnn = []
params_conv_lstm_cnn = []
for i in range(p.num_conv_lstm_layers):
# NOTE(yonghui): We assume that output from ConvLSTMBlock has the same
# shape as its input.
_, _, width, in_channel = conv_output_shape
f_conv_lstm_p = p.conv_lstm_tpl.Copy()
f_conv_lstm_p.name = 'f_conv_lstm_%d' % (i)
f_conv_lstm_p.inputs_shape = [None, 1, width, in_channel]
f_conv_lstm_p.cell_shape = [None, 1, width, in_channel]
b_conv_lstm_p = f_conv_lstm_p.Copy()
b_conv_lstm_p.name = 'b_conv_lstm_%d' % (i)
conv_lstm_rnn_p = self.CreateConvLstmLayerParams()
conv_lstm_rnn_p.name = 'conv_lstm_rnn'
conv_lstm_rnn_p.fwd = f_conv_lstm_p
conv_lstm_rnn_p.bak = b_conv_lstm_p
params_conv_lstm_rnn.append(conv_lstm_rnn_p)
cnn_p = p.after_conv_lstm_cnn_tpl.Copy()
cnn_p.name = 'conv_lstm_cnn_%d' % (i)
cnn_p.filter_shape[2] = 2 * in_channel
cnn_p.filter_shape[3] = in_channel
params_conv_lstm_cnn.append(cnn_p)
# TODO(yonghui): Refactor ConvLSTMBlock into a layer.
self.CreateChildren('conv_lstm_rnn', params_conv_lstm_rnn)
self.CreateChildren('conv_lstm_cnn', params_conv_lstm_cnn)
(self._first_lstm_input_dim,
self._first_lstm_input_dim_pad) = self.FirstLstmLayerInputDimAndPadding(
conv_output_shape, pad_to_multiple=16)
# Now create all the rnn layers and projection layers.
# TODO(yonghui): take care of device placement.
params_rnn_layers = []
params_proj_layers = []
params_highway_skip_layers = []
for i in range(p.num_lstm_layers):
if i == 0:
input_dim = self._first_lstm_input_dim
else:
input_dim = 2 * p.lstm_cell_size
forward_p = p.lstm_tpl.Copy()
forward_p.name = 'fwd_rnn_L%d' % (i)
forward_p.num_input_nodes = input_dim
forward_p.num_output_nodes = p.lstm_cell_size
backward_p = forward_p.Copy()
backward_p.name = 'bak_rnn_L%d' % (i)
rnn_p = self.CreateBidirectionalRNNParams(forward_p, backward_p)
rnn_p.name = 'brnn_L%d' % (i)
params_rnn_layers.append(rnn_p)
if p.project_lstm_output and (i < p.num_lstm_layers - 1):
proj_p = p.proj_tpl.Copy()
proj_p.input_dim = 2 * p.lstm_cell_size
proj_p.output_dim = 2 * p.lstm_cell_size
proj_p.name = 'proj_L%d' % (i)
proj_p.is_eval = p.is_eval
params_proj_layers.append(proj_p)
# add the skip layers
residual_index = i - p.residual_start + 1
if p.residual_start > 0 and residual_index >= 0 and p.highway_skip:
highway_skip = p.highway_skip_tpl.Copy()
highway_skip.name = 'enc_hwskip_%d' % len(params_highway_skip_layers)
highway_skip.input_dim = 2 * p.lstm_cell_size
params_highway_skip_layers.append(highway_skip)
self.CreateChildren('rnn', params_rnn_layers)
self.CreateChildren('proj', params_proj_layers)
self.CreateChildren('highway_skip', params_highway_skip_layers)
@property
def _use_functional(self):
return True
def CreateBidirectionalRNNParams(self, forward_p, backward_p):
return model_helper.CreateBidirectionalRNNParams(self.params, forward_p,
backward_p)
def CreateConvLstmLayerParams(self):
return rnn_layers.BidirectionalFRNN.Params()
def FirstLstmLayerInputDimAndPadding(self,
conv_output_shape,
pad_to_multiple=16):
lstm_input_shape = conv_output_shape
# Makes sure the lstm input dims is multiple of 16 (alignment
# requirement from FRNN).
first_lstm_input_dim_unpadded = lstm_input_shape[2] * lstm_input_shape[3]
if self._use_functional and (first_lstm_input_dim_unpadded % pad_to_multiple
!= 0):
first_lstm_input_dim = int(
(first_lstm_input_dim_unpadded + pad_to_multiple - 1) /
pad_to_multiple) * pad_to_multiple
else:
first_lstm_input_dim = first_lstm_input_dim_unpadded
first_lstm_input_dim_padding = (
first_lstm_input_dim - first_lstm_input_dim_unpadded)
return first_lstm_input_dim, first_lstm_input_dim_padding
@property
def supports_streaming(self):
return False
def zero_state(self, batch_size):
return py_utils.NestedMap()
def FProp(self, theta, batch, state0=None):
"""Encodes source as represented by 'inputs' and 'paddings'.
Args:
theta: A NestedMap object containing weights' values of this
layer and its children layers.
batch: A NestedMap with fields:
src_inputs - The inputs tensor. It is expected to be of shape [batch,
time, feature_dim, channels].
paddings - The paddings tensor. It is expected to be of shape [batch,
time].
state0: Recurrent input state. Not supported/ignored by this encoder.
Returns:
(outputs, out_paddings, state1) tuple. Outputs is of the shape
[time, batch, depth], and out_paddings is of the shape [time, batch]
"""
p = self.params
inputs, paddings = batch.src_inputs, batch.paddings
with tf.name_scope(p.name):
# Add a few extra padded timesteps at the end. This is for ensuring the
# correctness of the conv-layers at the edges.
if p.pad_steps > 0:
# inplace_update() is not supported by TPU for now. Since we have done
# padding on the input_generator, we may avoid this additional padding.
assert not py_utils.use_tpu()
inputs_pad = tf.zeros(
inplace_ops.inplace_update(tf.shape(inputs), 1, p.pad_steps),
inputs.dtype)
paddings_pad = tf.ones(
inplace_ops.inplace_update(tf.shape(paddings), 1, p.pad_steps),
paddings.dtype)
inputs = tf.concat([inputs, inputs_pad], 1, name='inputs')
paddings = tf.concat([paddings, paddings_pad], 1)
def ReshapeForPlot(tensor, padding, name):
"""Transposes and flattens channels to [batch, dim, seq_len] shape."""
# Flatten any dimensions beyond the third into the third.
batch_size = tf.shape(tensor)[0]
max_len = tf.shape(tensor)[1]
plot_tensor = tf.reshape(tensor, [batch_size, max_len, -1])
plot_tensor = tf.transpose(plot_tensor, [0, 2, 1], name=name)
return (plot_tensor, summary_utils.SequenceLength(padding))
plots = [
ReshapeForPlot(
tf.transpose(inputs, [0, 1, 3, 2]), paddings, 'inputs')
]
conv_out = inputs
out_padding = paddings
for i, conv_layer in enumerate(self.conv):
conv_out, out_padding = conv_layer.FProp(theta.conv[i], conv_out,
out_padding)
plots.append(
ReshapeForPlot(
tf.transpose(conv_out, [0, 1, 3, 2]), out_padding,
'conv_%d_out' % i))
def TransposeFirstTwoDims(t):
first_dim = tf.shape(t)[0]
second_dim = tf.shape(t)[1]
t_new = tf.transpose(
tf.reshape(t, [first_dim, second_dim, -1]), [1, 0, 2])
t_shape_new = tf.concat([[second_dim], [first_dim], tf.shape(t)[2:]], 0)
return tf.reshape(t_new, t_shape_new)
# Now the conv-lstm part.
conv_lstm_out = conv_out
conv_lstm_out_padding = out_padding
for i, (rnn, cnn) in enumerate(
zip(self.conv_lstm_rnn, self.conv_lstm_cnn)):
conv_lstm_in = conv_lstm_out
# Move time dimension to be the first.
conv_lstm_in = TransposeFirstTwoDims(conv_lstm_in)
conv_lstm_in = tf.expand_dims(conv_lstm_in, 2)
conv_lstm_in_padding = tf.expand_dims(
tf.transpose(conv_lstm_out_padding), 2)
lstm_out = rnn.FProp(theta.conv_lstm_rnn[i], conv_lstm_in,
conv_lstm_in_padding)
# Move time dimension to be the second.
cnn_in = TransposeFirstTwoDims(lstm_out)
cnn_in = tf.squeeze(cnn_in, 2)
cnn_in_padding = conv_lstm_out_padding
cnn_out, cnn_out_padding = cnn.FProp(theta.conv_lstm_cnn[i], cnn_in,
cnn_in_padding)
conv_lstm_out, conv_lstm_out_padding = cnn_out, cnn_out_padding
plots.append(
ReshapeForPlot(conv_lstm_out, conv_lstm_out_padding,
'conv_lstm_%d_out' % i))
# Need to do a reshape before starting the rnn layers.
conv_lstm_out = py_utils.HasRank(conv_lstm_out, 4)
conv_lstm_out_shape = tf.shape(conv_lstm_out)
new_shape = tf.concat([conv_lstm_out_shape[:2], [-1]], 0)
conv_lstm_out = tf.reshape(conv_lstm_out, new_shape)
if self._first_lstm_input_dim_pad:
conv_lstm_out = tf.pad(
conv_lstm_out,
[[0, 0], [0, 0], [0, self._first_lstm_input_dim_pad]])
conv_lstm_out = py_utils.HasShape(conv_lstm_out,
[-1, -1, self._first_lstm_input_dim])
# Transpose to move the time dimension to be the first.
rnn_in = tf.transpose(conv_lstm_out, [1, 0, 2])
rnn_padding = tf.expand_dims(tf.transpose(conv_lstm_out_padding), 2)
# rnn_in is of shape [time, batch, depth]
# rnn_padding is of shape [time, batch, 1]
# Now the rnn layers.
num_skips = 0
for i in range(p.num_lstm_layers):
rnn_out = self.rnn[i].FProp(theta.rnn[i], rnn_in, rnn_padding)
residual_index = i - p.residual_start + 1
if p.residual_start > 0 and residual_index >= 0:
if residual_index % p.residual_stride == 0:
residual_in = rnn_in
if residual_index % p.residual_stride == p.residual_stride - 1:
# Highway skip connection.
if p.highway_skip:
rnn_out = self.highway_skip[num_skips].FProp(
theta.highway_skip[num_skips], residual_in, rnn_out)
num_skips += 1
else:
# Residual skip connection.
rnn_out += py_utils.HasShape(residual_in, tf.shape(rnn_out))
if p.project_lstm_output and (i < p.num_lstm_layers - 1):
# Projection layers.
rnn_out = self.proj[i].FProp(theta.proj[i], rnn_out, rnn_padding)
if i == p.num_lstm_layers - 1:
rnn_out *= (1.0 - rnn_padding)
plots.append(
ReshapeForPlot(
tf.transpose(rnn_out, [1, 0, 2]),
tf.transpose(rnn_padding, [1, 0, 2]), 'rnn_%d_out' % i))
rnn_in = rnn_out
final_out = rnn_in
if self.cluster.add_summary:
fig = plot.MatplotlibFigureSummary(
'encoder_example', figsize=(8, len(plots) * 3.5))
# Order layers from bottom to top.
plots.reverse()
for tensor, seq_len in plots:
fig.AddSubplot(
[tensor, seq_len],
summary_utils.TrimPaddingAndPlotSequence,
title=tensor.name,
xlabel='Time')
fig.Finalize()
rnn_padding = tf.squeeze(rnn_padding, [2])
return final_out, rnn_padding, py_utils.NestedMap()
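# A hedged illustration, not part of lingvo itself: it only builds the default
# hyperparameter object defined above so the architecture described in the
# comments of Params() can be inspected programmatically. The params name used
# here is an arbitrary assumption.
def _example_default_encoder_params():
  p = AsrEncoder.Params()
  p.name = 'example_asr_encoder'
  # Two conv layers with 3x3 filters and stride (2, 2), per the defaults above.
  assert p.num_cnn_layers == len(p.conv_filter_shapes)
  assert p.num_cnn_layers == len(p.conv_filter_strides)
  return p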
| 1.9375 | 2 |
pos_neg_graph/graph_ratio.py | Yudabin/Review_Project | 0 | 2951 | import matplotlib.font_manager as fm
import matplotlib.pyplot as plt
import numpy as np
font_location = './wordcloud_file/malgun.ttf' # For Windows
font_name = fm.FontProperties(fname=font_location).get_name()
plt.rc('font', family=font_name)
def percent_graph2(movie_review) :
b = movie_review
    labelss = sorted(b['score'].unique())## set the labels (Hangul labels were not rendering)
    c = b['score'].value_counts().sort_index() ## frequency counts
    print(c)
    print(labelss)
    fig = plt.figure(figsize=(8,8)) ## create the canvas
    fig.set_facecolor('white') ## set the canvas background to white
    ax = fig.add_subplot() ## create the axes
    pie = ax.pie(c, ## draw the pie chart
                 startangle=90, ## start at 90 degrees
                 counterclock=False, ## draw clockwise
                 # autopct=lambda p : '{:.2f}%'.format(p), ## percentage labels
wedgeprops=dict(width=0.5),
colors = ['yellowgreen', 'orange'],
labels = labelss,
textprops={'fontsize': 22}
)
    total = np.sum(c) ## total count
    sum_pct = 0 ## running percentage total
    for i, l in enumerate(labelss):
        ang1, ang2 = pie[0][i].theta1, pie[0][i].theta2 ## start/end angles of the wedge
        r = pie[0][i].r ## wedge radius
        x = ((r + 0.5) / 2) * np.cos(np.pi / 180 * ((ang1 + ang2) / 2)) ## x coordinate of the label centre
        y = ((r + 0.5) / 2) * np.sin(np.pi / 180 * ((ang1 + ang2) / 2)) ## y coordinate of the label centre
        if i < len(labelss) - 1:
            sum_pct += float(f'{c[i] / total * 100:.2f}') ## accumulate the percentage shown so far
            ax.text(x, y, f'{c[i] / total * 100:.2f}%', ha='center', va='center', size=22, color='white',
                    weight='bold') ## draw the percentage label
        else: ## so the total adds up to exactly 100, the last slice shows 100 minus the accumulated percentage
            ax.text(x, y, f'{100 - sum_pct:.2f}%', ha='center', va='center',size=22,color='white',
                    weight='bold')
    # pie.rc('font', family=font_name)
    # plt.legend(pie[0], labelss) ## show the legend
    plt.savefig('./static/images/pos_neg_ratio.png') # output path | 2.71875 | 3 |
blog/views.py | kbilak/City-portal | 0 | 2952 | <reponame>kbilak/City-portal
from django.shortcuts import render
from django.views.generic import TemplateView
def index(request):
return render(request, 'index.html') | 1.515625 | 2 |
my_area/views.py | Vincent-Juma/area_master | 1 | 2953 | from django.shortcuts import render
from .forms import *
from django.shortcuts import redirect,get_object_or_404
from django.contrib.auth.decorators import login_required
from . models import *
from django.views import generic
@login_required(login_url='/accounts/login/')
def home(request):
mylocs = Myloc.objects.all()
return render(request, 'home.html',{"mylocs":mylocs,})
@login_required(login_url='accounts/login/')
def add_profile(request):
current_user = request.user
profile = Profile.objects.filter(id = current_user.id)
if request.method == 'POST':
form = NewProfileForm(request.POST, request.FILES)
if form.is_valid():
caption = form.save(commit=False)
caption.user = current_user
caption.save()
return redirect('myprofile')
else:
form = NewProfileForm()
return render(request, 'edit.html', {"form":form})
@login_required(login_url='accounts/login/')
def my_profile(request):
current_user = request.user
    my_my_areas = Myloc.objects.filter(user = current_user)
    my_profile = Profile.objects.filter(user = current_user).first()
    return render(request, 'profile.html', {"my_my_areas":my_my_areas, "my_profile":my_profile})
@login_required(login_url='/accounts/login/')
def addmy_area(request):
current_user = request.user
if request.method == 'POST':
form = MylocForm(request.POST, request.FILES)
if form.is_valid():
image = form.save(commit=False)
image.user = current_user
image.save()
return redirect('home')
else:
form = MylocForm()
return render(request, 'addmy_area.html', {"form": form})
def myloc_details(request,myloc_id):
activities=Activity.objects.filter(myloc=myloc_id)
posts=Post.objects.filter(myloc=myloc_id)
myloc=Myloc.objects.get(pk=myloc_id)
return render(request,'details.html',{'myloc':myloc,'activities':activities,'posts':posts})
@login_required(login_url="/accounts/login/")
def new_activity(request,pk):
current_user = request.user
myloc = get_object_or_404(Myloc,pk=pk)
if request.method == 'POST':
activity_form = NewActivityForm(request.POST, request.FILES)
if activity_form.is_valid():
activity = activity_form.save(commit=False)
activity.user = current_user
activity.myloc=myloc
activity.save()
return redirect('detail', myloc_id=myloc.id)
else:
activity_form = NewActivityForm()
return render(request, 'new_activity.html', {"form": activity_form,'myloc':myloc})
@login_required(login_url="/accounts/login/")
def new_post(request,pk):
current_user = request.user
myloc = get_object_or_404(Myloc,pk=pk)
if request.method == 'POST':
post_form = NewPostForm(request.POST, request.FILES)
if post_form.is_valid():
post = post_form.save(commit=False)
post.user = current_user
post.myloc=myloc
post.save()
return redirect('detail', myloc_id=myloc.id)
else:
post_form = NewPostForm()
return render(request, 'new_post.html', {"form": post_form,'myloc':myloc})
@login_required(login_url='/accounts/login/')
def search_project(request):
if 'project_name' in request.GET and request.GET["project_name"]:
search_term = request.GET.get("project_name")
searched_project = Myloc.search_by_location(search_term)
message = f"{search_term}"
return render(request, "search.html",{"message":message,"project": searched_project})
else:
message = "No search history"
return render(request, 'search.html',{"message":message})
| 2.171875 | 2 |
2020/day_01/__main__.py | d02d33pak/Advent-Of-Code | 0 | 2954 | """
Day 1 Main Module
"""
from day01 import parse_input, part1, part2
if __name__ == "__main__":
    # trying out the new walrus operator [:=] in python
if (part := int(input("Enter Part: "))) == 1:
print(part1(parse_input("input.txt")))
elif part == 2:
print(part2(parse_input("input.txt")))
else:
print("Wrong choice [1|2]")
| 3.28125 | 3 |
quiz_app/settings.py | ignasgri/Django_Quiz | 0 | 2955 | """
Django settings for quiz_app project.
Generated by 'django-admin startproject' using Django 2.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
SITE_ID = 1
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get('DEBUG', False)
ALLOWED_HOSTS = ['ignas-quiz.herokuapp.com','localhost','127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.sites',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'storages',
'quiz',
'multichoice',
'true_false',
'essay',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'quiz_app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'quiz_app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
# STATIC_ROOT = 'staticfiles'
STATICFILES_DIRS= (
os.path.join(BASE_DIR, "static"),
)
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
DATABASES = {'default': dj_database_url.parse(os.environ.get('DATABASE_URL')) }
AWS_HEADERS = { # see http://developer.yahoo.com/performance/rules.html#expires
'Expires': 'Thu, 31 Dec 2099 20:00:00 GMT',
'Cache-Control': 'max-age=94608000',
}
AWS_STORAGE_BUCKET_NAME = os.environ.get("AWS_STORAGE_BUCKET_NAME")
AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY")
AWS_S3_HOST = 's3-eu-west-1.amazonaws.com'
AWS_S3_CUSTOM_DOMAIN = '%s.s3.amazonaws.com' % AWS_STORAGE_BUCKET_NAME
STATICFILES_LOCATION = 'static'
STATICFILES_STORAGE = 'custom_storages.StaticStorage'
STATIC_URL = "https://%s/%s/" % (AWS_S3_CUSTOM_DOMAIN, STATICFILES_LOCATION)
MEDIAFILES_LOCATION = 'media'
MEDIA_URL = "https://%s/%s/" % (AWS_S3_CUSTOM_DOMAIN, MEDIAFILES_LOCATION)
DEFAULT_FILE_STORAGE = 'custom_storages.MediaStorage' | 2.078125 | 2 |
scripts/data/topple_dataset.py | davrempe/predicting-physical-dynamics | 16 | 2956 | import numpy as np
import pickle
from os.path import exists, realpath
import sys
import math
from topple_data_loader import ToppleData, ToppleDataLoader
import transforms3d
class ToppleNormalizationInfo():
'''
Structure to hold all the normalization information for a dataset.
'''
def __init__(self):
# max element of any linear vel vector
self.max_lin_vel = None
# max element of any angular vel vector
self.max_ang_vel = None
# max distance between positions in two contiguous timesteps
self.max_pos = None
# max change in rotation around any axis between two contiguous timesteps (for euler rot)
self.max_rot = None
# max angle of rotation between two steps for axis-angle representation
self.max_delta_rot = None
# max 2-norm of applied impulse vector
self.force_vec_max = None
# max 2-norm of a point in an object point cloud (used for point cloud and force pos)
self.pc_max = None
# normalization values for shape-related stuff
self.density_offset = None
self.density_max = None
self.mass_offset = None
self.mass_max = None
self.inertia_offset = None
self.inertia_max = None
self.friction_offset = None
self.friction_max = None
def print_out(self):
print({'max_lin_vel' : self.max_lin_vel, 'max_ang_vel' : self.max_ang_vel, 'max_pos' : self.max_pos, \
'max_rot' : self.max_rot, 'max_delta_rot' : self.max_delta_rot, 'force_vec_max' : self.force_vec_max, 'pc_max' : self.pc_max, \
'density_off' : self.density_offset, 'density_max' : self.density_max, 'mass_off' : self.mass_offset, \
'mass_max' : self.mass_max, 'inertia_off' : self.inertia_offset, 'inertia_max' : self.inertia_max, \
'friction_off' : self.friction_offset, 'friction_max' : self.friction_max
})
def save(self, pkl_file):
''' Saves normalization info object to a specified .pkl file. '''
with open(pkl_file, 'wb') as f:
pickle.dump(self, f)
def load_from(self, pkl_file):
''' Load normalization info into this object from a specified .pkl file. '''
with open(pkl_file, 'rb') as f:
norm_info = pickle.load(f)
self.copy_from(norm_info)
def copy_from(self, norm_info):
'''
Takes values from the given normalization info object and copies them to this one
'''
self.max_lin_vel = norm_info.max_lin_vel
self.max_ang_vel = norm_info.max_ang_vel
self.max_pos = norm_info.max_pos
self.max_rot = norm_info.max_rot
try:
self.max_delta_rot = norm_info.max_delta_rot
except:
# old versions of data doesn't have max delta rot
pass
self.force_vec_max = norm_info.force_vec_max
self.pc_max = norm_info.pc_max
self.density_offset = norm_info.density_offset
self.density_max = norm_info.density_max
self.mass_offset = norm_info.mass_offset
self.mass_max = norm_info.mass_max
self.inertia_offset = norm_info.inertia_offset
self.inertia_max = norm_info.inertia_max
try:
self.friction_offset = norm_info.friction_offset
self.friction_max = norm_info.friction_max
except:
# old version doesn't have this
pass
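# A minimal round-trip sketch, not taken from the original training pipeline:
# the file name and the example values below are assumptions used purely to
# illustrate save()/load_from() on ToppleNormalizationInfo.
def _example_norm_info_round_trip(pkl_path='topple_norm_info_example.pkl'):
    info = ToppleNormalizationInfo()
    info.max_lin_vel = 1.0
    info.max_ang_vel = 1.0
    info.max_pos = 0.1
    info.max_rot = 90.0
    info.max_delta_rot = 0.5
    info.force_vec_max = 1.0
    info.pc_max = 0.5
    info.density_offset, info.density_max = 0.0, 1.0
    info.mass_offset, info.mass_max = 0.0, 1.0
    info.inertia_offset, info.inertia_max = 0.0, 1.0
    info.friction_offset, info.friction_max = 0.0, 1.0
    info.save(pkl_path)
    restored = ToppleNormalizationInfo()
    restored.load_from(pkl_path)
    restored.print_out()
    return restored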
class ToppleBatch(object):
'''
Structure to hold a single batch of data.
'''
def __init__(self, size, seq_len, num_pts):
self.size = size
self.num_steps = seq_len
self.num_pts = num_pts
self.point_cloud = np.zeros((self.size, self.num_pts, 3))
self.lin_vel = np.zeros((self.size, self.num_steps, 3))
self.ang_vel = np.zeros((self.size, self.num_steps, 3))
self.pos = np.zeros((self.size, self.num_steps, 3))
# cummulative euler angles
self.rot = np.zeros((self.size, self.num_steps, 3))
# change in rotation in quaternion rep (w, x, y, z)
self.delta_quat = np.zeros((self.size, self.num_steps, 4))
# change in rotation between steps in axis-angle rep (scaled 3 vec)
self.delta_rot = np.zeros((self.size, self.num_steps, 3))
# change in rotation between steps in split axis-angle rep (4-vec)
self.delta_rot_split = np.zeros((self.size, self.num_steps, 4))
# 0 if before topple idx, 1 if after
self.topple_label = np.zeros((self.size, self.num_steps), dtype=int)
# other meta-data not directly used in network
self.toppled = []
self.shape_name = []
self.body_friction = np.zeros((self.size))
self.mass = np.zeros((self.size))
self.scale = np.zeros((self.size, 3))
self.rot_euler = np.zeros((self.size, self.num_steps, 3))
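# Illustrative sketch only: allocates an empty ToppleBatch and reports the array
# shapes it carries; the batch size, sequence length and point count used here
# are arbitrary assumptions rather than values from any particular dataset.
def _example_topple_batch_shapes(batch_size=4, seq_len=10, num_pts=1024):
    batch = ToppleBatch(batch_size, seq_len, num_pts)
    print('point_cloud  : %s' % (batch.point_cloud.shape,))   # (4, 1024, 3)
    print('lin_vel      : %s' % (batch.lin_vel.shape,))       # (4, 10, 3)
    print('delta_quat   : %s' % (batch.delta_quat.shape,))    # (4, 10, 4)
    print('topple_label : %s' % (batch.topple_label.shape,))  # (4, 10)
    return batch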
class ToppleDataset(object):
'''
Loads toppling data and provides batches for training and model evaluation.
'''
def __init__(self, roots, norm_info_file, batch_size=32, num_steps=15, shuffle=False, num_pts=None, perturb_pts=0.0):
'''
- roots : list of directories containing data to load for this dataset
- norm_info_file : .pkl file containing normalization information
- batch_size : number of sequences to return in each batch
- num_steps : number of timesteps to return in each sequence
- shuffle : randomly shuffles the returned sequence ordering
- num_pts : the number of points to use in the returned point cloud. If None uses all points in the data.
- perturb_pts : the stdev to randomly perturb point clouds with. If None no perturbation is performed.
-
'''
# settings
self.batch_size = batch_size
self.steps_per_seq = num_steps
self.shuffle = shuffle
self.perturb_std = perturb_pts
self.num_pts = num_pts
# load in data
for root in roots:
if not exists(root):
print('Could not find dataset at ' + root)
return
data_loader = ToppleDataLoader()
self.data = data_loader.load_data(roots)
if num_pts is None:
# use all the points in the point cloud
self.num_pts = self.data.point_cloud.shape[1]
# load in normalization info
if not exists(norm_info_file):
print('Could not find normalization info at ' + norm_info_file)
return
self.norm_info = ToppleNormalizationInfo()
self.norm_info.load_from(norm_info_file)
print('Loaded normalization info!')
# see if we have axis-angle info (for backwards compat)
self.use_aa = False
self.use_aa_split = False
self.use_topple_idx = False
self.use_delta_quat = False
if len(self.data.delta_rot) > 0:
self.use_aa = True
if len(self.data.delta_rot_split) > 0:
self.use_aa_split = True
if len(self.data.topple_idx) > 0:
self.use_topple_idx = True
if len(self.data.body_friction) > 0:
self.use_body_friction = True
if len(self.data.delta_quat) > 0:
self.use_delta_quat = True
# normalize the data
print('Normalizing data...')
self.normalize_data(self.data, self.norm_info)
print('Finished normalizing!')
# order to iterate through data when returning batches (in order by default)
self.iter_inds = range(0, self.data.size)
# prepare to iterate through
self.reset()
def normalize_data(self, data, norm_info):
'''
Normalizes (in place) the given ToppleData using the ToppleNormalizationInfo.
'''
# point clouds -> [-1, 1]
data.point_cloud /= norm_info.pc_max
# force pos -> [-1, 1]
data.force_pos /= norm_info.pc_max
# force vec -> [-1, 1]
data.force_vec /= norm_info.force_vec_max
# density -> [0, 1]
data.density = (data.density - norm_info.density_offset) / norm_info.density_max
# mass -> [0, 1]
data.mass = (data.mass - norm_info.mass_offset) / norm_info.mass_max
# inertia -> [0, 1]
data.inertia = (data.inertia - norm_info.inertia_offset) / norm_info.inertia_max
# friction -> [0, 1]
if norm_info.friction_offset is not None:
data.body_friction = (data.body_friction - norm_info.friction_offset) / norm_info.friction_max
# now time sequence data
# velocities -> [-1, 1]
for i, lin_vel_steps in enumerate(data.lin_vel):
data.lin_vel[i] = [(x / norm_info.max_lin_vel) for x in lin_vel_steps]
for i, ang_vel_steps in enumerate(data.ang_vel):
data.ang_vel[i] = [(x / norm_info.max_ang_vel) for x in ang_vel_steps]
# delta position -> [-1, 1]
for i, pos_steps in enumerate(data.pos):
data.pos[i] = [(x / norm_info.max_pos) for x in pos_steps]
# delta rotation -> [-1, 1]
for i, rot_steps in enumerate(data.total_rot):
data.total_rot[i] = [(x / norm_info.max_rot) for x in rot_steps]
# delta rot axis-angle -> [-1, 1] norm
if self.use_aa:
for i, delta_rot_steps in enumerate(data.delta_rot):
data.delta_rot[i] = [(x / norm_info.max_delta_rot) for x in delta_rot_steps]
# make axes unit and and normalize angle -> [-1, 1]
if self.use_aa_split:
for i, delta_rot_split_steps in enumerate(data.delta_rot_split):
data.delta_rot_split[i] = [np.append(x[:3] / np.linalg.norm(x[:3]), x[3] / norm_info.max_delta_rot) for x in delta_rot_split_steps]
def reset(self):
'''
Prepares to iterate through dataset.
'''
if self.shuffle:
np.random.shuffle(self.iter_inds)
# we consider an epoch as returning one sequence from every single simulation
# ( though if the sequence length is shorter than sim length the unique sequences contained
# in the dataset will be much more than an epoch length )
self.num_batches = (self.data.size + self.batch_size - 1) // self.batch_size
self.batch_idx = 0
def has_next_batch(self):
'''
Returns false if done with the current "epoch" (seen each sim once).
'''
return self.batch_idx < self.num_batches
def next_batch(self, random_window=True, focus_toppling=False):
'''
Returns the next batch of data. if random_window=True will get a random sequence of correct length (otherwise
starts at 0). If focus_toppling=True, will make sure this sequence includes the part of the sequence where toppling occurs.
'''
# size is either batch_size, or shorter if we're at the end of the data
start_idx = self.batch_idx * self.batch_size
end_idx = min((self.batch_idx + 1) * self.batch_size, self.data.size)
batch_size = end_idx - start_idx
# get batch data
batch = ToppleBatch(self.batch_size, self.steps_per_seq, self.num_pts)
for i in range(batch_size):
pc, lin_vel, ang_vel, pos, rot, delta_quat, delta_rot, delta_rot_split, topple_label, meta_info = \
self.get_seq(self.iter_inds[start_idx + i], self.steps_per_seq, random_window, focus_toppling)
batch.point_cloud[i] = pc
batch.lin_vel[i] = lin_vel
batch.ang_vel[i] = ang_vel
batch.pos[i] = pos
batch.rot[i] = rot
if self.use_delta_quat:
batch.delta_quat[i] = delta_quat
if self.use_aa:
batch.delta_rot[i] = delta_rot
if self.use_aa_split:
batch.delta_rot_split[i] = delta_rot_split
if self.use_topple_idx:
batch.topple_label[i] = topple_label
batch.toppled.append(meta_info[0])
batch.shape_name.append(meta_info[1])
batch.scale[i] = meta_info[2]
batch.rot_euler[i] = meta_info[3]
if self.use_body_friction:
batch.body_friction[i] = meta_info[4]
batch.mass[i] = meta_info[5]
if batch_size != self.batch_size:
# need to pad the end with repeat of data
for i in range(self.batch_size - batch_size):
batch.point_cloud[batch_size + i] = batch.point_cloud[i]
batch.lin_vel[batch_size + i] = batch.lin_vel[i]
batch.ang_vel[batch_size + i] = batch.ang_vel[i]
batch.pos[batch_size + i] = batch.pos[i]
batch.rot[batch_size + i] = batch.rot[i]
if self.use_delta_quat:
batch.delta_quat[batch_size + i] = batch.delta_quat[i]
batch.toppled.append(batch.toppled[i])
batch.shape_name.append(batch.shape_name[i])
batch.scale[batch_size + i] = batch.scale[i]
batch.rot_euler[batch_size + i] = batch.rot_euler[i]
batch.mass[batch_size + i] = batch.mass[i]
if self.use_aa:
batch.delta_rot[batch_size + i] = batch.delta_rot[i]
if self.use_aa_split:
batch.delta_rot_split[batch_size + i] = batch.delta_rot_split[i]
if self.use_topple_idx:
batch.topple_label[batch_size + i] = batch.topple_label[i]
if self.use_body_friction:
batch.body_friction[batch_size + i] = batch.body_friction[i]
self.batch_idx += 1
return batch
def get_seq(self, idx, num_steps, random_window=True, focus_toppling=False):
'''
Returns a random contiguous sequence from the simulation at the given idx and length num_steps.
        If num_steps > sim_length, the final (num_steps - sim_length) steps are padded with the value at
sim[sim_length].
'''
# get the normalized canonical point cloud for this simulation
pc = np.copy(self.data.point_cloud[self.data.shape_idx[idx]])
scale = self.data.scale[idx]
# scale accordingly
pc *= np.reshape(scale, (1, -1))
# randomly perturb point cloud
pc += np.random.normal(0.0, self.perturb_std, pc.shape)
# randomly draw a subset of points if desired
if self.num_pts < pc.shape[0]:
pc_inds = np.random.choice(pc.shape[0], self.num_pts, replace=False)
pc = pc[pc_inds, :]
# randomly choose a size num_steps sequence from the simulation to return time-series data
total_steps = len(self.data.lin_vel[idx])
max_start_step = total_steps - num_steps
start_step = 0
if max_start_step < 0:
# simulation is shorter than desired sequence length
pad_len = abs(max_start_step)
lin_vel_list = self.data.lin_vel[idx]
lin_vel_out = np.array(lin_vel_list + [lin_vel_list[-1]]*pad_len)
ang_vel_list = self.data.ang_vel[idx]
ang_vel_out = np.array(ang_vel_list + [ang_vel_list[-1]]*pad_len)
pos_list = self.data.pos[idx]
pos_out = np.array(pos_list + [pos_list[-1]]*pad_len)
rot_list = self.data.total_rot[idx]
rot_out = np.array(rot_list + [rot_list[-1]]*pad_len)
if self.use_delta_quat:
delta_quat_list = self.data.delta_quat[idx]
delta_quat_out = np.array(delta_quat_list + [delta_quat_list[-1]]*pad_len)
euler_rot_list = self.data.rot_euler[idx]
euler_rot_out = np.array(euler_rot_list + [euler_rot_list[-1]]*pad_len)
if self.use_aa:
delta_rot_list = self.data.delta_rot[idx]
delta_rot_out = np.array(delta_rot_list + [delta_rot_list[-1]]*pad_len)
if self.use_aa_split:
delta_rot_split_list = self.data.delta_rot_split[idx]
delta_rot_split_out = np.array(delta_rot_split_list + [delta_rot_split_list[-1]]*pad_len)
if self.use_topple_idx:
topple_label_out = np.zeros((total_steps + pad_len), dtype=int)
seq_topple_idx = self.data.topple_idx[idx]
if seq_topple_idx > 0:
topple_label_out[seq_topple_idx:] = 1
else:
start_step = 0
if random_window:
if focus_toppling and self.data.toppled[idx]:
# choose window around the index where it topples
topple_idx = self.data.topple_idx[idx]
min_idx = max([topple_idx - num_steps + 1, 0])
if min_idx >= max_start_step:
# just pick the max index
start_step = max_start_step
else:
# our window is guaranteed to see some part of toppling
start_step = np.random.randint(min_idx, max_start_step+1)
else:
start_step = np.random.randint(0, max_start_step+1)
end_step = start_step + num_steps
# print('Range: %d, %d' % (start_step, end_step))
lin_vel_out = np.array(self.data.lin_vel[idx][start_step:end_step])
ang_vel_out = np.array(self.data.ang_vel[idx][start_step:end_step])
pos_out = np.array(self.data.pos[idx][start_step:end_step])
rot_out = np.array(self.data.total_rot[idx][start_step:end_step])
if self.use_delta_quat:
delta_quat_out = np.array(self.data.delta_quat[idx][start_step:end_step])
euler_rot_out = np.array(self.data.rot_euler[idx][start_step:end_step])
if self.use_aa:
delta_rot_out = np.array(self.data.delta_rot[idx][start_step:end_step])
if self.use_aa_split:
delta_rot_split_out = np.array(self.data.delta_rot_split[idx][start_step:end_step])
if self.use_topple_idx:
topple_label_out = np.zeros((num_steps), dtype=int)
seq_topple_idx = self.data.topple_idx[idx]
if seq_topple_idx > 0:
if seq_topple_idx <= start_step:
topple_label_out[:] = 1
elif seq_topple_idx < end_step:
topple_label_out[seq_topple_idx-start_step:] = 1
# rotate point cloud to align with first frame of sequence
init_rot = self.data.rot_euler[idx][start_step]
xrot, yrot, zrot = np.radians(init_rot)
R = transforms3d.euler.euler2mat(zrot, xrot, yrot, axes='szxy') # unity applies euler angles in z, x, y ordering
pc = np.dot(pc, R.T)
toppled = self.data.toppled[idx]
shape_name = self.data.shape_name[idx]
mass = self.data.mass[idx]
body_fric = -1.0
if self.use_body_friction:
body_fric = self.data.body_friction[idx]
meta_info = (toppled, shape_name, scale, euler_rot_out, body_fric, mass)
if not self.use_aa:
delta_rot_out = None
if not self.use_aa_split:
delta_rot_split_out = None
if not self.use_topple_idx:
topple_label_out = None
if not self.use_delta_quat:
delta_quat_out = None
return pc, lin_vel_out, ang_vel_out, pos_out, rot_out, delta_quat_out, delta_rot_out, delta_rot_split_out, topple_label_out, meta_info
def get_norm_info(self):
return self.norm_info
if __name__=='__main__':
# norm_info = ToppleNormalizationInfo()
# norm_info.load_from('../../data/sim/normalization_info/cube_train.pkl')
# norm_info.print_out()
topple_data = ToppleDataset(roots=['./data/sim/Cube/Cube30k_ObjSplit/Cube30kVal'], norm_info_file='./data/sim/normalization_info/cube_30k.pkl', \
batch_size=5, num_steps=10, shuffle=True, num_pts=None, perturb_pts=0.01)
count = 0
while topple_data.has_next_batch():
batch = topple_data.next_batch(random_window=True, focus_toppling=False)
count += 1
# print(batch.lin_vel[0])
# print(batch.toppled[0])
# print(batch.delta_rot_split[0])
# print(batch.delta_rot[0])
# print(batch.topple_label[0])
# print(batch.pos)
# print(batch.body_friction)
# print(batch.delta_quat[0])
# print(np.degrees(2*np.arccos(batch.delta_quat[0, :, 0])))
print('Total num batches: ' + str(count))
topple_data.reset()
count = 0
while topple_data.has_next_batch():
batch = topple_data.next_batch()
count += 1
print(batch.size)
print('Total num batches: ' + str(count))
| 2.421875 | 2 |
Part1/AverageAccuracy.py | efkandurakli/Graduation-Project1 | 1 | 2957 | import numpy as np
from operator import truediv
def AA_andEachClassAccuracy(confusion_matrix):
counter = confusion_matrix.shape[0]
list_diag = np.diag(confusion_matrix)
list_raw_sum = np.sum(confusion_matrix, axis=1)
each_acc = np.nan_to_num(truediv(list_diag, list_raw_sum))
average_acc = np.mean(each_acc)
return each_acc, average_acc | 2.515625 | 3 |
scripts/sct_apply_transfo.py | YangHee-Min/spinalcordtoolbox | 0 | 2958 | #!/usr/bin/env python
#########################################################################################
#
# Apply transformations. This function is a wrapper for sct_WarpImageMultiTransform
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2014 Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: <NAME>, <NAME>
# Modified: 2014-07-20
#
# About the license: see the file LICENSE.TXT
#########################################################################################
# TODO: display message at the end
# TODO: interpolation methods
from __future__ import division, absolute_import
import sys, io, os, time, functools
from msct_parser import Parser
import sct_utils as sct
import sct_convert
import sct_image
import spinalcordtoolbox.image as msct_image
from sct_crop_image import ImageCropper
class Param:
def __init__(self):
self.verbose = '1'
self.remove_temp_files = '1'
# PARSER
# ==========================================================================================
def get_parser():
# parser initialisation
parser = Parser(__file__)
parser.usage.set_description('Apply transformations. This function is a wrapper for antsApplyTransforms (ANTs).')
parser.add_option(name="-i",
type_value="file",
description="input image",
mandatory=True,
example="t2.nii.gz")
parser.add_option(name="-d",
type_value="file",
description="destination image",
mandatory=True,
example="out.nii.gz")
parser.add_option(name="-w",
type_value=[[','], "file"],
description="Transformation, which can be a warping field (nifti image) or an affine transformation matrix (text file).",
mandatory=True,
example="warp1.nii.gz,warp2.nii.gz")
parser.add_option(name="-crop",
type_value="multiple_choice",
description="Crop Reference. 0 : no reference. 1 : sets background to 0. 2 : use normal background",
mandatory=False,
default_value='0',
example=['0', '1', '2'])
parser.add_option(name="-c",
type_value=None,
description="Crop Reference. 0 : no reference. 1 : sets background to 0. 2 : use normal background",
mandatory=False,
deprecated_by='-crop')
parser.add_option(name="-o",
type_value="file_output",
description="registered source.",
mandatory=False,
default_value='',
example="dest.nii.gz")
parser.add_option(name="-x",
type_value="multiple_choice",
description="interpolation method",
mandatory=False,
default_value='spline',
example=['nn', 'linear', 'spline'])
parser.add_option(name="-r",
type_value="multiple_choice",
description="""Remove temporary files.""",
mandatory=False,
default_value='1',
example=['0', '1'])
parser.add_option(name="-v",
type_value="multiple_choice",
description="""Verbose.""",
mandatory=False,
default_value='1',
example=['0', '1', '2'])
return parser
class Transform:
def __init__(self, input_filename, warp, fname_dest, output_filename='', verbose=0, crop=0, interp='spline', remove_temp_files=1, debug=0):
self.input_filename = input_filename
if isinstance(warp, str):
self.warp_input = list([warp])
else:
self.warp_input = warp
self.fname_dest = fname_dest
self.output_filename = output_filename
self.interp = interp
self.crop = crop
self.verbose = verbose
self.remove_temp_files = remove_temp_files
self.debug = debug
def apply(self):
# Initialization
fname_src = self.input_filename # source image (moving)
fname_warp_list = self.warp_input # list of warping fields
fname_out = self.output_filename # output
fname_dest = self.fname_dest # destination image (fix)
verbose = self.verbose
remove_temp_files = self.remove_temp_files
crop_reference = self.crop # if = 1, put 0 everywhere around warping field, if = 2, real crop
interp = sct.get_interpolation('isct_antsApplyTransforms', self.interp)
# Parse list of warping fields
sct.printv('\nParse list of warping fields...', verbose)
use_inverse = []
fname_warp_list_invert = []
# fname_warp_list = fname_warp_list.replace(' ', '') # remove spaces
# fname_warp_list = fname_warp_list.split(",") # parse with comma
for idx_warp, path_warp in enumerate(fname_warp_list):
# Check if inverse matrix is specified with '-' at the beginning of file name
if path_warp.startswith("-"):
use_inverse.append('-i')
fname_warp_list[idx_warp] = path_warp[1:] # remove '-'
fname_warp_list_invert += [[use_inverse[idx_warp], fname_warp_list[idx_warp]]]
else:
use_inverse.append('')
fname_warp_list_invert += [[path_warp]]
path_warp = fname_warp_list[idx_warp]
if path_warp.endswith((".nii", ".nii.gz")) \
and msct_image.Image(fname_warp_list[idx_warp]).header.get_intent()[0] != 'vector':
raise ValueError("Displacement field in {} is invalid: should be encoded" \
" in a 5D file with vector intent code" \
" (see https://nifti.nimh.nih.gov/pub/dist/src/niftilib/nifti1.h" \
.format(path_warp))
# need to check if last warping field is an affine transfo
isLastAffine = False
path_fname, file_fname, ext_fname = sct.extract_fname(fname_warp_list_invert[-1][-1])
if ext_fname in ['.txt', '.mat']:
isLastAffine = True
# check if destination file is 3d
if not sct.check_if_3d(fname_dest):
sct.printv('ERROR: Destination data must be 3d')
# N.B. Here we take the inverse of the warp list, because sct_WarpImageMultiTransform concatenates in the reverse order
fname_warp_list_invert.reverse()
fname_warp_list_invert = functools.reduce(lambda x,y: x+y, fname_warp_list_invert)
# Extract path, file and extension
path_src, file_src, ext_src = sct.extract_fname(fname_src)
path_dest, file_dest, ext_dest = sct.extract_fname(fname_dest)
# Get output folder and file name
if fname_out == '':
path_out = '' # output in user's current directory
file_out = file_src + '_reg'
ext_out = ext_src
fname_out = os.path.join(path_out, file_out + ext_out)
# Get dimensions of data
sct.printv('\nGet dimensions of data...', verbose)
img_src = msct_image.Image(fname_src)
nx, ny, nz, nt, px, py, pz, pt = img_src.dim
# nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(fname_src)
sct.printv(' ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz) + ' x ' + str(nt), verbose)
# if 3d
if nt == 1:
# Apply transformation
sct.printv('\nApply transformation...', verbose)
if nz in [0, 1]:
dim = '2'
else:
dim = '3'
sct.run(['isct_antsApplyTransforms',
'-d', dim,
'-i', fname_src,
'-o', fname_out,
'-t',
] + fname_warp_list_invert + [
'-r', fname_dest,
] + interp, verbose=verbose, is_sct_binary=True)
# if 4d, loop across the T dimension
else:
path_tmp = sct.tmp_create(basename="apply_transfo", verbose=verbose)
# convert to nifti into temp folder
sct.printv('\nCopying input data to tmp folder and convert to nii...', verbose)
img_src.save(os.path.join(path_tmp, "data.nii"))
sct.copy(fname_dest, os.path.join(path_tmp, file_dest + ext_dest))
fname_warp_list_tmp = []
for fname_warp in fname_warp_list:
path_warp, file_warp, ext_warp = sct.extract_fname(fname_warp)
sct.copy(fname_warp, os.path.join(path_tmp, file_warp + ext_warp))
fname_warp_list_tmp.append(file_warp + ext_warp)
fname_warp_list_invert_tmp = fname_warp_list_tmp[::-1]
curdir = os.getcwd()
os.chdir(path_tmp)
# split along T dimension
sct.printv('\nSplit along T dimension...', verbose)
im_dat = msct_image.Image('data.nii')
im_header = im_dat.hdr
data_split_list = sct_image.split_data(im_dat, 3)
for im in data_split_list:
im.save()
# apply transfo
sct.printv('\nApply transformation to each 3D volume...', verbose)
for it in range(nt):
file_data_split = 'data_T' + str(it).zfill(4) + '.nii'
file_data_split_reg = 'data_reg_T' + str(it).zfill(4) + '.nii'
status, output = sct.run(['isct_antsApplyTransforms',
'-d', '3',
'-i', file_data_split,
'-o', file_data_split_reg,
'-t',
] + fname_warp_list_invert_tmp + [
'-r', file_dest + ext_dest,
] + interp, verbose, is_sct_binary=True)
# Merge files back
sct.printv('\nMerge file back...', verbose)
import glob
path_out, name_out, ext_out = sct.extract_fname(fname_out)
# im_list = [Image(file_name) for file_name in glob.glob('data_reg_T*.nii')]
# concat_data use to take a list of image in input, now takes a list of file names to open the files one by one (see issue #715)
fname_list = glob.glob('data_reg_T*.nii')
fname_list.sort()
im_out = sct_image.concat_data(fname_list, 3, im_header['pixdim'])
im_out.save(name_out + ext_out)
os.chdir(curdir)
sct.generate_output_file(os.path.join(path_tmp, name_out + ext_out), fname_out)
# Delete temporary folder if specified
if int(remove_temp_files):
sct.printv('\nRemove temporary files...', verbose)
sct.rmtree(path_tmp, verbose=verbose)
# 2. crop the resulting image using dimensions from the warping field
warping_field = fname_warp_list_invert[-1]
# if last warping field is an affine transfo, we need to compute the space of the concatenate warping field:
if isLastAffine:
sct.printv('WARNING: the resulting image could have wrong apparent results. You should use an affine transformation as last transformation...', verbose, 'warning')
elif crop_reference == 1:
ImageCropper(input_file=fname_out, output_file=fname_out, ref=warping_field, background=0).crop()
# sct.run('sct_crop_image -i '+fname_out+' -o '+fname_out+' -ref '+warping_field+' -b 0')
elif crop_reference == 2:
ImageCropper(input_file=fname_out, output_file=fname_out, ref=warping_field).crop()
# sct.run('sct_crop_image -i '+fname_out+' -o '+fname_out+' -ref '+warping_field)
sct.display_viewer_syntax([fname_dest, fname_out], verbose=verbose)
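# Illustrative usage sketch (added for clarity, not part of the original script): the
# Transform class can also be driven programmatically instead of through the CLI below.
# The file names here are hypothetical.
#
# transform = Transform(input_filename='t2.nii.gz',
#                       warp=['warp1.nii.gz', 'warp2.nii.gz'],
#                       fname_dest='dest.nii.gz',
#                       output_filename='t2_reg.nii.gz',
#                       interp='spline')
# transform.apply()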
# MAIN
# ==========================================================================================
def main(args=None):
# check user arguments
if not args:
args = sys.argv[1:]
# Get parser info
parser = get_parser()
arguments = parser.parse(args)
input_filename = arguments["-i"]
fname_dest = arguments["-d"]
warp_filename = arguments["-w"]
transform = Transform(input_filename=input_filename, fname_dest=fname_dest, warp=warp_filename)
if "-crop" in arguments:
transform.crop = arguments["-crop"]
if "-o" in arguments:
transform.output_filename = arguments["-o"]
if "-x" in arguments:
transform.interp = arguments["-x"]
if "-r" in arguments:
transform.remove_temp_files = int(arguments["-r"])
transform.verbose = int(arguments.get('-v'))
sct.init_sct(log_level=transform.verbose, update=True) # Update log level
transform.apply()
# START PROGRAM
# ==========================================================================================
if __name__ == "__main__":
sct.init_sct()
# # initialize parameters
param = Param()
# call main function
main()
| 1.914063 | 2 |
tests/plugins/test_plugin_base.py | vurankar/mongo-connector | 1 | 2959 | # Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests methods in plugin_base.py
"""
import copy
import sys
sys.path[0:0] = [""]
from mongo_connector.plugins.plugin_base import PluginBase
from tests import unittest
from tests.plugins.helpers import (BAD_PLUGIN_CONFIGS, get_test_namespace)
class TestPluginBase(unittest.TestCase):
""" Tests the utils
"""
def setUp(self):
"""Initialize test instance.
"""
self.namespace = get_test_namespace()
def test_name(self):
"""Test name.
"""
configs = self.namespace.plugins[0]
for cfg in configs:
obj = PluginBase(cfg)
self.assertEqual(cfg['pluginName'], obj.name())
for cfg in BAD_PLUGIN_CONFIGS:
obj = PluginBase(cfg)
self.assertEqual(obj.name().index('generated'), 0)
def test_info(self):
"""Test info.
"""
configs = self.namespace.plugins[0]
for cfg in configs:
obj = PluginBase(cfg)
self.assertEqual(cfg['config'], obj.info())
for cfg in BAD_PLUGIN_CONFIGS:
obj = PluginBase(cfg)
self.assertEqual(obj.info(), {})
def _test_not_implemented_method_by_name(self, name):
"""Test not implemented method by name.
"""
configs = copy.deepcopy(self.namespace.plugins)
configs.extend(BAD_PLUGIN_CONFIGS)
for cfg in configs:
obj = PluginBase(cfg)
try:
method = getattr(obj, name)
if not method or not callable(method):
raise KeyError
method()
except NotImplementedError:
pass
return True
def test_invoke(self):
"""Test invoke.
"""
flag = self._test_not_implemented_method_by_name('invoke')
self.assertEqual(flag, True)
def test_bulk_invoke(self):
"""Test bulk_invoke.
"""
# bulk_invoke is actually implemented, but it calls invoke in a loop,
# which raises a NotImplementedError.
flag = self._test_not_implemented_method_by_name('bulk_invoke')
self.assertEqual(flag, True)
def test_commit(self):
"""Test commit.
"""
flag = self._test_not_implemented_method_by_name('commit')
self.assertEqual(flag, True)
def test_stop(self):
"""Test stop.
"""
flag = self._test_not_implemented_method_by_name('stop')
self.assertEqual(flag, True)
if __name__ == '__main__':
unittest.main()
| 2.125 | 2 |
address/models.py | PerchLive/django-address | 0 | 2960 | import logging
import sys
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models.fields.related import ForeignObject
from django.utils.encoding import python_2_unicode_compatible
try:
from django.db.models.fields.related_descriptors import ForwardManyToOneDescriptor
except ImportError:
from django.db.models.fields.related import ReverseSingleRelatedObjectDescriptor as ForwardManyToOneDescriptor
logger = logging.getLogger(__name__)
if sys.version > '3':
long = int
basestring = (str, bytes)
unicode = str
__all__ = ['Country', 'State', 'Locality', 'Address', 'AddressField']
class InconsistentDictError(Exception):
pass
def _to_python(value):
raw = value.get('raw', '')
country = value.get('country', '')
country_code = value.get('country_code', '')
state = value.get('state', '')
state_code = value.get('state_code', '')
locality = value.get('locality', '')
sublocality = value.get('sublocality', '')
postal_code = value.get('postal_code', '')
street_number = value.get('street_number', '')
route = value.get('route', '')
formatted = value.get('formatted', '')
latitude = value.get('latitude', None)
longitude = value.get('longitude', None)
# If there is no value (empty raw) then return None.
if not raw:
return None
# Fix issue with NYC boroughs (https://code.google.com/p/gmaps-api-issues/issues/detail?id=635)
if not locality and sublocality:
locality = sublocality
# If we have an inconsistent set of values, bail out now.
if (country or state or locality) and not (country and state and locality):
raise InconsistentDictError
# Handle the country.
try:
country_obj = Country.objects.get(name=country)
except Country.DoesNotExist:
if country:
if len(country_code) > Country._meta.get_field('code').max_length:
if country_code != country:
raise ValueError('Invalid country code (too long): %s' % country_code)
country_code = ''
country_obj = Country.objects.create(name=country, code=country_code)
else:
country_obj = None
# Handle the state.
try:
state_obj = State.objects.get(name=state, country=country_obj)
except State.DoesNotExist:
if state:
if len(state_code) > State._meta.get_field('code').max_length:
if state_code != state:
raise ValueError('Invalid state code (too long): %s' % state_code)
state_code = ''
state_obj = State.objects.create(name=state, code=state_code, country=country_obj)
else:
state_obj = None
# Handle the locality.
try:
locality_obj = Locality.objects.get(name=locality, postal_code=postal_code, state=state_obj)
except Locality.DoesNotExist:
if locality:
locality_obj = Locality.objects.create(name=locality, postal_code=postal_code, state=state_obj)
else:
locality_obj = None
# Handle the address.
try:
if not (street_number or route or locality):
address_obj = Address.objects.get(raw=raw)
else:
address_obj = Address.objects.get(
street_number=street_number,
route=route,
locality=locality_obj
)
except Address.DoesNotExist:
address_obj = Address(
street_number=street_number,
route=route,
raw=raw,
locality=locality_obj,
formatted=formatted,
latitude=latitude,
longitude=longitude,
)
# If "formatted" is empty try to construct it from other values.
if not address_obj.formatted:
address_obj.formatted = unicode(address_obj)
# Need to save.
address_obj.save()
# Done.
return address_obj
##
# Convert a dictionary to an address.
##
def to_python(value):
# Keep `None`s.
if value is None:
return None
# Is it already an address object?
if isinstance(value, Address):
return value
# If we have an integer, assume it is a model primary key; Django passes primary
# keys around directly in some of its form handling.
elif isinstance(value, (int, long)):
return value
# A string is considered a raw value.
elif isinstance(value, basestring):
obj = Address(raw=value)
obj.save()
return obj
# A dictionary of named address components.
elif isinstance(value, dict):
# Attempt a conversion.
try:
return _to_python(value)
except InconsistentDictError:
return Address.objects.create(raw=value['raw'])
# Not in any of the formats I recognise.
raise ValidationError('Invalid address value.')
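# Illustrative example (not part of the original module): the dictionary form accepted by
# to_python() mirrors the keys read in _to_python() above; the values below are made up.
#
# to_python({
#     'raw': '10 Downing St, London SW1A 2AA, UK',
#     'street_number': '10',
#     'route': 'Downing St',
#     'locality': 'London',
#     'postal_code': 'SW1A 2AA',
#     'state': 'England',
#     'state_code': '',
#     'country': 'United Kingdom',
#     'country_code': 'GB',
# })
#
# A plain string such as to_python('10 Downing St, London') is stored as a raw-only Address.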
##
# A country.
##
@python_2_unicode_compatible
class Country(models.Model):
name = models.CharField(max_length=40, unique=True, blank=True)
code = models.CharField(max_length=2, blank=True) # not unique as there are duplicates (IT)
class Meta:
verbose_name_plural = 'Countries'
ordering = ('name',)
def __str__(self):
return '%s' % (self.name or self.code)
##
# A state. Google refers to this as `administration_level_1`.
##
@python_2_unicode_compatible
class State(models.Model):
name = models.CharField(max_length=165, blank=True)
code = models.CharField(max_length=3, blank=True)
country = models.ForeignKey(Country, on_delete=models.CASCADE, related_name='states')
class Meta:
unique_together = ('name', 'country')
ordering = ('country', 'name')
def __str__(self):
txt = self.to_str()
country = '%s' % self.country
if country and txt:
txt += ', '
txt += country
return txt
def to_str(self):
return '%s' % (self.name or self.code)
##
# A locality (suburb).
##
@python_2_unicode_compatible
class Locality(models.Model):
name = models.CharField(max_length=165, blank=True)
postal_code = models.CharField(max_length=10, blank=True)
state = models.ForeignKey(State, on_delete=models.CASCADE, related_name='localities')
class Meta:
verbose_name_plural = 'Localities'
unique_together = ('name', 'postal_code', 'state')
ordering = ('state', 'name')
def __str__(self):
txt = '%s' % self.name
state = self.state.to_str() if self.state else ''
if txt and state:
txt += ', '
txt += state
if self.postal_code:
txt += ' %s' % self.postal_code
cntry = '%s' % (self.state.country if self.state and self.state.country else '')
if cntry:
txt += ', %s' % cntry
return txt
##
# An address. If for any reason we are unable to find a matching
# decomposed address we will store the raw address string in `raw`.
##
@python_2_unicode_compatible
class Address(models.Model):
street_number = models.CharField(max_length=20, blank=True)
route = models.CharField(max_length=100, blank=True)
locality = models.ForeignKey(Locality, on_delete=models.CASCADE, related_name='addresses', blank=True, null=True)
raw = models.CharField(max_length=200)
formatted = models.CharField(max_length=200, blank=True)
latitude = models.FloatField(blank=True, null=True)
longitude = models.FloatField(blank=True, null=True)
class Meta:
verbose_name_plural = 'Addresses'
ordering = ('locality', 'route', 'street_number')
# unique_together = ('locality', 'route', 'street_number')
def __str__(self):
if self.formatted != '':
txt = '%s' % self.formatted
elif self.locality:
txt = ''
if self.street_number:
txt = '%s' % self.street_number
if self.route:
if txt:
txt += ' %s' % self.route
locality = '%s' % self.locality
if txt and locality:
txt += ', '
txt += locality
else:
txt = '%s' % self.raw
return txt
def clean(self):
if not self.raw:
raise ValidationError('Addresses may not have a blank `raw` field.')
def as_dict(self):
ad = dict(
street_number=self.street_number,
route=self.route,
raw=self.raw,
formatted=self.formatted,
latitude=self.latitude if self.latitude else '',
longitude=self.longitude if self.longitude else '',
)
if self.locality:
ad['locality'] = self.locality.name
ad['postal_code'] = self.locality.postal_code
if self.locality.state:
ad['state'] = self.locality.state.name
ad['state_code'] = self.locality.state.code
if self.locality.state.country:
ad['country'] = self.locality.state.country.name
ad['country_code'] = self.locality.state.country.code
return ad
class AddressDescriptor(ForwardManyToOneDescriptor):
def __set__(self, inst, value):
super(AddressDescriptor, self).__set__(inst, to_python(value))
##
# A field for addresses in other models.
##
class AddressField(models.ForeignKey):
description = 'An address'
def __init__(self, *args, **kwargs):
kwargs['to'] = 'address.Address'
super(AddressField, self).__init__(*args, **kwargs)
def contribute_to_class(self, cls, name, virtual_only=False):
from address.compat import compat_contribute_to_class
compat_contribute_to_class(self, cls, name, virtual_only)
# super(ForeignObject, self).contribute_to_class(cls, name, virtual_only=virtual_only)
setattr(cls, self.name, AddressDescriptor(self))
# def deconstruct(self):
# name, path, args, kwargs = super(AddressField, self).deconstruct()
# del kwargs['to']
# return name, path, args, kwargs
def formfield(self, **kwargs):
from .forms import AddressField as AddressFormField
defaults = dict(form_class=AddressFormField)
defaults.update(kwargs)
return super(AddressField, self).formfield(**defaults)
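# Illustrative sketch (not part of the original module): declaring an AddressField on a
# model; the model name is hypothetical.
#
# class Venue(models.Model):
#     name = models.CharField(max_length=100)
#     address = AddressField(on_delete=models.CASCADE, blank=True, null=True)
#
# Assigning a raw string or a component dictionary to ``venue.address`` is converted into
# an Address instance by AddressDescriptor via to_python() above.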
| 2.171875 | 2 |
src/eavatar.ava/pod/mods/tasks/__init__.py | eavatar/ava | 0 | 2961 | <reponame>eavatar/ava
# -*- coding: utf-8 -*-
"""
Modules for exposing functions that can be run as tasks.
"""
from __future__ import (absolute_import, division,
print_function, unicode_literals)
| 1.101563 | 1 |
addons/hr_payroll_account/models/hr_payroll_account.py | jjiege/odoo | 0 | 2962 | <reponame>jjiege/odoo<filename>addons/hr_payroll_account/models/hr_payroll_account.py
#-*- coding:utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
from odoo.exceptions import UserError
from odoo.tools import float_compare, float_is_zero
class HrPayslipLine(models.Model):
_inherit = 'hr.payslip.line'
def _get_partner_id(self, credit_account):
"""
Get partner_id of slip line to use in account_move_line
"""
# use partner of salary rule or fallback on employee's address
register_partner_id = self.salary_rule_id.register_id.partner_id
partner_id = register_partner_id.id or self.slip_id.employee_id.address_home_id.id
if credit_account:
if register_partner_id or self.salary_rule_id.account_credit.internal_type in ('receivable', 'payable'):
return partner_id
else:
if register_partner_id or self.salary_rule_id.account_debit.internal_type in ('receivable', 'payable'):
return partner_id
return False
class HrPayslip(models.Model):
_inherit = 'hr.payslip'
date = fields.Date('Date Account', states={'draft': [('readonly', False)]}, readonly=True,
help="Keep empty to use the period of the validation(Payslip) date.")
journal_id = fields.Many2one('account.journal', 'Salary Journal', readonly=True, required=True,
states={'draft': [('readonly', False)]}, default=lambda self: self.env['account.journal'].search([('type', '=', 'general')], limit=1))
move_id = fields.Many2one('account.move', 'Accounting Entry', readonly=True, copy=False)
@api.model
def create(self, vals):
if 'journal_id' in self.env.context:
vals['journal_id'] = self.env.context.get('journal_id')
return super(HrPayslip, self).create(vals)
@api.onchange('contract_id')
def onchange_contract(self):
super(HrPayslip, self).onchange_contract()
self.journal_id = self.contract_id.journal_id.id or (not self.contract_id and self.default_get(['journal_id'])['journal_id'])
@api.multi
def action_payslip_cancel(self):
moves = self.mapped('move_id')
moves.filtered(lambda x: x.state == 'posted').button_cancel()
moves.unlink()
return super(HrPayslip, self).action_payslip_cancel()
@api.multi
def action_payslip_done(self):
res = super(HrPayslip, self).action_payslip_done()
for slip in self:
line_ids = []
debit_sum = 0.0
credit_sum = 0.0
date = slip.date or slip.date_to
currency = slip.company_id.currency_id or slip.journal_id.company_id.currency_id
name = _('Payslip of %s') % (slip.employee_id.name)
move_dict = {
'narration': name,
'ref': slip.number,
'journal_id': slip.journal_id.id,
'date': date,
}
for line in slip.details_by_salary_rule_category:
amount = currency.round(slip.credit_note and -line.total or line.total)
if currency.is_zero(amount):
continue
debit_account_id = line.salary_rule_id.account_debit.id
credit_account_id = line.salary_rule_id.account_credit.id
if debit_account_id:
debit_line = (0, 0, {
'name': line.name,
'partner_id': line._get_partner_id(credit_account=False),
'account_id': debit_account_id,
'journal_id': slip.journal_id.id,
'date': date,
'debit': amount > 0.0 and amount or 0.0,
'credit': amount < 0.0 and -amount or 0.0,
'analytic_account_id': line.salary_rule_id.analytic_account_id.id or slip.contract_id.analytic_account_id.id,
'tax_line_id': line.salary_rule_id.account_tax_id.id,
})
line_ids.append(debit_line)
debit_sum += debit_line[2]['debit'] - debit_line[2]['credit']
if credit_account_id:
credit_line = (0, 0, {
'name': line.name,
'partner_id': line._get_partner_id(credit_account=True),
'account_id': credit_account_id,
'journal_id': slip.journal_id.id,
'date': date,
'debit': amount < 0.0 and -amount or 0.0,
'credit': amount > 0.0 and amount or 0.0,
'analytic_account_id': line.salary_rule_id.analytic_account_id.id or slip.contract_id.analytic_account_id.id,
'tax_line_id': line.salary_rule_id.account_tax_id.id,
})
line_ids.append(credit_line)
credit_sum += credit_line[2]['credit'] - credit_line[2]['debit']
if currency.compare_amounts(credit_sum, debit_sum) == -1:
acc_id = slip.journal_id.default_credit_account_id.id
if not acc_id:
raise UserError(_('The Expense Journal "%s" has not properly configured the Credit Account!') % (slip.journal_id.name))
adjust_credit = (0, 0, {
'name': _('Adjustment Entry'),
'partner_id': False,
'account_id': acc_id,
'journal_id': slip.journal_id.id,
'date': date,
'debit': 0.0,
'credit': currency.round(debit_sum - credit_sum),
})
line_ids.append(adjust_credit)
elif currency.compare_amounts(debit_sum, credit_sum) == -1:
acc_id = slip.journal_id.default_debit_account_id.id
if not acc_id:
raise UserError(_('The Expense Journal "%s" has not properly configured the Debit Account!') % (slip.journal_id.name))
adjust_debit = (0, 0, {
'name': _('Adjustment Entry'),
'partner_id': False,
'account_id': acc_id,
'journal_id': slip.journal_id.id,
'date': date,
'debit': currency.round(credit_sum - debit_sum),
'credit': 0.0,
})
line_ids.append(adjust_debit)
move_dict['line_ids'] = line_ids
move = self.env['account.move'].create(move_dict)
slip.write({'move_id': move.id, 'date': date})
move.post()
return res
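# Worked example of the balancing logic above (illustrative numbers): if the salary rule
# lines produce debit_sum = 1000.0 and credit_sum = 990.0, the move is unbalanced by 10.0,
# so an 'Adjustment Entry' crediting 10.0 on the journal's default credit account is
# appended; symmetrically, a debit adjustment is added when credit_sum exceeds debit_sum.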
class HrSalaryRule(models.Model):
_inherit = 'hr.salary.rule'
analytic_account_id = fields.Many2one('account.analytic.account', 'Analytic Account')
account_tax_id = fields.Many2one('account.tax', 'Tax')
account_debit = fields.Many2one('account.account', 'Debit Account', domain=[('deprecated', '=', False)])
account_credit = fields.Many2one('account.account', 'Credit Account', domain=[('deprecated', '=', False)])
class HrContract(models.Model):
_inherit = 'hr.contract'
_description = 'Employee Contract'
analytic_account_id = fields.Many2one('account.analytic.account', 'Analytic Account')
journal_id = fields.Many2one('account.journal', 'Salary Journal')
class HrPayslipRun(models.Model):
_inherit = 'hr.payslip.run'
journal_id = fields.Many2one('account.journal', 'Salary Journal', states={'draft': [('readonly', False)]}, readonly=True,
required=True, default=lambda self: self.env['account.journal'].search([('type', '=', 'general')], limit=1))
| 1.929688 | 2 |
ml_datasets/utils.py | abkoesdw/ml-datasets | 1 | 2963 | import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import sys
import numpy as np
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.colors import BoundaryNorm
def plot_images(
num_sample_perclass=10, x=None, y=None, labels=None, title=None, cmap=None
):
grid_x = num_sample_perclass + 1
grid_y = len(labels)
plt.figure(figsize=(grid_y, grid_x))
gs1 = gridspec.GridSpec(grid_y, grid_x)
gs1.update(wspace=0.025, hspace=0.05)
font = {"family": "serif", "weight": "bold"}
plt.suptitle(title)
j = 0
for i in range(grid_y):
idxs = [0] + list(np.where(y == list(labels.keys())[i])[0][: grid_x - 1])
label = labels[list(labels.keys())[i]]
for k, idx in enumerate(idxs):
ax1 = plt.subplot(gs1[j])
if k == 0:
ax1.text(0, 0.25, label, ha="right", wrap=True, fontdict=font)
else:
ax1.imshow(x[idx, ...], cmap=cmap)
plt.axis("off")
j += 1
plt.show()
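# Illustrative usage sketch (not part of the original module): ``labels`` maps the class
# ids appearing in ``y`` to display names; the data below is made up.
#
# import numpy as np
# x = np.random.rand(100, 28, 28)
# y = np.random.randint(0, 2, size=100)
# plot_images(num_sample_perclass=5, x=x, y=y,
#             labels={0: "zero", 1: "one"}, title="samples", cmap="gray")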
def plot_2D(x, y, title, axis="off"):
BLUE, ORANGE = "#57B5E8", "#E69E00"
plt.figure(figsize=(8, 8))
plt.scatter(
x[:, 0],
x[:, 1],
s=18,
facecolors="none",
edgecolors=np.array([BLUE, ORANGE])[y],
)
if axis == "off":
plt.axis("off")
elif axis == "on":
plt.xlabel("x_1")
plt.ylabel("x_2")
else:
print("incorrect values for arg: axis (on or off only)")
sys.exit()
plt.title(title)
plt.show()
def plot_dna(df, label):
matrix = df.values
col_names = df.columns
rows = np.arange(matrix.shape[0])
cols = np.arange(matrix.shape[1])
np.random.seed(3)
np.random.shuffle(rows)
np.random.shuffle(cols)
matrix = matrix[:, cols[:100]].T
matrix = matrix[:, rows]
col_names = col_names[cols[:100]]
label = label[rows]
mat_min = np.min(matrix)
mat_max = np.max(matrix)
mat_min = -np.max([np.abs(mat_min), mat_max])
mat_max = np.max([np.abs(mat_min), mat_max])
matrix = np.ma.masked_where(np.abs(matrix) <= 0.3, matrix)
plt.figure(figsize=(6, 12))
cmap_list = ["red", "darkred", "green", "lime", "lightgreen"]
cmap = LinearSegmentedColormap.from_list("Custom cmap", cmap_list, len(cmap_list))
cmap.set_bad("black")
bounds = np.linspace(
mat_min + 6, mat_max - 6, 5
) # np.arange(mat_min + 6, mat_max - 6, 0.1)
idx = np.searchsorted(bounds, 0)
bounds = np.insert(bounds, idx, 0)
norm = BoundaryNorm(bounds, cmap.N)
plt.imshow(matrix, cmap=cmap, norm=norm)
plt.xticks(np.arange(len(label)))
plt.yticks(np.arange(len(col_names)))
ax = plt.gca()
ax.set_xticklabels(label, rotation=90)
ax.set_yticklabels(col_names)
ax.yaxis.tick_right()
ax.tick_params(axis=u"both", which=u"both", labelsize=5, length=0.0)
plt.tight_layout()
fig = plt.gcf()
# fig.set_size_inches((6, 12), forward=False)
# fig.savefig("img/dna.png", dpi=200)
plt.show()
| 2.5625 | 3 |
Simulator/simulator.py | MasterRadule/DefenceFirst | 0 | 2964 | import logging
import os
import random
from abc import ABC, abstractmethod
from random import randint
from time import sleep, strftime
HOSTNAME = ['defence-first.rs', 'defence-first.de', 'defence-first.ru']
HOSTIP = ['172.16.17.32', '192.168.127.12', '172.16.58.3']
SOURCEIP = ['192.168.3.11', '192.168.127.12', '172.16.58.3', '172.16.58.3', '172.16.17.32']
USERNAMES = ['user1', 'user2', 'user3', 'user4', 'user5']
FACILITY = ['KERN', 'USER', 'MAIL', 'DAEMON', 'AUTH', 'SYSLOG', 'LPR', 'NEWS',
'UUCP', 'CLOCK_DAEMON', 'AUTHPRIV', 'FTP', 'NTP', 'LOGAUDIT', 'LOGALERT',
'CRON', 'LOCAL0', 'LOCAL1', 'LOCAL2', 'LOCAL3', 'LOCAL4', 'LOCAL5', 'LOCAL6', 'LOCAL7']
SEVERITY = ['DEBUG', 'INFORMATIONAL', 'NOTICE', 'WARNING', 'ERROR', 'CRITICAL', 'ALERT', 'EMERGENCY']
FORMAT = '%(asctime)s %(hostname)s-Application-%(hostip)s-%(sourceip)s %(severity)s-%(facility)s %(' \
'message)s '
RESOURCES = ['index.html', 'document.xml', 'dashboard.html']
LOGS_PATH = 'logs'
class State(ABC):
@abstractmethod
def run(self, context):
return NotImplemented
class DoSAttack(State):
def run(self, context):
d = {'hostname': HOSTNAME[0], 'hostip': HOSTIP[0], 'severity': SEVERITY[1],
'facility': FACILITY[1]}
http_response_code = '200'
for i in range(25):
if i >= 20:
http_response_code = '503'
d['severity'] = SEVERITY[5]
for sourceip in SOURCEIP:
d['sourceip'] = sourceip
context.logger.info('Requested resource index.html {}'.format(http_response_code), extra=d)
context.state = NormalState()
class NormalState(State):
def run(self, context):
normal = {'hostname': HOSTNAME[1], 'hostip': HOSTIP[1], 'severity': SEVERITY[1],
'facility': FACILITY[1]}
while True:
normal['sourceip'] = random.choice(SOURCEIP)
if random.random() < 0.3:
context.logger.info(
'Successful authorization on username "{}"'.format(USERNAMES[SOURCEIP.index(normal['sourceip'])]),
extra=normal)
else:
context.logger.info('Requested resource {} 200'.format(random.choice(RESOURCES)), extra=normal)
sleep(1)
if random.random() < 0.1:
rand = randint(1, 3)
if rand == 1:
context.state = DoSAttack()
elif rand == 2:
context.state = BruteForce()
elif rand == 3:
context.state = DatabaseError()
context.state.run(context)
class BruteForce(State):
def run(self, context):
attack = {'hostname': HOSTNAME[1], 'hostip': HOSTIP[1], 'sourceip': SOURCEIP[0], 'severity': SEVERITY[2],
'facility': FACILITY[4]}
normal = {'hostname': HOSTNAME[1], 'hostip': HOSTIP[1], 'severity': SEVERITY[1],
'facility': FACILITY[1]}
for i in range(30):
if i > 5:
attack['severity'] = SEVERITY[3]
if random.random() < 0.45:
normal['sourceip'] = random.choice(SOURCEIP)
context.logger.info('Requested resource {} 200'.format(random.choice(RESOURCES)), extra=normal)
sleep(0.5)
context.logger.info('Failed authorization on username "user1"', extra=attack)
sleep(0.5)
context.state = NormalState()
class DatabaseError(State):
def run(self, context):
d = {'hostname': HOSTNAME[2], 'hostip': HOSTIP[2], 'sourceip': SOURCEIP[0], 'severity': SEVERITY[4],
'facility': FACILITY[3]}
context.logger.info('Database error', extra=d)
sleep(1)
context.state = NormalState()
class Context:
def __init__(self):
self.state = NormalState()
formatter = logging.Formatter(FORMAT, "%Y-%m-%d %H:%M:%S")
logger = logging.getLogger('simulator')
if not os.path.exists(LOGS_PATH):
os.mkdir(LOGS_PATH)
fileHandler = logging.FileHandler(
os.path.join(LOGS_PATH, 'application_log-{}.log'.format(strftime('%Y-%m-%d'))))
fileHandler.setFormatter(formatter)
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(formatter)
logger.addHandler(fileHandler)
logger.addHandler(consoleHandler)
logger.setLevel(logging.INFO)
self.logger = logger
def run(self):
self.state.run(self)
@property
def state(self):
return self._state
@state.setter
def state(self, value):
self._state = value
if __name__ == '__main__':
sm = Context()
sm.run()
| 2.296875 | 2 |
bayes_race/pp/__init__.py | DaniMarts/bayesrace | 23 | 2965 | <gh_stars>10-100
from bayes_race.pp.pure_pursuit import purePursuit | 1.0625 | 1 |
pysteam/evaluator/vector_space_error_eval.py | utiasASRL/pysteam | 5 | 2966 | <reponame>utiasASRL/pysteam
from typing import Optional
import numpy as np
from . import Evaluator
from ..state import VectorSpaceStateVar
class VectorSpaceErrorEval(Evaluator):
"""Error evaluator for a measured vector space state variable"""
def __init__(self, meas: np.ndarray, state_vec: VectorSpaceStateVar) -> None:
super().__init__()
self._meas: np.ndarray = meas
self._state_vec: VectorSpaceStateVar = state_vec
def is_active(self):
return not self._state_vec.locked
def evaluate(self, lhs: Optional[np.ndarray] = None):
error = self._meas - self._state_vec.value
if lhs is None:
return error
assert lhs.shape[-1] == self._state_vec.perturb_dim
jacs = dict()
if not self._state_vec.locked:
jacs = {self._state_vec.key: -lhs}
return error, jacs | 2.578125 | 3 |
torch_geometric/nn/unpool/__init__.py | mwussow/pytorch_geometric | 13 | 2967 | <reponame>mwussow/pytorch_geometric
from .knn_interpolate import knn_interpolate
__all__ = [
'knn_interpolate',
]
| 1.328125 | 1 |
bc4py/bip32/utils.py | namuyan/bc4py | 12 | 2968 | from bc4py_extension import PyAddress
import hashlib
def is_address(ck: PyAddress, hrp, ver):
"""check bech32 format and version"""
try:
if ck.hrp != hrp:
return False
if ck.version != ver:
return False
except ValueError:
return False
return True
def get_address(pk, hrp, ver) -> PyAddress:
"""get address from public key"""
identifier = hashlib.new('ripemd160', hashlib.sha256(pk).digest()).digest()
return PyAddress.from_param(hrp, ver, identifier)
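# Illustrative sketch (not part of the original module): deriving an address from a
# 33-byte compressed public key. The key bytes and the hrp value are hypothetical, and
# hashlib must provide ripemd160 for this to work.
#
# pk = bytes.fromhex('02' + '11' * 32)
# ck = get_address(pk, hrp='test', ver=0)
# assert is_address(ck, hrp='test', ver=0)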
def convert_address(ck: PyAddress, hrp, ver) -> PyAddress:
"""convert address's version"""
return PyAddress.from_param(hrp, ver, ck.identifier())
def dummy_address(dummy_identifier) -> PyAddress:
assert len(dummy_identifier) == 20
return PyAddress.from_param('dummy', 0, dummy_identifier)
__all__ = [
"is_address",
"get_address",
"convert_address",
"dummy_address",
]
| 2.609375 | 3 |
cubi_tk/snappy/kickoff.py | LaborBerlin/cubi-tk | 0 | 2969 | <reponame>LaborBerlin/cubi-tk<filename>cubi_tk/snappy/kickoff.py
"""``cubi-tk snappy kickoff``: kickoff SNAPPY pipeline."""
import argparse
import os
import subprocess
import typing
from logzero import logger
from toposort import toposort
from . import common
from cubi_tk.exceptions import ParseOutputException
def run(
args, _parser: argparse.ArgumentParser, _subparser: argparse.ArgumentParser
) -> typing.Optional[int]:
logger.info("Try to find SNAPPY pipeline directory...")
try:
path = common.find_snappy_root_dir(args.path or os.getcwd(), common.DEPENDENCIES.keys())
except common.CouldNotFindPipelineRoot:
return 1
# TODO: this assumes standard naming which is a limitation...
logger.info("Looking for pipeline directories (assuming standard naming)...")
logger.debug("Looking in %s", path)
step_set = {name for name in common.DEPENDENCIES if (path / name).exists()}
steps: typing.List[str] = []
for names in toposort({k: set(v) for k, v in common.DEPENDENCIES.items()}):
steps += [name for name in names if name in step_set]
logger.info("Will run the steps: %s", ", ".join(steps))
logger.info("Submitting with sbatch...")
jids: typing.Dict[str, str] = {}
for step in steps:
dep_jids = [jids[dep] for dep in common.DEPENDENCIES[step] if dep in jids]
cmd = ["sbatch"]
if dep_jids:
cmd += ["--dependency", "afterok:%s" % ":".join(map(str, dep_jids))]
cmd += ["pipeline_job.sh"]
logger.info("Submitting step %s: %s", step, " ".join(cmd))
if args.dry_run:
jid = "<%s>" % step
else:
stdout_raw = subprocess.check_output(cmd, cwd=str(path / step), timeout=args.timeout)
stdout = stdout_raw.decode("utf-8")
if not stdout.startswith("Submitted batch job "):
raise ParseOutputException("Did not understand sbatch output: %s" % stdout)
jid = stdout.split()[-1]
logger.info(" => JID: %s", jid)
jids[step] = jid
return None
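# Illustrative sketch of the ordering step above (not part of the original module):
# toposort() yields dependency "levels"; flattening them gives a valid submission order.
# The step names below are hypothetical.
#
# from toposort import toposort
# deps = {"step_b": {"step_a"}, "step_a": set()}
# order = [name for level in toposort(deps) for name in level]
# # order == ['step_a', 'step_b']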
def setup_argparse(parser: argparse.ArgumentParser) -> None:
"""Setup argument parser for ``cubi-tk snappy pull-sheet``."""
parser.add_argument("--hidden-cmd", dest="snappy_cmd", default=run, help=argparse.SUPPRESS)
parser.add_argument(
"--dry-run",
"-n",
default=False,
action="store_true",
help="Perform dry-run, do not do anything.",
)
parser.add_argument(
"--timeout", default=10, type=int, help="Number of seconds to wait for commands."
)
parser.add_argument(
"path",
nargs="?",
help="Path into SNAPPY directory (below a directory containing .snappy_pipeline).",
)
| 2.296875 | 2 |
tests/test_autotuner.py | RajatRasal/devito | 0 | 2970 | <filename>tests/test_autotuner.py
from __future__ import absolute_import
from functools import reduce
from operator import mul
try:
from StringIO import StringIO
except ImportError:
# Python3 compatibility
from io import StringIO
import pytest
from conftest import skipif_yask
import numpy as np
from devito import Grid, Function, TimeFunction, Eq, Operator, configuration, silencio
from devito.logger import logger, logging
@silencio(log_level='DEBUG')
@skipif_yask
@pytest.mark.parametrize("shape,expected", [
((30, 30), 17),
((30, 30, 30), 21)
])
def test_at_is_actually_working(shape, expected):
"""
Check that autotuning is actually running when switched on,
in both 2D and 3D operators.
"""
grid = Grid(shape=shape)
buffer = StringIO()
temporary_handler = logging.StreamHandler(buffer)
logger.addHandler(temporary_handler)
infield = Function(name='infield', grid=grid)
infield.data[:] = np.arange(reduce(mul, shape), dtype=np.int32).reshape(shape)
outfield = Function(name='outfield', grid=grid)
stencil = Eq(outfield.indexify(), outfield.indexify() + infield.indexify()*3.0)
op = Operator(stencil, dle=('blocking', {'blockinner': True, 'blockalways': True}))
# Expect 4 timed autotuning attempts for the given shape (matching the assertion below)
op(infield=infield, outfield=outfield, autotune=True)
out = [i for i in buffer.getvalue().split('\n') if 'took' in i]
assert len(out) == 4
# Now try the same with aggressive autotuning, which tries 9 more cases
configuration['autotuning'] = 'aggressive'
op(infield=infield, outfield=outfield, autotune=True)
out = [i for i in buffer.getvalue().split('\n') if 'took' in i]
assert len(out) == expected
configuration['autotuning'] = configuration._defaults['autotuning']
logger.removeHandler(temporary_handler)
temporary_handler.flush()
temporary_handler.close()
buffer.flush()
buffer.close()
@silencio(log_level='DEBUG')
@skipif_yask
def test_timesteps_per_at_run():
"""
Check that each autotuning run (ie with a given block shape) takes
``autotuning.core.options['at_squeezer']`` timesteps, for an operator
performing the increment ``a[t + timeorder, ...] = f(a[t, ...], ...)``.
"""
from devito.core.autotuning import options
buffer = StringIO()
temporary_handler = logging.StreamHandler(buffer)
logger.addHandler(temporary_handler)
shape = (30, 30, 30)
grid = Grid(shape=shape)
x, y, z = grid.dimensions
t = grid.stepping_dim
# Function
infield = Function(name='infield', grid=grid)
infield.data[:] = np.arange(reduce(mul, shape), dtype=np.int32).reshape(shape)
outfield = Function(name='outfield', grid=grid)
stencil = Eq(outfield.indexify(), outfield.indexify() + infield.indexify()*3.0)
op = Operator(stencil, dle=('blocking', {'blockalways': True}))
op(infield=infield, outfield=outfield, autotune=True)
out = [i for i in buffer.getvalue().split('\n') if 'took' in i]
assert len(out) == 4
assert all('in 1 timesteps' in i for i in out)
buffer.truncate(0)
# TimeFunction with increasing time order; increasing the time order
# shouldn't affect how many iterations the autotuner is gonna run
for to in [1, 2, 4]:
infield = TimeFunction(name='infield', grid=grid, time_order=to)
infield.data[:] = np.arange(reduce(mul, infield.shape),
dtype=np.int32).reshape(infield.shape)
outfield = TimeFunction(name='outfield', grid=grid, time_order=to)
stencil = Eq(outfield.indexed[t + to, x, y, z],
outfield.indexify() + infield.indexify()*3.0)
op = Operator(stencil, dle=('blocking', {'blockalways': True}))
op(infield=infield, outfield=outfield, t=2, autotune=True)
out = [i for i in buffer.getvalue().split('\n') if 'took' in i]
assert len(out) == 4
assert all('in %d timesteps' % options['at_squeezer'] in i for i in out)
buffer.truncate(0)
logger.removeHandler(temporary_handler)
temporary_handler.flush()
temporary_handler.close()
buffer.flush()
buffer.close()
| 2.125 | 2 |
projects/CharGrid/data/bizcard2coco.py | timctho/detectron2-chargrid | 3 | 2971 | from data.data_reader import BIZCARD_LABEL_MAP, BizcardDataParser
import argparse
from pathlib import Path
import os
import json
import cv2
import numpy as np
def convert_bizcard_to_coco_format(image_dir, json_dir, id_list, out_dir, out_name):
coco_json = {}
images = []
annotations = []
categories = []
for _, key in enumerate(BIZCARD_LABEL_MAP.keys()):
categories.append({
'id': BIZCARD_LABEL_MAP[key],
'name': key
})
with open(id_list) as fp:
ids = fp.readlines()
for idx, file_id in enumerate(ids):
file_id = Path(file_id.strip())
print(idx, file_id)
if not (image_dir / file_id).with_suffix('.jpg').exists():
file_id = file_id.with_suffix('.jpeg')
else:
file_id = file_id.with_suffix('.jpg')
height, width = cv2.imread(str(image_dir / file_id)).shape[:2]
images.append({
'file_name': str(file_id),
'id': idx,
'height': height,
'width': width
})
try:
gt = BizcardDataParser.parse_data(str((json_dir / file_id).with_suffix('.json')), str(image_dir / file_id))[
0]
for word in gt.words:
anno = {
'id': len(annotations),
'image_id': idx,
'bbox': [word.bbox.min_x, word.bbox.min_y, (word.bbox.max_x - word.bbox.min_x),
(word.bbox.max_y - word.bbox.min_y)],
'segmentation': [word.bbox.val],
'category_id': word.label,
'iscrowd': 0,
'area': cv2.contourArea(np.reshape(word.bbox.val, [-1, 2]).astype(np.float32))
}
annotations.append(anno)
except Exception as e:
print(e)
print(str(image_dir / file_id))
coco_json['images'] = images
coco_json['annotations'] = annotations
coco_json['categories'] = categories
with open(Path(out_dir, out_name), 'w') as f:
json.dump(coco_json, f)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--img_dir', type=str)
parser.add_argument('--gt_dir', type=str)
parser.add_argument('--data_list', type=str)
parser.add_argument('--out_dir', type=str)
parser.add_argument('--out_name', type=str)
args = parser.parse_args()
if not Path(args.out_dir).exists():
Path(args.out_dir).mkdir()
convert_bizcard_to_coco_format(
Path(args.img_dir),
Path(args.gt_dir),
args.data_list,
args.out_dir,
args.out_name)
| 2.578125 | 3 |
deckz/cli/run.py | m09/deckz | 0 | 2972 | <reponame>m09/deckz
from pathlib import Path
from typing import List, Optional
from typer import Argument
from deckz.cli import app
from deckz.paths import Paths
from deckz.running import run as running_run
@app.command()
def run(
targets: Optional[List[str]] = Argument(None),
handout: bool = True,
presentation: bool = True,
print: bool = True,
deck_path: Path = Path("."),
) -> None:
"""Compile main targets."""
paths = Paths.from_defaults(deck_path)
running_run(
paths=paths,
build_handout=handout,
build_presentation=presentation,
build_print=print,
target_whitelist=targets,
)
| 2.359375 | 2 |
postgresqleu/confreg/templatetags/miscutil.py | dlangille/pgeu-system | 0 | 2973 | <gh_stars>0
from django import template
register = template.Library()
@register.filter(name='isboolean')
def isboolean(value):
return isinstance(value, bool)
@register.filter(name='vartypename')
def vartypename(value):
return type(value).__name__
| 2.0625 | 2 |
chat.py | rchampa/chat-server | 0 | 2974 | import asyncio
import contextvars
import aioredis
import uvloop
from aioredis import Redis
from fastapi import FastAPI
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.staticfiles import StaticFiles
from RLog import rprint
from routers import apirest, websockets
REDIS_HOST = 'redis'
REDIS_PORT = 6379
PORT = 9080
HOST = "0.0.0.0"
cvar_redis = contextvars.ContextVar('redis', default=None)
class CustomHeaderMiddleware(BaseHTTPMiddleware):
def __init__(self, app, header_value='Example'):
rprint('__init__')
super().__init__(app)
self.header_value = header_value
async def dispatch(self, request, call_next):
response = await call_next(request)
response.headers['Custom'] = self.header_value
return response
# uvloop is written in Cython and is built on top of libuv http://magic.io/blog/uvloop-blazing-fast-python-networking/
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
app = FastAPI()
app.mount("/static", StaticFiles(directory="static"), name="static")
app.add_middleware(CustomHeaderMiddleware)
app.include_router(apirest.router)
app.include_router(websockets.router)
@app.on_event("startup")
async def handle_startup() -> None:
rprint("startup")
try:
pool = await aioredis.create_redis_pool((REDIS_HOST, REDIS_PORT), encoding='utf-8', maxsize=20)
cvar_redis.set(pool)
rprint("Connected to Redis on ", REDIS_HOST, REDIS_PORT)
except ConnectionRefusedError as e:
rprint('cannot connect to redis on:', REDIS_HOST, REDIS_PORT)
return
@app.on_event("shutdown")
async def handle_shutdown() -> None:
if cvar_redis.get() is not None:
redis: Redis = cvar_redis.get()
redis.close()
await redis.wait_closed()
rprint("closed connection Redis on ", REDIS_HOST, REDIS_PORT)
else:
rprint("ERROR: cvar_redis.get() devuelve NONE")
if __name__ == "__main__":
import uvicorn
rprint("Starting app")
rprint(dir(app))
rprint(app.url_path_for('websocket_endpoint'))
uvicorn.run('chat:app', host=HOST, port=PORT, log_level='info', reload=True)#, uds='uvicorn.sock')
| 2.21875 | 2 |
cli.py | abel-bernabeu/facecompressor | 2 | 2975 | <reponame>abel-bernabeu/facecompressor<gh_stars>1-10
import argparse
import autoencoder
def addTrainablesArg(parser):
parser.add_argument('--model', dest='model', help='Trained model', default='model.pt')
def addExchangeArg(parser):
parser.add_argument('--exchange', dest='exchange', help='File with exchanged data', required=True)
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest="action")
encode_parser = subparsers.add_parser('encode')
addTrainablesArg(encode_parser)
encode_parser.add_argument('--input', dest='input', help='Input image file name', required=True)
addExchangeArg(encode_parser)
decode_parser = subparsers.add_parser('decode')
addTrainablesArg(decode_parser)
addExchangeArg(decode_parser)
decode_parser.add_argument('--output', dest='output', help='Output image file name', required=True)
opts = parser.parse_args()
if opts.action == 'encode':
autoencoder.encode(opts.model, opts.input, opts.exchange)
elif opts.action == 'decode':
autoencoder.decode(opts.model, opts.exchange, opts.output)
| 2.609375 | 3 |
lib/bridgedb/email/request.py | liudonghua123/bridgedb | 0 | 2976 | <filename>lib/bridgedb/email/request.py
# -*- coding: utf-8; test-case-name: bridgedb.test.test_email_request; -*-
#_____________________________________________________________________________
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>> 0xA3ADB67A2CDB8B35
# <NAME> <<EMAIL>>
# please also see AUTHORS file
# :copyright: (c) 2007-2015, The Tor Project, Inc.
# (c) 2013-2015, Isis Lovecruft
# :license: see LICENSE for licensing information
#_____________________________________________________________________________
"""
.. py:module:: bridgedb.email.request
:synopsis: Classes for parsing and storing information about requests for
bridges which are sent to the email distributor.
bridgedb.email.request
======================
Classes for parsing and storing information about requests for bridges
which are sent to the email distributor.
::
bridgedb.email.request
| |_ determineBridgeRequestOptions - Figure out which filters to apply, or
| offer help.
|_ EmailBridgeRequest - A request for bridges which was received through
the email distributor.
..
"""
from __future__ import print_function
from __future__ import unicode_literals
import logging
import re
from bridgedb import bridgerequest
from bridgedb.Dist import EmailRequestedHelp
from bridgedb.Dist import EmailRequestedKey
#: A regular expression for matching the Pluggable Transport method TYPE in
#: emailed requests for Pluggable Transports.
TRANSPORT_REGEXP = ".*transport ([a-z][_a-z0-9]*)"
TRANSPORT_PATTERN = re.compile(TRANSPORT_REGEXP)
#: A regular expression that matches country codes in requests for unblocked
#: bridges.
UNBLOCKED_REGEXP = ".*unblocked ([a-z]{2,4})"
UNBLOCKED_PATTERN = re.compile(UNBLOCKED_REGEXP)
def determineBridgeRequestOptions(lines):
"""Figure out which :class:`Bridges.BridgeFilter`s to apply, or offer help.
.. note:: If any ``'transport TYPE'`` was requested, or bridges not
blocked in a specific CC (``'unblocked CC'``), then the ``TYPE``
and/or ``CC`` will *always* be stored as a *lowercase* string.
:param list lines: A list of lines from an email, including the headers.
:raises EmailRequestedHelp: if the client requested help.
:raises EmailRequestedKey: if the client requested our GnuPG key.
:rtype: :class:`EmailBridgeRequest`
:returns: A :class:`~bridgerequst.BridgeRequest` with all of the requested
parameters set. The returned ``BridgeRequest`` will have already had
its filters generated via :meth:`~EmailBridgeRequest.generateFilters`.
"""
request = EmailBridgeRequest()
skippedHeaders = False
for line in lines:
line = line.strip().lower()
# Ignore all lines before the first empty line:
if not line: skippedHeaders = True
if not skippedHeaders: continue
if ("help" in line) or ("halp" in line):
raise EmailRequestedHelp("Client requested help.")
if "get" in line:
request.isValid(True)
logging.debug("Email request was valid.")
if "key" in line:
request.wantsKey(True)
raise EmailRequestedKey("Email requested a copy of our GnuPG key.")
if "ipv6" in line:
request.withIPv6()
if "transport" in line:
request.withPluggableTransportType(line)
if "unblocked" in line:
request.withoutBlockInCountry(line)
logging.debug("Generating hashring filters for request.")
request.generateFilters()
return request
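# A minimal usage sketch with a hypothetical email (header lines, then an empty
# line, then the body which is actually parsed):
#
#     lines = ["From: user@example.com", "", "get transport obfs3", "get unblocked ir"]
#     request = determineBridgeRequestOptions(lines)
#     request.isValid()      # True
#     request.transports     # ['obfs3']
#     request.notBlockedIn   # ['ir']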
class EmailBridgeRequest(bridgerequest.BridgeRequestBase):
"""We received a request for bridges through the email distributor."""
def __init__(self):
"""Process a new bridge request received through the
:class:`~bridgedb.Dist.EmailBasedDistributor`.
"""
super(EmailBridgeRequest, self).__init__()
self._isValid = False
self._wantsKey = False
def isValid(self, valid=None):
"""Get or set the validity of this bridge request.
If called without parameters, this method will return the current
state, otherwise (if called with the **valid** parameter), it will set
the current state of validity for this request.
:param bool valid: If given, set the validity state of this
request. Otherwise, get the current state.
"""
if valid is not None:
self._isValid = bool(valid)
return self._isValid
def wantsKey(self, wantsKey=None):
"""Get or set whether this bridge request wanted our GnuPG key.
If called without parameters, this method will return the current
state, otherwise (if called with the **wantsKey** parameter set), it
will set the current state for whether or not this request wanted our
key.
:param bool wantsKey: If given, set the validity state of this
request. Otherwise, get the current state.
"""
if wantsKey is not None:
self._wantsKey = bool(wantsKey)
return self._wantsKey
def withoutBlockInCountry(self, line):
"""This request was for bridges not blocked in **country**.
Add any country code found in the **line** to the list of
        ``notBlockedIn``. Currently, a request for unblocked bridges is
        recognized if the email line contains the ``'unblocked'`` command.
        :param str line: The line from the email wherein the client requested
            bridges which are not blocked in a particular country.
"""
unblocked = None
logging.debug("Parsing 'unblocked' line: %r" % line)
try:
unblocked = UNBLOCKED_PATTERN.match(line).group(1)
except (TypeError, AttributeError):
pass
if unblocked:
self.notBlockedIn.append(unblocked)
logging.info("Email requested bridges not blocked in: %r"
% unblocked)
def withPluggableTransportType(self, line):
"""This request included a specific Pluggable Transport identifier.
Add any Pluggable Transport method TYPE found in the **line** to the
list of ``transports``. Currently, a request for a transport is
recognized if the email line contains the ``'transport'`` command.
:param str line: The line from the email wherein the client
requested some type of Pluggable Transport.
"""
transport = None
logging.debug("Parsing 'transport' line: %r" % line)
try:
transport = TRANSPORT_PATTERN.match(line).group(1)
except (TypeError, AttributeError):
pass
if transport:
self.transports.append(transport)
logging.info("Email requested transport type: %r" % transport)
| 1.9375 | 2 |
test/com/facebook/buck/skylark/parser/testdata/rule_with_wrong_types/attr_value_type/subdir/foo.bzl | Unknoob/buck | 8,027 | 2977 | <gh_stars>1000+
""" Module docstring """
def _impl(_ctx):
""" Function docstring """
pass
some_rule = rule(
attrs = {
"attr1": attr.int(
default = 2,
mandatory = False,
),
"attr2": 5,
},
implementation = _impl,
)
| 1.929688 | 2 |
src/printReport.py | griimx/Summer-2016 | 0 | 2978 | <filename>src/printReport.py
from __future__ import print_function
from connection import *
from jinja2 import Environment, FileSystemLoader
import webbrowser
def print_report(id):
env = Environment(loader=FileSystemLoader('.'))
template = env.get_template("src/template.html")
cursor = db.cursor(MySQLdb.cursors.DictCursor)
sql = "SELECT e.*, b.*, d.`depName` "
sql += "FROM `employees` e, `baccounts` b, `departments` d "
sql +="WHERE e.`empID` = b.`empdb_empID` "
sql +="AND e.`depDB_depID` = d.`depID` "
sql +="AND e.`empID` = '"+ id +"'"
# print(sql)
	cursor.execute(sql, (id,))
result = cursor.fetchall()
# print(result[0])
result = result[0]
print(result)
template_vars = {"empID" : result['empID'],
"firstName" : result['firstName'],
"lastName" : result['lastName'],
"address" : result['address'],
"pin" : result['pin'],
"state" : result['state'],
"adharID" : result['adharID'],
"panID" : result['panID'],
"designation" : result['designation'],
"unit" : result['unit'],
"email" : result['email'],
"mobile" : result['mobile'],
"depName" : result['depName'],
"IFSC" : result['IFSC'],
"ACNo" : result['ACNo'],
"BranchAdd" : result['BranchAdd']
}
content = template.render(template_vars)
with open('print.html', 'w') as static_file:
static_file.write(content)
webbrowser.open_new_tab('print.html')
# self.entry_text(self.entry_name, result['firstName']+" "+result['lastName'] )
# self.entry_text(self.entry_EmpID, result['empID'])
# self.entry_text(self.entry_EmpName, result['firstName']+" "+result['lastName'])
# self.entry_text(self.entry_personalno, result['empID'])
# self.entry_text(self.entry_address,result['address'] )
# self.entry_text(self.entry_pin, result['pin'])
# self.entry_text(self.entry_state, result['state'])
# self.entry_text(self.entry_adhar, result['adharID'])
# self.entry_text(self.entry_pan, result['panID'])
# self.entry_text(self.entry_designation, result['designation'])
# self.entry_text(self.entry_unit, result['unit'])
# self.entry_text(self.entry_emailid, result['email'])
# self.entry_text(self.entry_mobile, result['mobile'])
# self.entry_text(self.entry_department, result['depName'])
# self.entry_text(self.entry_ifsc, result['IFSC'])
# self.entry_text(self.enrtry_acno, result['ACNo'])
# self.entry_text(self.entry_branch, result['BranchAdd'])
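# Example call, as a sketch ('42' is a placeholder for an existing empID):
#
#     print_report('42')
#
# which renders src/template.html with the employee, department and bank account
# record, writes the result to print.html and opens it in the default browser.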
| 2.703125 | 3 |
packages/pyre/schemata/Container.py | avalentino/pyre | 25 | 2979 | # -*- coding: utf-8 -*-
#
# <NAME>
# orthologue
# (c) 1998-2021 all rights reserved
#
# superclass
from .Schema import Schema
# declaration
class Container(Schema):
"""
The base class for type declarators that are sequences of other types
"""
# constants
typename = 'container' # the name of my type
isContainer = True
@property
def container(self):
"""
The default container represented by this schema
"""
# complain that the subclass is not constructed properly
raise NotImplementedError(
"class {.__name__} must define a {container} type".format(type(self)))
# interface
def coerce(self, value, **kwds):
"""
Convert {value} into an iterable
"""
# get the worker to build an iterable, cast it into my container type and return it
return self.container(self._coerce(value=value, **kwds))
def render(self, renderer, value, workload):
"""
Render {value} using {renderer}
"""
# get my schema
schema = self.schema
# render just my name
yield renderer.trait(name=self.name, value='')
# go through the items
for item in value:
# ask my schema to render each one
entry = ','.join(schema.render(renderer=renderer, value=item,
workload=workload, incognito=True))
# and put it on a separate line
yield renderer.value(value=f"{entry},")
# all done
return
# meta-methods
def __init__(self, default=object, schema=Schema(), **kwds):
# adjust the default; carefully, so we don't all end up using the same global container
# checking for {None} is not appropriate here; the user may want {None} as the default
# value; we need a way to know that {default} was not supplied: use a TYPE (in this
# case object) as the marker
default = self.container() if default is object else default
# chain up with my default
super().__init__(default=default, **kwds)
# save my schema
self.schema = schema
# all done
return
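# a hypothetical concrete subclass, shown only to illustrate how {Container} is
# meant to be specialized; pyre's real sequence schemata supply their own
# {_coerce} with richer string handling:
#
#     class sequence(Container):
#         """a container whose values are stored in a list"""
#         typename = 'sequence'
#         container = list
#
#         def _coerce(self, value, **kwds):
#             # accept comma separated strings or any other iterable
#             if isinstance(value, str):
#                 value = value.split(',')
#             # hand each item to my item schema
#             yield from (self.schema.coerce(item) for item in value)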
# end of file
| 2.625 | 3 |
electronicparsers/exciting/parser.py | nomad-coe/electronic-parsers | 0 | 2980 | #
# Copyright The NOMAD Authors.
#
# This file is part of NOMAD.
# See https://nomad-lab.eu for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import os
import re
import logging
from nomad.units import ureg
from nomad.parsing.file_parser import TextParser, Quantity, XMLParser, DataTextParser
from nomad.datamodel.metainfo.simulation.run import Run, Program
from nomad.datamodel.metainfo.simulation.method import (
Method, DFT, Electronic, Smearing, XCFunctional, Functional,
GW as GWMethod, Scf, BasisSet
)
from nomad.datamodel.metainfo.simulation.system import (
System, Atoms
)
from nomad.datamodel.metainfo.simulation.calculation import (
Calculation, Dos, DosValues, BandStructure, BandEnergies, Energy, EnergyEntry, Charges,
Forces, ForcesEntry, ScfIteration, BandGap
)
from nomad.datamodel.metainfo.workflow import Workflow, GeometryOptimization
from .metainfo.exciting import x_exciting_section_MT_charge_atom, x_exciting_section_MT_moment_atom,\
x_exciting_section_spin, x_exciting_section_fermi_surface,\
x_exciting_section_atoms_group
re_float = r'[-+]?\d+\.\d*(?:[Ee][-+]\d+)?'
class GWInfoParser(TextParser):
def __init__(self):
super().__init__(None)
def init_quantities(self):
self._quantities = []
def str_to_frequency(val_in):
val = [v.split() for v in val_in.split('\n')]
val = np.transpose(np.array([v for v in val if len(v) == 3], float))
return dict(
number=np.array(val[0], dtype=int), values=val[1] * ureg.hartree,
weights=val[2])
# TODO Read also input parameters here if input_GW.xml does not exist
self._quantities.append(
Quantity(
'frequency_data', r'frequency list:\s*\<\s*#\s*freqs\s*weight\s*>\s*([\d\.Ee\s\-]+)',
str_operation=str_to_frequency, repeats=False)
)
self._quantities.append(
Quantity(
'fermi_energy', r'\-\s*G0W0.+\-\s*\-+\s*[\s\S]*?Fermi [Ee]nergy\s*[:=](\s*-?[\d\.]+)\s',
unit=ureg.hartree, repeats=False)
)
self._quantities.append(
Quantity(
'direct_band_gap', r'\-\s*G0W0\s*\-\s*\-+\s*[\s\S]*?Direct BandGap\s*\((?P<__unit>\w+)\)\s*\:(\s*[\d\.]+)\s',
repeats=False)
)
self._quantities.append(
Quantity(
'fundamental_band_gap', r'\-\s*G0W0\s*\-\s*\-+\s*[\s\S]*?Fundamental BandGap\s*\((?P<__unit>\w+)\)\s*\:(\s*[\d\.]+)\s',
repeats=False)
)
self._quantities.append(
Quantity(
'optical_band_gap', r'\-\s*G0W0\s*\-\s*\-+\s*[\s\S]*?Optical BandGap\s*\((?P<__unit>\w+)\)\s*\:(\s*[\d\.]+)\s',
repeats=False)
)
class ExcitingEvalqpParser(TextParser):
def __init__(self):
super().__init__(None)
def init_quantities(self):
self._quantities = []
def str_to_eigenvalue(val_in):
val = val_in.strip().split('\n')
kpts = np.array(val[0].split(), dtype=float)
keys = val[1].split()
eigs = np.transpose(np.array([v.split() for v in val[2:]], dtype=float))
eigs = {keys[i]: eigs[i] for i in range(len(keys))}
return [kpts, eigs]
self._quantities.append(
Quantity(
'kpoints_eigenvalues', r'\s*k\-point \#\s*\d+:\s*([\d\s\.\-]+)([ \w\(\)]+\n)([\s\d\.\-Ee]+)',
str_operation=str_to_eigenvalue, repeats=True))
class BandstructureDatParser(DataTextParser):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._nspin = kwargs.get('nspin', None)
self._energy_unit = kwargs.get('energy_unit', None)
def init_parameters(self):
# TODO make a parent clas for bandstructure dat and xml
self._nspin = None
self._nkpts_segment = None
self._neigs_segment = None
self._vertices = None
self._distances = None
self._band_energies = None
self._band_k_points = None
@property
def band_energies(self):
if self._band_energies is None:
if self.data is None:
return
data = np.transpose(self.data)
n_kpoints = int(max(data[1]))
bands = data[6:]
bands = np.reshape(bands, (
self.number_of_spin_channels, self.number_of_band_segment_eigenvalues, n_kpoints))
self._band_energies = []
start = 0
for nkpts_segment in self.number_of_k_points_per_segment:
end = start + nkpts_segment
band_energy = np.array([np.transpose(band)[start:end] for band in bands])
if self._energy_unit:
band_energy = band_energy * self._energy_unit
self._band_energies.append(band_energy)
start = end
return self._band_energies
@property
def band_k_points(self):
if self._band_k_points is None:
data = np.transpose(self.data)
self._band_k_points = []
start = 0
for nkpts_segment in self.number_of_k_points_per_segment:
end = start + nkpts_segment
self._band_k_points.append(
np.transpose(data[2:5])[start:end])
start = end
return self._band_k_points
@property
def distances(self):
if self._distances is None:
data = np.transpose(self.data)
self._distances = data[5][:int(max(data[1]))]
return self._distances
@property
def number_of_spin_channels(self):
if self._nspin is None:
self._nspin = np.shape(np.transpose(self.data))[0] - 6
return self._nspin
@property
def number_of_k_points_per_segment(self):
if self._nkpts_segment is None:
self._nkpts_segment = []
count = 1
for i in range(1, len(self.distances)):
if self.distances[i] == self.distances[i - 1]:
self._nkpts_segment.append(count)
count = 1
else:
count += 1
self._nkpts_segment.append(count)
return self._nkpts_segment
@property
def number_of_band_segment_eigenvalues(self):
if self._neigs_segment is None:
data = np.transpose(self.data)
self._neigs_segment = int(max(data[0]))
return self._neigs_segment
class BandOutParser(DataTextParser):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._nspin = kwargs.get('nspin', None)
self._energy_unit = kwargs.get('energy_unit', None)
def init_parameters(self):
self._nspin = None
self._distances = None
self._band_energies = None
self._neigs_segment = None
self._nkpts_segment = None
@property
def band_energies(self):
if self._band_energies is None:
data = np.transpose(self.data)
n_kpoints = np.where(data[0] == data[0][0])[0][1]
bands = data[1:]
bands = np.reshape(bands, (
self.number_of_spin_channels, self.number_of_band_segment_eigenvalues, n_kpoints))
self._band_energies = []
start = 0
for nkpts_segment in self.number_of_k_points_per_segment:
end = start + nkpts_segment
band_energy = np.array([np.transpose(band)[start:end] for band in bands])
if self._energy_unit:
band_energy = band_energy * self._energy_unit
self._band_energies.append(band_energy)
start = end
return self._band_energies
@property
def distances(self):
if self._distances is None:
dist = np.transpose(self.data)[0]
n_k_points = np.where(dist == dist[0])[0][1]
self._distances = dist[:n_k_points]
return self._distances
@property
def number_of_spin_channels(self):
if self._nspin is None:
self._nspin = np.shape(np.transpose(self.data)[1:])[0]
return self._nspin
@property
def number_of_k_points_per_segment(self):
if self._nkpts_segment is None:
self._nkpts_segment = []
count = 1
for i in range(1, len(self.distances)):
if self.distances[i] == self.distances[i - 1]:
self._nkpts_segment.append(count)
count = 1
else:
count += 1
self._nkpts_segment.append(count)
return self._nkpts_segment
@property
def number_of_band_segment_eigenvalues(self):
if self._neigs_segment is None:
data = np.transpose(self.data)[0]
self._neigs_segment = len(np.where(data == data[0])[0])
return self._neigs_segment
class BandstructureXMLParser(XMLParser):
def __init__(self, **kwargs):
# TODO make a parent class for dos and bandstructure
super().__init__(None)
self._distance_key = 'distance'
self._coord_key = 'coord'
self._energy_key = 'eval'
self._vertex_key = 'vertex'
self._band_key = 'band'
self._atom_key = 'atom'
self._nspin = kwargs.get('nspin', None)
self._energy_unit = kwargs.get('energy_unit', None)
def init_parameters(self):
self._nspin = None
self._nkpts_segment = None
self._neigs_segment = None
self._bands = None
self._vertices = None
self._distances = None
self._species = None
@property
def distances(self):
if self._distances is None:
if not self.bands:
return
self._distances = [
point.attrib.get(self._distance_key) for point in self.bands[0][0]]
self._distances = np.array(self._distances, dtype=float)
return self._distances
@property
def bands(self):
if self._bands is None:
bands = self.root.findall('./%s' % self._band_key)
self._bands = []
if bands:
self._bands.append(bands)
# add atom-resolved
bands_atom = self.root.findall('./*/%s' % self._atom_key)
for band in bands_atom:
self._bands.append(band.findall('./%s' % self._band_key))
return self._bands
@property
def vertices(self):
if self._vertices is None:
self._vertices = self.root.findall('./%s' % self._vertex_key)
return self._vertices
@property
def number_of_spin_channels(self):
if self._nspin is None:
self._nspin = 1
return self._nspin
@property
def number_of_k_points_per_segment(self):
if self._nkpts_segment is None:
self._nkpts_segment = []
count = 1
for i in range(1, len(self.distances)):
if self.distances[i] == self.distances[i - 1]:
self._nkpts_segment .append(count)
count = 1
else:
count += 1
self._nkpts_segment.append(count)
return self._nkpts_segment
@property
def number_of_band_segment_eigenvalues(self):
if self._neigs_segment is None:
self._neigs_segment = len(self.bands[0]) // self.number_of_spin_channels
return self._neigs_segment
def parse(self, key):
if self._results is None:
self._results = dict()
if not self.bands:
return
if key == 'band_energies':
# TODO I am not certain about the format for the spin polarized case
# I cannot find an example bandstructure file
# atom-resolved bandstructure are added as separate section_k_band
res = []
for n in range(len(self.bands)):
res_n = []
start = 0
band_energies = np.zeros((
self.number_of_spin_channels, self.number_of_band_segment_eigenvalues,
len(self.distances)), dtype=float)
for i in range(len(self.bands[n])):
band_energies[i % self.number_of_spin_channels][i] = np.array(
[e.attrib.get(self._energy_key) for e in self.bands[n][i]])
for nkpts_segment in self.number_of_k_points_per_segment:
end = start + nkpts_segment
band_energy = np.array([
np.transpose(energy)[start:end] for energy in band_energies])
if self._energy_unit is not None:
band_energy = band_energy * self._energy_unit
res_n.append(band_energy)
start = end
res.append(res_n)
elif key == 'band_k_points':
res = []
for i in range(len(self.number_of_k_points_per_segment)):
start = np.array(
self.vertices[i].attrib.get(self._coord_key).split(), dtype=float)
end = np.array(
self.vertices[i + 1].attrib.get(self._coord_key).split(), dtype=float)
res.append(np.linspace(start, end, self.number_of_k_points_per_segment[i]))
elif key == 'band_segm_labels':
res = []
for i in range(len(self.vertices) - 1):
start = self.vertices[i].attrib.get('label')
end = self.vertices[i + 1].attrib.get('label')
res.append([
'\u0393' if start.lower() == 'gamma' else start,
'\u0393' if end.lower() == 'gamma' else end])
elif key == 'band_segm_start_end':
res = []
for i in range(len(self.number_of_k_points_per_segment)):
start = self.vertices[i].attrib.get(self._coord_key).split()
end = self.vertices[i + 1].attrib.get(self._coord_key).split()
res.append([start, end])
else:
res = None
self._results[key] = res
class DOSXMLParser(XMLParser):
def __init__(self, **kwargs):
super().__init__(None)
self._nspin_key = 'nspin'
self._totaldos_key = 'totaldos'
self._partialdos_key = 'partialdos'
self._diagram_key = 'diagram'
self._l_key = 'l'
self._m_key = 'm'
self._energy_key = 'e'
self._dos_key = 'dos'
self._unit_key = 'unit'
self._energy_unit = kwargs.get('energy_unit', None)
self._units_mapping = dict(hartree=ureg.hartree)
def init_parameters(self):
self._ndos = None
self._natoms = None
self._nspin = None
self._nlm = None
self._energies = None
self._total_dos = None
self._partial_dos = None
@property
def energy_unit(self):
if self._energy_unit is None:
axis = self.root.find('./axis')
if axis is None:
return
self._energy_unit = self._units_mapping.get(axis.attrib.get(self._unit_key).lower(), 1)
return self._energy_unit
@property
def number_of_spin_channels(self):
if self._nspin is None:
if not self.total_dos:
return
self._nspin = len(self.total_dos)
return self._nspin
@property
def number_of_atoms(self):
if self._natoms is None:
partial_dos = self.root.findall('./%s' % self._partialdos_key)
self._natoms = len(partial_dos)
return self._natoms
@property
def number_of_dos(self):
if self._ndos is None:
total_dos = self.root.find('./%s/%s' % (self._totaldos_key, self._diagram_key))
self._ndos = len(total_dos)
return self._ndos
@property
def number_of_lm(self):
if self._nlm is None:
if self.partial_dos is None:
return
self._nlm = 0
l_list = set([int(e.attrib.get(self._l_key)) for e in self.partial_dos])
for li in l_list:
self._nlm += 2 * li + 1
return self._nlm
@property
def total_dos(self):
if self._total_dos is None:
self._total_dos = self.root.findall('./%s/%s' % (self._totaldos_key, self._diagram_key))
return self._total_dos
@property
def partial_dos(self):
if self._partial_dos is None:
self._partial_dos = self.root.findall('./%s/%s' % (self._partialdos_key, self._diagram_key))
return self._partial_dos
@property
def energies(self):
if self._energies is None:
if self.total_dos is None:
return
self._energies = np.array(
[float(point.attrib.get(self._energy_key)) for point in self.total_dos[0]])
if self.energy_unit is not None:
self._energies = self._energies * self.energy_unit
return self._energies
def _get_dos(self, diagram):
dos = np.array(
[point.attrib.get(self._dos_key) for point in diagram], dtype=float)
return dos
def parse(self, key):
if self._results is None:
self._results = dict()
if 'total' in key:
if not self.total_dos:
return
res = np.zeros((self.number_of_spin_channels, self.number_of_dos))
for i in range(len(self.total_dos)):
spin = self.total_dos[i].attrib.get(self._nspin_key, i)
res[i] = self._get_dos(self._total_dos[i])
if self.energy_unit is not None:
res = res * (1 / self.energy_unit)
elif 'partial' in key:
if not self.partial_dos:
return
res = np.zeros((
self.number_of_lm, self.number_of_spin_channels, self.number_of_atoms, self.number_of_dos))
for i in range(len(self.partial_dos)):
spin = self.partial_dos[i].attrib.get(self._nspin_key, None)
if spin is None:
spin = (i % (self.number_of_spin_channels * self.number_of_lm)) // self.number_of_lm
else:
spin = int(spin) - 1
val_l = self.partial_dos[i].attrib.get(self._l_key, None)
val_m = self.partial_dos[i].attrib.get(self._m_key, None)
if val_l is None or val_m is None:
lm = i % self.number_of_lm
else:
                    # combined (l, m) index: lm = l**2 + l + m
                    lm = int(val_l) ** 2 + int(val_m) + int(val_l)
atom = i // (self.number_of_lm * self.number_of_spin_channels)
res[lm][spin][atom] = self._get_dos(self.partial_dos[i])
if self.energy_unit is not None:
res = res * (1 / self.energy_unit)
elif key == 'energies':
return self.energies
else:
res = None
self._results[key] = res
class ExcitingFermiSurfaceBxsfParser(TextParser):
def __init__(self):
super().__init__(None)
def init_quantities(self):
self._quantities = []
self._quantities.append(
Quantity(
'fermi_energy', r'Fermi Energy:\s*([\d\.]+)\s*', unit=ureg.hartree, repeats=False))
def str_to_band_parameters(val_in):
val = val_in.strip().split('\n')
nbands = int(val[0])
mesh = np.array(val[1].split(), dtype=int)
origin = np.array(val[2].split(), dtype=float)
vector = np.array([v.split() for v in val[3:6]], dtype=float)
return [nbands, mesh, origin, vector]
self._quantities.append(
Quantity(
'band_parameters', r'BANDGRID_3D_BANDS\s*([\d\.\-Ee\s]+)',
str_operation=str_to_band_parameters, repeats=False))
self._quantities.append(
Quantity(
'fermi_surface', r'BAND:\s*\d+\s*([\d\-\+\.Ee\s]+)\n *E*', unit=ureg.hartree,
repeats=True))
class ExcitingEigenvalueParser(TextParser):
def __init__(self):
super().__init__(None)
def init_quantities(self):
self._quantities = []
self._quantities.append(
Quantity(
'k_points', r'\s*\d+\s*([\d\.Ee\- ]+):\s*k\-point', repeats=True))
def str_to_eigenvalues(val_in):
val = val_in[:val_in.rfind('\n \n')].strip()
val = np.array([v.split() for v in val.split('\n')], dtype=float)
val = np.transpose(val)
occs = val[-1]
eigs = val[-2]
nspin = 2 if occs[0] == 1. else 1
data = dict()
data['occupancies'] = np.reshape(occs, (nspin, len(occs) // nspin))
data['eigenvalues'] = np.reshape(eigs, (nspin, len(eigs) // nspin))
return data
self._quantities.append(
Quantity(
'eigenvalues_occupancies', r'\(state\, eigenvalue and occupancy below\)\s*([\d\.Ee\-\s]+?(?:\n *\n))',
str_operation=str_to_eigenvalues, repeats=True))
class ExcitingGWOutParser(TextParser):
def __init__(self, mainfile, logger):
super().__init__(mainfile, logger=logger)
def init_quantities(self):
self._quantities = []
class ExcitingInfoParser(TextParser):
def __init__(self):
super().__init__(None)
def init_quantities(self):
re_symbol = re.compile(r'([A-Z][a-z]?)')
def str_to_array(val_in):
val = [v.split(':')[-1].split() for v in val_in.strip().split('\n')]
val = val[0] if len(val) == 1 else val
return np.array(val, dtype=float)
def str_to_atom_properties_dict(val_in):
unit = None
if 'charge' in val_in:
unit = ureg.elementary_charge
elif 'moment' in val_in:
unit = ureg.elementary_charge * ureg.bohr
val = val_in.strip().split('\n')
properties = dict()
atom_resolved = []
species = None
for v in val:
v = v.strip().split(':')
if len(v) < 2:
continue
elif v[0].startswith('species'):
species = re.search(re_symbol, v[-1]).group(1)
elif v[0].startswith('atom'):
v[0] = v[0].split()
v[1] = [float(vi) for vi in v[1].split()]
v[1] = v[1][0] if len(v[1]) == 1 else v[1]
if species is None:
species = v[0][2]
atom_resolved.append(((species, v[1] * unit)))
else:
vi = [float(vii) for vii in v[1].split()]
vi = vi[0] if len(vi) == 1 else vi
properties[v[0].strip()] = vi * unit
properties['atom_resolved'] = atom_resolved
return properties
def str_to_quantity_tolerances(val_in):
return val_in.strip().replace('(', '').replace(')', '').split()
def str_to_energy_dict(val_in):
val = val_in.strip().split('\n')
energies = dict()
for v in val:
v = v.split(':')
if len(v) < 2:
continue
energies[v[0].strip()] = float(v[1]) * ureg.hartree
return energies
self._quantities = [Quantity(
'program_version', r'\s*EXCITING\s*([\w\-\(\)\. ]+)\s*started', repeats=False,
dtype=str, flatten=False)]
initialization_quantities = [
Quantity(
'lattice_vectors',
r'Lattice vectors\s*[\(cartesian\)]*\s*:\s*([\-0-9\.\s]+)\n',
str_operation=str_to_array, unit=ureg.bohr, repeats=False, convert=False),
Quantity(
'lattice_vectors_reciprocal',
r'Reciprocal lattice vectors\s*[\(cartesian\)]*\s*:\s*([\-0-9\.\s]+)\n',
str_operation=str_to_array, unit=1 / ureg.bohr, repeats=False, convert=False),
]
self._system_keys_mapping = {
'x_exciting_unit_cell_volume': ('Unit cell volume', ureg.bohr ** 3),
'x_exciting_brillouin_zone_volume': ('Brillouin zone volume', 1 / ureg.bohr ** 3),
'x_exciting_number_of_atoms': ('Total number of atoms per unit cell', None),
'x_exciting_spin_treatment': ('Spin treatment', None),
'x_exciting_number_of_bravais_lattice_symmetries': ('Number of Bravais lattice symmetries', None),
'x_exciting_number_of_crystal_symmetries': ('Number of crystal symmetries', None),
'x_exciting_kpoint_grid': (r'k\-point grid', None),
'x_exciting_kpoint_offset': (r'k\-point offset', None),
'x_exciting_number_kpoints': (r'Total number of k\-points', None),
'x_exciting_rgkmax': (r'R\^MT\_min \* \|G\+k\|\_max \(rgkmax\)', None),
'x_exciting_species_rtmin': (r'Species with R\^MT\_min', None),
'x_exciting_gkmax': (r'Maximum \|G\+k\| for APW functions', 1 / ureg.bohr),
'x_exciting_gmaxvr': (r'Maximum \|G\| for potential and density', 1 / ureg.bohr),
'x_exciting_gvector_size': (r'G\-vector grid sizes', None),
'x_exciting_gvector_total': (r'Total number of G\-vectors', None),
'x_exciting_lmaxapw': (r' APW functions', None),
'x_exciting_nuclear_charge': ('Total nuclear charge', ureg.elementary_charge),
'x_exciting_electronic_charge': ('Total electronic charge', ureg.elementary_charge),
'x_exciting_core_charge_initial': ('Total core charge', ureg.elementary_charge),
'x_exciting_valence_charge_initial': ('Total valence charge', ureg.elementary_charge),
'x_exciting_wigner_radius': (r'Effective Wigner radius, r\_s', ureg.bohr),
'x_exciting_empty_states': ('Number of empty states', None),
'x_exciting_valence_states': ('Total number of valence states', None),
'x_exciting_hamiltonian_size': ('Maximum Hamiltonian size', None),
'x_exciting_pw': (r'Maximum number of plane\-waves', None),
'x_exciting_lo': (r'Total number of local\-orbitals', None)}
self._method_keys_mapping = {
'smearing_kind': ('Smearing scheme', None),
'smearing_width': ('Smearing width', None)}
for name, key_unit in self._system_keys_mapping.items():
initialization_quantities.append(
Quantity(
name, r'%s\s*:\s*([\s\S]*?)\n' % key_unit[0], unit=key_unit[1], repeats=False)
)
for name, key_unit in self._method_keys_mapping.items():
initialization_quantities.append(
Quantity(
name, r'%s\s*:\s*([\s\S]*?)\n' % key_unit[0], unit=key_unit[1], repeats=False)
)
initialization_quantities.append(Quantity(
'species',
rf'(Species : *\d+ *\(\w+\)[\s\S]+?{re_float} *{re_float} *{re_float}\n\s*\n)',
repeats=True, sub_parser=TextParser(quantities=[
Quantity('number', r'Species : *(\d+)', dtype=np.int32),
Quantity('symbol', r'\((\w+)\)'),
Quantity('file', r'parameters loaded from *: *(.+)'),
Quantity('name', r'name *: *(.+)'),
Quantity('nuclear_charge', rf'nuclear charge *: *({re_float})', dtype=np.float64, unit=ureg.elementary_charge),
Quantity('electronic_charge', rf'electronic charge *: *({re_float})', dtype=np.float64, unit=ureg.elementary_charge),
Quantity('atomic_mass', rf'atomic mass *: *({re_float})', dtype=np.float64, unit=ureg.electron_mass),
Quantity('muffin_tin_radius', rf'muffin-tin radius *: *({re_float})', dtype=np.float64, unit=ureg.bohr),
Quantity('radial_points', rf'radial points in muffin-tin *: *({re_float})', dtype=np.int32),
Quantity('positions_format', r'atomic positions \((.+?)\)', flatten=False),
Quantity(
'positions',
rf'\d+ : *({re_float}) *({re_float}) *({re_float})',
repeats=True, dtype=np.dtype(np.float64))])))
initialization_quantities.append(Quantity(
'potential_mixing', r'Using ([\w ]+) potential mixing', repeats=False, flatten=False)
)
initialization_quantities.append(Quantity(
'xc_functional', r'(Exchange-correlation type[\s\S]+?\n *\n)',
sub_parser=TextParser(quantities=[
Quantity('type', r'Exchange-correlation type +: +(\S+)'),
Quantity(
'name_reference',
r'\n *(.+?,.+)',
str_operation=lambda x: [v.strip() for v in x.split(':')]),
Quantity(
'parameters',
r'\n *(.+?:.+)', repeats=True,
str_operation=lambda x: [v.strip() for v in x.split(':')])]))
)
self._quantities.append(Quantity(
'initialization',
r'(?:All units are atomic|Starting initialization)([\s\S]+?)(?:Using|Ending initialization)', repeats=False,
sub_parser=TextParser(quantities=initialization_quantities))
)
scf_quantities = [
Quantity(
'energy_total', r'[Tt]*otal energy\s*:\s*([\-\d\.Ee]+)', repeats=False,
dtype=float, unit=ureg.hartree),
Quantity(
'energy_contributions', r'(?:Energies|_)([\+\-\s\w\.\:]+?)\n *(?:DOS|Density)',
str_operation=str_to_energy_dict, repeats=False, convert=False),
Quantity(
'x_exciting_dos_fermi',
r'DOS at Fermi energy \(states\/Ha\/cell\)\s*:\s*([\-\d\.Ee]+)',
repeats=False, dtype=float, unit=1 / ureg.hartree),
Quantity(
'charge_contributions',
r'(?:Charges|Electron charges\s*\:*\s*)([\-\s\w\.\:\(\)]+?)\n *[A-Z\+]',
str_operation=str_to_atom_properties_dict, repeats=False, convert=False),
Quantity(
'moment_contributions',
r'(?:Moments\s*\:*\s*)([\-\s\w\.\:\(\)]+?)\n *[A-Z\+]',
str_operation=str_to_atom_properties_dict, repeats=False, convert=False)]
self._miscellaneous_keys_mapping = {
'x_exciting_gap': (r'Estimated fundamental gap', ureg.hartree),
'time': (r'Wall time \(seconds\)', ureg.s)}
for name, key_unit in self._miscellaneous_keys_mapping.items():
scf_quantities.append(Quantity(
name, r'%s\s*\:*\s*([\-\d\.Ee]+)' % key_unit[0], repeats=False,
unit=key_unit[1]))
self._convergence_keys_mapping = {
'x_exciting_effective_potential_convergence': (
r'RMS change in effective potential \(target\)', ureg.hartree),
'x_exciting_energy_convergence': (
r'Absolute change in total energy\s*\(target\)', ureg.hartree),
'x_exciting_charge_convergence': (
r'Charge distance\s*\(target\)', ureg.elementary_charge),
'x_exciting_IBS_force_convergence': (
r'Abs\. change in max\-nonIBS\-force\s*\(target\)', ureg.hartree / ureg.bohr)}
for name, key_unit in self._convergence_keys_mapping.items():
scf_quantities.append(Quantity(
name, r'%s\s*\:*\s*([\(\)\d\.\-\+Ee ]+)' % key_unit[0],
str_operation=str_to_quantity_tolerances, unit=key_unit[1], repeats=False))
module_quantities = [
Quantity(
'scf_iteration', r'(?:I| i)teration number :([\s\S]+?)(?:\n *\n\+{10}|\+\-{10})',
sub_parser=TextParser(quantities=scf_quantities), repeats=True),
Quantity(
'final',
r'(?:Convergence targets achieved\. Performing final SCF iteration|Reached self-consistent loops maximum)([\s\S]+?)(\n *\n\+{10})',
sub_parser=TextParser(quantities=scf_quantities), repeats=False),
Quantity(
'atomic_positions',
r'(Atomic positions\s*\([\s\S]+?)\n\n',
sub_parser=TextParser(quantities=[
Quantity(
'positions_format', r'Atomic positions\s*\(([a-z]+)\)'),
Quantity(
'symbols', r'atom\s*\d+\s*(\w+)', repeats=True, dtype=str),
Quantity(
'positions', r'\s*:\s*([\d\.\-]+\s*[\d\.\-]+\s*[\d\.\-]+)',
repeats=True, dtype=float)])),
Quantity(
'forces', r'Total atomic forces including IBS \(\w+\)\s*\:(\s*atom[\-\s\w\.\:]*?)\n *Atomic',
repeats=False, str_operation=str_to_array, dtype=float, unit=ureg.hartree / ureg.bohr)
]
self._quantities.append(Quantity(
'groundstate',
r'(?:Self\-consistent loop started|Groundstate module started)([\s\S]+?)Groundstate module stopped',
sub_parser=TextParser(quantities=module_quantities), repeats=False))
optimization_quantities = [
Quantity(
'atomic_positions',
r'(Atomic positions at this step\s*\([\s\S]+?)\n\n',
sub_parser=TextParser(quantities=[
Quantity(
'positions_format', r'Atomic positions at this step\s*\(([a-z]+)\)'),
Quantity(
'symbols', r'atom\s*\d+\s*(\w+)', repeats=True, dtype=str),
Quantity(
'positions', r'\s*:\s*([\d\.\-]+\s*[\d\.\-]+\s*[\d\.\-]+)',
repeats=True, dtype=float)])),
Quantity(
'forces',
r'Total atomic forces including IBS \(\w+\)\s*\:(\s*atom[\-\s\w\.\:]*?)\n *Time',
repeats=False, str_operation=str_to_array, convert=False, unit=ureg.hartree / ureg.bohr),
Quantity(
'step', r'Optimization step\s*(\d+)', repeats=False, dtype=int),
Quantity(
'method', r'method\s*=\s*(\w+)', repeats=False, dtype=str),
Quantity(
'n_scf_iterations',
r'Number of (?:total)* scf iterations\s*\:\s*(\d+)', repeats=False, dtype=int),
Quantity(
'force_convergence',
r'Maximum force magnitude\s*\(target\)\s*\:(\s*[\(\)\d\.\-\+Ee ]+)',
str_operation=str_to_quantity_tolerances, unit=ureg.hartree / ureg.bohr, repeats=False,
dtype=float),
Quantity(
'energy_total', r'Total energy at this optimization step\s*\:\s*([\-\d\.Ee]+)',
unit=ureg.hartree, repeats=False, dtype=float),
Quantity(
'time', r'Time spent in this optimization step\s*\:\s*([\-\d\.Ee]+)\s*seconds',
unit=ureg.s, repeats=False, dtype=float)
]
self._quantities.append(Quantity(
'structure_optimization',
r'Structure\-optimization module started([\s\S]+?)Structure\-optimization module stopped',
sub_parser=TextParser(quantities=[
Quantity(
'optimization_step',
r'(Optimization step\s*\d+[\s\S]+?(?:\n *\n\-{10}|Time spent in this optimization step\s*:\s*[\d\.]+ seconds))',
sub_parser=TextParser(quantities=optimization_quantities),
repeats=True),
Quantity(
'final',
r'Force convergence target achieved([\s\S]+?Opt)',
sub_parser=TextParser(quantities=scf_quantities),
repeats=False),
Quantity(
'atomic_positions',
r'(imized atomic positions\s*\([\s\S]+?)\n\n',
sub_parser=TextParser(quantities=[
Quantity(
'positions_format', r'imized atomic positions\s*\(([a-z]+)\)'),
Quantity(
'symbols', r'atom\s*\d+\s*(\w+)', repeats=True, dtype=str),
Quantity(
'positions', r'\s*:\s*([\d\.\-]+\s*[\d\.\-]+\s*[\d\.\-]+)',
repeats=True, dtype=float)])),
Quantity(
'forces',
r'Total atomic forces including IBS \(\w+\)\s*\:(\s*atom[\-\s\w\.\:]*?)\n *Atomic',
repeats=False, str_operation=str_to_array, dtype=float, unit=ureg.hartree / ureg.bohr),
]), repeats=False))
self._quantities.append(Quantity(
'hybrids',
r'Hybrids module started([\s\S]+?)Hybrids module stopped',
sub_parser=TextParser(quantities=module_quantities)
))
def get_atom_labels(self, section):
labels = section.get('symbols')
if labels is None:
# we get it by concatenating species symbols
species = self.get('initialization', {}).get('species', [])
labels = []
for specie in species:
labels += [specie.get('symbol')] * len(specie.get('positions'))
return labels
def get_positions_format(self, section):
positions_format = section.get('positions_format')
if positions_format is None:
species = self.get_initialization_parameter('species', [])
for specie in species:
positions_format = specie.get('positions_format', None)
if positions_format is not None:
break
return positions_format
def get_atom_positions(self, section={}, positions=None, positions_format=None):
positions = positions if positions is not None else section.get('positions')
if positions is None:
species = self.get_initialization_parameter('species', [])
if species:
positions = np.vstack([s.get('positions') for s in species])
if positions is None:
return
positions = np.array(positions)
positions_format = positions_format if positions_format is not None else self.get_positions_format(section)
if positions_format == 'lattice':
cell = self.get_initialization_parameter('lattice_vectors')
if cell is None:
return
positions = np.dot(positions, cell.magnitude)
return positions * ureg.bohr
def get_scf_threshold(self, name):
reference = self.get('groundstate', self.get('hybrids', {}))
return reference.get('scf_iteration', [{}])[-1].get(
name, [None, None])[-1]
def get_scf_quantity(self, name):
n_scf = len(self.get('energy_total_scf_iteration', []))
quantity = self.get('%s_scf_iteration' % name)
if quantity is None:
return
# this is really problematic if some scf steps dont have the quantity
# the only thing that we can do is to assume that the first steps are the
# ones with the missing quantity
if len(quantity) < n_scf:
quantity = [None] * (n_scf - len(quantity)) + quantity
return quantity
def get_xc_functional_name(self):
# TODO expand list to include other xcf
xc_functional_map = {
2: ['LDA_C_PZ', 'LDA_X_PZ'],
3: ['LDA_C_PW', 'LDA_X_PZ'],
4: ['LDA_C_XALPHA'],
5: ['LDA_C_VBH'],
20: ['GGA_C_PBE', 'GGA_X_PBE'],
21: ['GGA_C_PBE', 'GGA_X_PBE_R'],
22: ['GGA_C_PBE_SOL', 'GGA_X_PBE_SOL'],
26: ['GGA_C_PBE', 'GGA_X_WC'],
            30: ['GGA_C_AM05', 'GGA_X_AM05'],
300: ['GGA_C_BGCP', 'GGA_X_PBE'],
406: ['HYB_GGA_XC_PBEH'],
408: ['HYB_GGA_XC_HSE03']}
xc_functional = self.get('initialization', {}).get('xc_functional', None)
if xc_functional is None:
return []
name = xc_functional_map.get(xc_functional.type, [])
return name
@property
def n_optimization_steps(self):
return len(self.get('structure_optimization', {}).get('optimization_step', []))
def get_number_of_spin_channels(self):
spin_treatment = self.get('initialization', {}).get(
'x_exciting_spin_treatment', 'spin-unpolarised')
n_spin = 1 if spin_treatment.lower() == 'spin-unpolarised' else 2
return n_spin
def get_unit_cell_volume(self):
return self.get('initialization', {}).get('x_exciting_unit_cell_volume', 1.0 * ureg.bohr ** 3)
def get_initialization_parameter(self, key, default=None):
return self.get('initialization', {}).get(key, default)
class ExcitingParser:
def __init__(self):
self.info_parser = ExcitingInfoParser()
self.dos_parser = DOSXMLParser(energy_unit=ureg.hartree)
self.bandstructure_parser = BandstructureXMLParser(energy_unit=ureg.hartree)
self.eigval_parser = ExcitingEigenvalueParser()
self.fermisurf_parser = ExcitingFermiSurfaceBxsfParser()
self.evalqp_parser = ExcitingEvalqpParser()
self.dos_out_parser = DataTextParser()
self.bandstructure_dat_parser = BandstructureDatParser(energy_unit=ureg.hartree)
self.band_out_parser = BandOutParser(energy_unit=ureg.hartree)
self.info_gw_parser = GWInfoParser()
self.input_xml_parser = XMLParser()
self.data_xs_parser = DataTextParser()
self.data_clathrate_parser = DataTextParser(dtype=str)
# different names for different versions of exciting
self._energy_keys_mapping = {
'energy_total': ['Total energy', 'total energy'],
'x_exciting_fermi_energy': ['Fermi energy', 'Fermi'],
'energy_kinetic_electronic': ['Kinetic energy', 'electronic kinetic'],
'energy_coulomb': ['Coulomb energy', 'Coulomb'],
'x_exciting_coulomb_energy': ['Coulomb energy', 'Coulomb'],
'energy_exchange': ['Exchange energy', 'exchange'],
'x_exciting_exchange_energy': ['Exchange energy', 'exchange'],
'energy_correlation': ['Correlation energy', 'correlation'],
'x_exciting_correlation_energy': ['Correlation energy', 'correlation'],
'energy_sum_eigenvalues': ['Sum of eigenvalues', 'sum of eigenvalues'],
'x_exciting_effective_potential_energy': ['Effective potential energy'],
'x_exciting_coulomb_potential_energy': ['Coulomb potential energy', 'Coulomb potential'],
'energy_xc_potential': ['xc potential energy', 'xc potential'],
'energy_electrostatic': ['Hartree energy', 'Hartree'],
'x_exciting_hartree_energy': ['Hartree energy', 'Hartree'],
'x_exciting_electron_nuclear_energy': ['Electron-nuclear energy', 'electron-nuclear '],
'x_exciting_nuclear_nuclear_energy': ['Nuclear-nuclear energy', 'nuclear-nuclear'],
'x_exciting_madelung_energy': ['Madelung energy', 'Madelung'],
'x_exciting_core_electron_kinetic_energy': ['Core-electron kinetic energy', 'core electron kinetic'],
'x_exciting_dft_d2_dispersion_correction': ['DFT-D2 dispersion correction']
}
self._electron_charge_keys_mapping = {
'x_exciting_core_charge': ['core'],
'x_exciting_core_leakage': ['core leakage'],
'x_exciting_valence_charge': ['valence'],
'x_exciting_interstitial_charge': ['interstitial'],
'x_exciting_total_MT_charge': ['total charge in muffin-tins', 'total in muffin-tins'],
'charge_total': ['total charge'],
'x_exciting_section_MT_charge_atom': ['atom_resolved']
}
self._moment_keys_mapping = {
'x_exciting_interstitial_moment': ['interstitial'],
'x_exciting_total_MT_moment': ['total moment in muffin-tins'],
'x_exciting_total_moment': ['total moment'],
'x_exciting_section_MT_moment_atom': ['atom_resolved']
}
def get_exciting_files(self, default):
mainfile = os.path.basename(self.info_parser.mainfile)
suffix = mainfile.strip('INFO.OUT')
target = default.rsplit('.', 1)
filename = '%s%s' % (target[0], suffix)
if target[1:]:
filename = '%s.%s' % (filename, target[1])
filename = os.path.join(self.info_parser.maindir, filename)
if os.path.isfile(filename):
return [filename]
filename = os.path.join(self.info_parser.maindir, default)
if not os.path.isfile(filename):
file_ext = default.split('.')[-1]
mainfile_base = mainfile.rsplit('.', 1)[0].replace('INFO', '')
options = [
f for f in os.listdir(
self.info_parser.maindir) if target[0] in f and mainfile_base in f]
options = [f for f in options if f.endswith(file_ext)]
options.sort()
filenames = [os.path.join(self.info_parser.maindir, f) for f in options]
else:
filenames = [filename]
filenames = [f for f in filenames if os.access(f, os.F_OK)]
return filenames
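    # As an illustration: with a mainfile named INFO.OUT, get_exciting_files('dos.xml')
    # returns [<maindir>/dos.xml] when that file exists; otherwise it falls back to every
    # accessible file in maindir whose name contains 'dos' and ends in '.xml', sorted.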
def file_exists(self, filename):
"""Checks if a the given filename exists and is accessible in the same
folder where the mainfile is stored.
"""
mainfile = os.path.basename(self.info_parser.mainfile)
suffix = mainfile.strip('INFO.OUT')
target = filename.rsplit('.', 1)
filepath = '%s%s' % (target[0], suffix)
if target[1:]:
filepath = '%s.%s' % (filepath, target[1])
filepath = os.path.join(self.info_parser.maindir, filepath)
if os.path.isfile(filepath) and os.access(filepath, os.F_OK):
return True
return False
def _parse_dos(self, sec_scc):
if self.dos_parser.get('totaldos', None) is None:
return
# Get fermi energy: it is used to un-shift the DOS to
# the original scale in which also other energies are reported.
energy_fermi = sec_scc.energy.fermi
if energy_fermi is None:
return
energy_fermi = (energy_fermi.magnitude * ureg.joule).to('hartree')
sec_dos = sec_scc.m_create(Dos, Calculation.dos_electronic)
sec_dos.n_energies = self.dos_parser.number_of_dos
sec_dos.energies = self.dos_parser.energies + energy_fermi
volume = self.info_parser.get_unit_cell_volume()
totaldos = self.dos_parser.get('totaldos') * volume.to('m**3').magnitude
for spin in range(len(totaldos)):
sec_dos_values = sec_dos.m_create(DosValues, Dos.total)
sec_dos_values.spin = spin
sec_dos_values.value = totaldos[spin]
partialdos = self.dos_parser.get('partialdos')
if partialdos is None:
return
partialdos = partialdos.to('1/joule').magnitude
lm_values = np.column_stack((np.arange(len(partialdos)), np.zeros(len(partialdos), dtype=np.int32)))
for lm in range(len(partialdos)):
for spin in range(len(partialdos[lm])):
for atom in range(len(partialdos[lm][spin])):
sec_dos_values = sec_dos.m_create(DosValues, Dos.atom_projected)
sec_dos_values.m_kind = 'spherical'
sec_dos_values.lm = lm_values[lm]
sec_dos_values.spin = spin
sec_dos_values.atom_index = atom
sec_dos_values.value = partialdos[lm][spin][atom]
def _parse_bandstructure(self, sec_scc):
# we need to set nspin again as this is overwritten when setting mainfile
self.bandstructure_parser._nspin = self.info_parser.get_number_of_spin_channels()
band_energies = self.bandstructure_parser.get('band_energies', [])
for n in range(len(band_energies)):
# Get fermi energy: it is used to un-shift the band structure to
# the original scale in which also other energies are reported.
energy_fermi = sec_scc.energy.fermi
if energy_fermi is None:
continue
energy_fermi = energy_fermi.to("hartree")
sec_k_band = sec_scc.m_create(BandStructure, Calculation.band_structure_electronic)
sec_k_band.energy_fermi = energy_fermi
band_k_points = self.bandstructure_parser.get('band_k_points')
nkpts_segment = self.bandstructure_parser.number_of_k_points_per_segment
band_seg_labels = self.bandstructure_parser.get('band_segm_labels')
for nb in range(len(band_energies[n])):
sec_k_band_segment = sec_k_band.m_create(BandEnergies)
sec_k_band_segment.n_kpoints = nkpts_segment[nb]
sec_k_band_segment.kpoints = band_k_points[nb]
sec_k_band_segment.endpoints_labels = band_seg_labels[nb]
sec_k_band_segment.energies = band_energies[n][nb] + energy_fermi
def _parse_eigenvalues(self, sec_scc):
if self.eigval_parser.get('eigenvalues_occupancies', None) is None:
return
nspin = self.info_parser.get_number_of_spin_channels()
def get_data(key):
data = self.eigval_parser.get('eigenvalues_occupancies')
# reshaping is not necessary as this is done in parser, however nspin is
# determined from occupancies which is problematic sometimes
res = np.hstack([np.reshape(v[key], (nspin, np.size(v[key]) // nspin)) for v in data])
res = res.reshape((len(res), len(data), len(res[0]) // len(data)))
if key == 'eigenvalues':
res = res * ureg.hartree
return res
sec_eigenvalues = sec_scc.m_create(BandEnergies)
sec_eigenvalues.kpoints = self.eigval_parser.get('k_points')
sec_eigenvalues.occupations = get_data('occupancies')
sec_eigenvalues.energies = get_data('eigenvalues')
def _parse_fermisurface(self, sec_scc):
fermi_surface = self.fermisurf_parser.get('fermi_surface', [None])[0]
if fermi_surface is None:
return
sec_fermisurface = sec_scc.m_create(x_exciting_section_fermi_surface)
band_parameters = self.fermisurf_parser.get('band_parameters', None)
if band_parameters is not None:
sec_fermisurface.x_exciting_number_of_bands_fermi_surface = band_parameters[0]
sec_fermisurface.x_exciting_number_of_mesh_points_fermi_surface = np.product(band_parameters[1])
sec_fermisurface.x_exciting_grid_fermi_surface = band_parameters[1]
sec_fermisurface.x_exciting_origin_fermi_surface = band_parameters[2]
sec_fermisurface.x_exciting_vectors_fermi_surface = band_parameters[3]
fermi_energy = self.fermisurf_parser.get('fermi_energy', None)
if fermi_energy is not None:
sec_fermisurface.x_exciting_fermi_energy_fermi_surface = fermi_energy
sec_fermisurface.x_exciting_values_fermi_surface = fermi_surface
def _parse_evalqp(self, sec_scc):
data = self.evalqp_parser.get('kpoints_eigenvalues')
if data is None:
return
def get_data(key):
if key == 'k_points':
return np.array([d[0][:3] for d in data])
elif key == 'Znk':
return np.array([d[1].get(key, None) for d in data])
else:
energy = np.array([d[1].get(key, None) for d in data])
if None in energy:
return energy
return np.array([d[1].get(key) for d in data]) * ureg.hartree
eigs_gw = get_data('E_GW')
if eigs_gw[0] is None:
return
nspin = self.info_parser.get_number_of_spin_channels()
def reshape(data):
if data[0] is None:
return
return np.reshape(data, (nspin, len(data) // nspin, len(data[0])))
sec_gw_eigenvalues = sec_scc.m_create(BandEnergies)
sec_gw_eigenvalues.qp_linearization_prefactor = reshape(get_data('Znk'))
sec_gw_eigenvalues.n_bands = len(eigs_gw[0])
sec_gw_eigenvalues.n_kpoints = len(eigs_gw)
sec_gw_eigenvalues.kpoints = get_data('k_points')
sec_gw_eigenvalues.energies = reshape(eigs_gw)
sec_gw_eigenvalues.value_exchange = reshape(get_data('Sx'))
eigs_gw_C = reshape(get_data('Sc'))
if eigs_gw_C is None:
eigs_gw_C = reshape(get_data('Re(Sc)'))
sec_gw_eigenvalues.value_correlation = eigs_gw_C
sec_gw_eigenvalues.value_xc_potential = reshape(get_data('Vxc'))
def _parse_dos_out(self, sec_scc):
data = self.dos_out_parser.data
if data is None:
return
# Get fermi energy: it is used to un-shift the DOS to
# the original scale in which also other energies are reported.
energy_fermi = sec_scc.energy.fermi
if energy_fermi is None:
return
energy_fermi = (energy_fermi.magnitude * ureg.joule).to('hartree')
# TODO I am not sure about format for spin-polarized case! I assume it is
# energy dos_up dos_down
nspin = self.info_parser.get_number_of_spin_channels()
sec_dos = sec_scc.m_create(Dos, Calculation.dos_electronic)
sec_dos.n_energies = len(data) // nspin
data = np.reshape(data, (nspin, len(data) // nspin, 2))
data = np.transpose(data, axes=(2, 0, 1))
sec_dos.energies = data[0][0] * ureg.hartree + energy_fermi
volume = self.info_parser.get_unit_cell_volume()
dos = data[1] * (1 / ureg.hartree) * volume.to('m**3').magnitude
for spin in range(len(dos)):
sec_dos_values = sec_dos.m_create(DosValues, Dos.total)
sec_dos_values.spin = spin
sec_dos_values.value = dos[spin]
# TODO add PDOS
def _parse_bandstructure_dat(self, sec_scc):
self.bandstructure_dat_parser._nspin = self.info_parser.get_number_of_spin_channels()
band_energies = self.bandstructure_dat_parser.band_energies
if band_energies is None:
return
# Get fermi energy: it is used to un-shift the band structure to
# the original scale in which also other energies are reported.
energy_fermi = sec_scc.energy.fermi
if energy_fermi is None:
return
energy_fermi = (energy_fermi.magnitude * ureg.joule).to('hartree')
sec_k_band = sec_scc.m_create(BandStructure, Calculation.band_structure_electronic)
sec_k_band.energy_fermi = energy_fermi
band_k_points = self.bandstructure_dat_parser.band_k_points
nkpts_segment = self.bandstructure_dat_parser.number_of_k_points_per_segment
for nb in range(len(band_energies)):
sec_k_band_segment = sec_k_band.m_create(BandEnergies)
sec_k_band_segment.n_kpoints = nkpts_segment[nb]
sec_k_band_segment.kpoints = band_k_points[nb]
sec_k_band_segment.energies = band_energies[nb] + energy_fermi
def _parse_band_out(self, sec_scc):
self.band_out_parser._nspin = self.info_parser.get_number_of_spin_channels()
band_energies = self.band_out_parser.band_energies
if band_energies is None:
return
# Get fermi energy: it is used to un-shift the band structure to
# the original scale in which also other energies are reported.
energy_fermi = 0.0 * ureg.hartree
if sec_scc.energy is not None:
energy_fermi = sec_scc.energy.fermi
energy_fermi = (energy_fermi.magnitude * ureg.joule).to('hartree')
sec_k_band = sec_scc.m_create(BandStructure, Calculation.band_structure_electronic)
sec_k_band.energy_fermi = energy_fermi
nkpts_segment = self.band_out_parser.number_of_k_points_per_segment
for nb in range(len(band_energies)):
sec_k_band_segment = sec_k_band.m_create(BandEnergies)
sec_k_band_segment.n_kpoints = nkpts_segment[nb]
            sec_k_band_segment.energies = band_energies[nb] + energy_fermi
def parse_file(self, name, section):
# TODO add support for info.xml, wannier.out
if name.startswith('dos') and name.endswith('xml'):
parser = self.dos_parser
parser_function = self._parse_dos
elif name.startswith('bandstructure') and name.endswith('xml'):
parser = self.bandstructure_parser
parser_function = self._parse_bandstructure
elif name.startswith('EIGVAL') and name.endswith('OUT'):
parser = self.eigval_parser
parser_function = self._parse_eigenvalues
elif (name.startswith('FERMISURF') or name.startswith('FS')) and name.endswith('bxsf'):
parser = self.fermisurf_parser
parser_function = self._parse_fermisurface
elif name.startswith('EVALQP') and (name.endswith('DAT') or name.endswith('TXT')):
parser = self.evalqp_parser
parser_function = self._parse_evalqp
elif name.startswith('TDOS') and name.endswith('OUT'):
parser = self.dos_out_parser
parser_function = self._parse_dos_out
elif name.startswith('bandstructure') and name.endswith('dat'):
parser = self.bandstructure_dat_parser
parser_function = self._parse_bandstructure_dat
elif name.startswith('BAND') and name.endswith('OUT'):
parser = self.band_out_parser
parser_function = self._parse_band_out
elif name.startswith('input') and name.endswith('xml'):
parser = self.input_xml_parser
if self._calculation_type == 'gw':
parser_function = self._parse_input_gw
elif self._calculation_type == 'xs':
parser_function = self._parse_input_xs
else:
# TODO implement reading of parameters from input.xml for normal calculations
# in addition to INFO.OUT
return
else:
return
files = self.get_exciting_files(name)
if len(files) > 1:
self.logger.warn('Found multiple files. Will read all!', data=dict(file=name))
for n in range(len(files)):
parser.mainfile = files[n]
parser_function(section)
# free up memory
parser.mainfile = None
def _parse_input_xs(self, sec_method):
xstype = self.input_xml_parser.get('xs/xstype', None)
if xstype is not None:
sec_method.x_exciting_xs_xstype = xstype
sec_method.x_exciting_electronic_structure_method = xstype
sec_method.x_exciting_xs_broadening = self.input_xml_parser.get(
'xs/broad', 0.01, 'hartree')
sec_method.x_exciting_xs_gqmax = self.input_xml_parser.get(
'xs/gqmax', 0.0, '1/bohr')
sec_method.x_exciting_xs_lmaxapw = self.input_xml_parser.get('xs/lmaxapw', 10)
sec_method.x_exciting_xs_number_of_empty_states = self.input_xml_parser.get(
'xs/nempty', 5)
sec_method.x_exciting_xs_ngridq = self.input_xml_parser.get('xs/ngridq', [1, 1, 1])
sec_method.x_exciting_xs_ngridk = self.input_xml_parser.get('xs/ngridk', [1, 1, 1])
rgkmax = self.input_xml_parser.get('xs/rgkmax', None)
if rgkmax is None:
rgkmax = self.info_parser.get_initialization_parameter('x_exciting_rgkmax', 0.)
sec_method.x_exciting_xs_rgkmax = rgkmax
sec_method.x_exciting_xs_scissor = self.input_xml_parser.get('xs/scissor', 0.0)
sec_method.x_exciting_xs_vkloff = self.input_xml_parser.get('xs/vkloff', [0., 0., 0.])
# TODO I am not certain if screening/BSE are children of xs
if self.input_xml_parser.get('xs/screening') is not None:
sec_method.x_exciting_xs_screening_number_of_empty_states = self.input_xml_parser.get(
'xs/screening/nempty', 0)
sec_method.x_exciting_xs_screening_ngridk = self.input_xml_parser.get(
'xs/screening/ngridk', [0, 0, 0])
rgkmax = self.input_xml_parser.get('xs/screening/rgkmax', None)
if rgkmax is None:
rgkmax = self.info_parser.get_initialization_parameter('x_exciting_rgkmax', 0.)
sec_method.x_exciting_xs_screening_rgkmax = rgkmax
sec_method.x_exciting_xs_screening_type = self.input_xml_parser.get(
'xs/screening/screentype', 'full')
if self.input_xml_parser.get('xs/BSE') is not None:
sec_method.x_exciting_xs_bse_antiresonant = self.input_xml_parser.get(
'xs/BSE/aresbse', True)
sec_method.x_exciting_xs_bse_angular_momentum_cutoff = self.input_xml_parser.get(
'xs/BSE/lmaxdielt', 14)
rgkmax = self.input_xml_parser.get('xs/BSE/rgkmax', None)
if rgkmax is None:
rgkmax = self.info_parser.get_initialization_parameter('x_exciting_rgkmax', 0)
sec_method.x_exciting_xs_bse_rgkmax = rgkmax
sec_method.x_exciting_xs_bse_sciavbd = self.input_xml_parser.get(
'xs/BSE/sciavbd', True)
sec_method.x_exciting_xs_bse_sciavqbd = self.input_xml_parser.get(
'xs/BSE/sciavqbd', False)
sec_method.x_exciting_xs_bse_sciavqhd = self.input_xml_parser.get(
'xs/BSE/sciavqhd', False)
sec_method.x_exciting_xs_bse_sciavqwg = self.input_xml_parser.get(
'xs/BSE/sciavqwg', False)
sec_method.x_exciting_xs_bse_sciavtype = self.input_xml_parser.get(
'xs/BSE/sciavtype', 'spherical')
sec_method.x_exciting_xs_bse_xas = self.input_xml_parser.get(
'xs/BSE/xas', False)
sec_method.x_exciting_xs_bse_number_of_bands = self.input_xml_parser.get(
'xs/BSE/nstlbse', [0, 0, 0, 0])
if sec_method.x_exciting_xs_bse_xas:
sec_method.x_exciting_xs_bse_xasatom = self.input_xml_parser.get(
'xs/BSE/xasatom', 0)
sec_method.x_exciting_xs_bse_xasedge = self.input_xml_parser.get(
'xs/BSE/xasedge', 'K')
sec_method.x_exciting_xs_bse_xasspecies = self.input_xml_parser.get(
'xs/BSE/xasspecies', 0)
sec_method.x_exciting_xs_bse_xas_number_of_bands = self.input_xml_parser.get(
'xs/BSE/nstlxas', [0, 0])
if self.input_xml_parser.get('xs/tddft') is not None:
sec_method.x_exciting_xs_tddft_analytic_continuation = self.input_xml_parser.get(
'xs/tddft/acont', False)
sec_method.x_exciting_xs_tddft_anomalous_Hall_conductivity = self.input_xml_parser.get(
'xs/tddft/ahc', False)
sec_method.x_exciting_xs_tddft_anti_resonant_dielectric = self.input_xml_parser.get(
'xs/tddft/aresdf', False)
sec_method.x_exciting_xs_tddft_anti_resonant_xc_kernel = self.input_xml_parser.get(
'xs/tddft/aresfxc', True)
sec_method.x_exciting_xs_tddft_drude = self.input_xml_parser.get(
'xs/tddft/drude', [0., 0.])
sec_method.x_exciting_xs_tddft_split_parameter = self.input_xml_parser.get(
'xs/tddft/fxcbsesplit', 0.00001, 'hartree')
sec_method.x_exciting_xs_tddft_xc_kernel = self.input_xml_parser.get(
'xs/tddft/fxctype', 'RPA')
sec_method.x_exciting_xs_tddft_finite_q_intraband_contribution = self.input_xml_parser.get(
'xs/tddft/intraband', False)
sec_method.x_exciting_xs_tddft_diagonal_xc_kernel = self.input_xml_parser.get(
'xs/tddft/kerndiag', False)
sec_method.x_exciting_xs_tddft_lmax_alda = self.input_xml_parser.get(
'xs/tddft/lmaxalda', 3)
sec_method.x_exciting_xs_tddft_macroscopic_dielectric_function_q_treatment = self.input_xml_parser.get(
'xs/tddft/mdfqtype', 0)
sec_method.x_exciting_xs_tddft_analytic_continuation_number_of_intervals = self.input_xml_parser.get(
'xs/tddft/nwacont', 0)
sec_method.x_exciting_xs_tetra = self.input_xml_parser.get(
'xs/tetra/tetradf', False)
def _parse_xs_bse(self):
sec_run = self.archive.run[-1]
# TODO read from xml file
def get_files(name):
bse_types = ['IP', 'singlet', 'triplet', 'RPA']
scr_types = ['full', 'diag', 'noinvdiag', 'longrange']
bse_files = []
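            # collect the candidate output files for every BSE type / screening type combination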
for bse_type in bse_types:
for scr_type in scr_types:
files = self.get_exciting_files(
'%s_BSE%s_SCR%s.OUT' % (name, bse_type, scr_type))
bse_files.append(files)
return bse_files
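        # load each candidate file with the xs data parser and keep only those that contain data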
def get_data(files):
data = []
for f in files:
self.data_xs_parser.mainfile = f
if self.data_xs_parser.data is None:
continue
data.append(self.data_xs_parser.data)
return data
def parse_exciton(data, sec_scc):
n_components = len(data)
data = np.transpose(np.vstack(data))
sec_scc.x_exciting_xs_bse_number_of_components = n_components
n_excitons = len(data[0]) // n_components
sec_scc.x_exciting_xs_bse_number_of_excitons = n_excitons
sec_scc.x_exciting_xs_bse_exciton_energies = np.reshape(
data[1], (n_components, n_excitons)) * ureg.hartree
sec_scc.x_exciting_xs_bse_exciton_binding_energies = np.reshape(
data[2], (n_components, n_excitons)) * ureg.hartree
sec_scc.x_exciting_xs_bse_exciton_oscillator_strength = np.reshape(
data[3], (n_components, n_excitons))
sec_scc.x_exciting_xs_bse_exciton_amplitude_re = np.reshape(
data[4], (n_components, n_excitons))
sec_scc.x_exciting_xs_bse_exciton_amplitude_im = np.reshape(
data[5], (n_components, n_excitons))
def parse_epsilon(data, sec_scc):
n_components = len(data)
data = np.transpose(np.vstack(data))
n_epsilon = len(data[0]) // n_components
sec_scc.x_exciting_xs_bse_number_of_energy_points = n_epsilon
sec_scc.x_exciting_xs_bse_epsilon_energies = np.reshape(
data[0], (n_components, n_epsilon)) * ureg.hartree
sec_scc.x_exciting_xs_bse_epsilon_re = np.reshape(
data[1], (n_components, n_epsilon))
sec_scc.x_exciting_xs_bse_epsilon_im = np.reshape(
data[2], (n_components, n_epsilon))
def parse_sigma(data, sec_scc):
n_components = len(data)
data = np.transpose(np.vstack(data))
n_sigma = len(data[0]) // n_components
sec_scc.x_exciting_xs_bse_sigma_energies = np.reshape(
data[0], (n_components, n_sigma)) * ureg.hartree
sec_scc.x_exciting_xs_bse_sigma_re = np.reshape(
data[1], (n_components, n_sigma))
sec_scc.x_exciting_xs_bse_sigma_im = np.reshape(
data[2], (n_components, n_sigma))
def parse_loss(data, sec_scc):
n_components = len(data)
data = np.transpose(np.vstack(data))
n_loss = len(data[0]) // n_components
sec_scc.x_exciting_xs_bse_loss_energies = np.reshape(
data[0], (n_components, n_loss)) * ureg.hartree
sec_scc.x_exciting_xs_bse_loss = np.reshape(
data[1], (n_components, n_loss))
        # TODO check if the file formats are really correct, i.e. whether the columns
        # are what they are assumed to be. What is the fourth, unparsed column in epsilon?
sccs = []
for quantity in ['EXCITON', 'EPSILON', 'SIGMA', 'LOSS']:
files = get_files(quantity)
for i in range(len(files)):
data = get_data(files[i])
if not data:
sccs.append(None)
continue
if quantity == 'EXCITON':
sec_scc = sec_run.m_create(Calculation)
sccs.append(sec_scc)
else:
sec_scc = sccs[i]
if sec_scc is None:
# This is the case when there is a mismatch between files
self.logger.warn(
'Mismatch in EXCITON and file type', data=dict(file=quantity))
sec_scc = sec_run.m_create(Calculation)
if quantity == 'EXCITON':
parse_function = parse_exciton
elif quantity == 'EPSILON':
parse_function = parse_epsilon
elif quantity == 'SIGMA':
parse_function = parse_sigma
elif quantity == 'LOSS':
parse_function = parse_loss
else:
continue
try:
parse_function(data, sec_scc)
except Exception:
self.logger.error('Error setting xs data', data=dict(file=quantity))
def _parse_xs_tddft(self):
sec_run = self.archive.run[-1]
fxctype = self.input_xml_parser.get('xs/tddft/fxctype', 'RPA')
tetradf = self.input_xml_parser.get('xs/tetra/tetradf', None)
nwacont = self.input_xml_parser.get('xs/tddft/nwacont', None)
aresdf = self.input_xml_parser.get('xs/tddft/aresdf', True)
file_ext_list = [
'TET' if tetradf else None, 'AC' if nwacont else None, 'NAR' if not aresdf else None]
file_ext = '_'.join([e for e in file_ext_list if e])
# read q points
qpoints = self.input_xml_parser.get('xs/qpointset/qpoint')
def get_data(quantity, ext):
# all files related to quantity at all qpoints
files = self.get_exciting_files('%s_%s%s%s.OUT' % (quantity, file_ext, ext, fxctype))
data = [[], [], []]
for i in range(len(qpoints)):
data_q = []
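                # files of the i-th q-point carry a 1-based, zero-padded QMT suffix (QMT001, QMT002, ...)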
files_q = [f for f in files if f.endswith('QMT%s.OUT' % str(i + 1).rjust(3, '0'))]
for f in files_q:
self.data_xs_parser.mainfile = f
if self.data_xs_parser.data is None:
continue
data_q.append(self.data_xs_parser.data)
if not data_q:
continue
data_q = np.transpose(data_q, axes=(2, 0, 1))
for j in range(len(data)):
data[j].append(data_q[j])
return data
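        # FXC files carry the results with local-field effects, NLF_FXC the ones without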
for quantity in ['EPSILON', 'LOSS', 'SIGMA']:
for ext in ['FXC', 'NLF_FXC']:
data = get_data(quantity, ext)
if not data[0]:
continue
if quantity == 'EPSILON' and ext == 'FXC':
sec_scc = sec_run.m_create(Calculation)
sec_scc.x_exciting_xs_tddft_number_of_epsilon_values = len(data[0][0][0])
sec_scc.x_exciting_xs_tddft_epsilon_energies = data[0][0][0] * ureg.hartree
sec_scc.x_exciting_xs_tddft_dielectric_function_local_field = data[1:]
elif quantity == 'EPSILON' and ext == 'NLF_FXC':
sec_scc.x_exciting_xs_tddft_dielectric_function_no_local_field = data[1:3]
elif quantity == 'LOSS' and ext == 'FXC':
sec_scc.x_exciting_xs_tddft_loss_function_local_field = data[1]
elif quantity == 'LOSS' and ext == 'NLF_FXC':
sec_scc.x_exciting_xs_tddft_loss_function_no_local_field = data[1]
elif quantity == 'SIGMA' and ext == 'FXC':
sec_scc.x_exciting_xs_tddft_sigma_local_field = data[1:3]
elif quantity == 'SIGMA' and ext == 'NLF_FXC':
sec_scc.x_exciting_xs_tddft_sigma_no_local_field = data[1:3]
def parse_xs(self):
sec_run = self.archive.run[-1]
xs_info_files = self.get_exciting_files('INFOXS.OUT')
if not xs_info_files:
return
self._calculation_type = 'xs'
# inconsistency in the naming convention for xs input xml file
sec_method = sec_run.m_create(Method)
sec_method_ref = self.archive.run[-1].method[0]
sec_method.starting_method_ref = sec_method_ref
sec_method.methods_ref = [sec_method_ref]
self.parse_file('input.xml', sec_method)
# parse properties
input_file = self.get_exciting_files('input.xml')
if not input_file:
return
self.input_xml_parser.mainfile = input_file[0]
xstype = self.input_xml_parser.get('xs/xstype', '')
if xstype.lower() == 'bse':
self._parse_xs_bse()
elif xstype.lower() == 'tddft':
self._parse_xs_tddft()
def _parse_input_gw(self, sec_method):
sec_gw = sec_method.m_create(GWMethod)
sec_gw.type = 'G0W0'
gmaxvr = self.info_parser.get_initialization_parameter('x_exciting_gmaxvr', 0)
sec_gw.core_treatment = self.input_xml_parser.get(
'gw/coreflag', 'all')
sec_gw.polarizability_number_of_empty_states = int(
self.input_xml_parser.get('gw/nempty', 0))
sec_gw.ngridq = self.input_xml_parser.get('gw/ngridq', [1, 1, 1])
sec_gw.basis_set = 'mixed'
sec_gw.qp_equation_treatment = 'linearization'
sec_gw.max_frequency = self.input_xml_parser.get(
'gw/freqgrid/freqmax', 1.0)
sec_gw.frequency_grid_type = self.input_xml_parser.get(
'gw/freqgrid/fgrid', 'gaule2')
sec_gw.number_of_frequencies = int(self.input_xml_parser.get(
'gw/freqgrid/nomeg', 16))
sec_gw.self_energy_c_number_of_poles = int(self.input_xml_parser.get(
'gw/selfenergy/npol', 0))
sec_gw.self_energy_c_number_of_empty_states = int(self.input_xml_parser.get(
'gw/selfenergy/nempty', 0))
sec_gw.self_energy_singularity_treatment = self.input_xml_parser.get(
'gw/selfenergy/singularity', 'mpd')
sec_gw.self_energy_c_analytical_continuation = self.input_xml_parser.get(
'gw/selfenergy/actype', 'pade')
sec_gw.mixed_basis_lmax = int(self.input_xml_parser.get(
'gw/mixbasis/lmaxmb', 3))
sec_gw.mixed_basis_tolerance = self.input_xml_parser.get(
'gw/mixbasis/epsmb', 0.0001)
gmb = self.input_xml_parser.get('gw/mixbasis/gmb', 1.0)
sec_gw.mixed_basis_gmax = gmb * gmaxvr
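        # the bare Coulomb cutoff is the plane-wave multiplier pwm times the mixed-basis gmax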
pwm = self.input_xml_parser.get('gw/barecoul/pwm', 2.0)
sec_gw.bare_coulomb_gmax = pwm * gmb * gmaxvr
sec_gw.bare_coulomb_cutofftype = self.input_xml_parser.get(
'gw/barecoul/cutofftype', 'none')
sec_gw.screened_coulomb_volume_average = self.input_xml_parser.get(
'gw/scrcoul/sciavtype', 'isotropic')
sec_gw.screened_Coulomb = self.input_xml_parser.get(
'gw/scrcoul/scrtype', 'rpa')
def parse_gw(self):
sec_run = self.archive.run[-1]
# two versions of gw info files
gw_info_files = ['GW_INFO.OUT', 'GWINFO.OUT']
for f in gw_info_files:
if self.get_exciting_files(f):
self._calculation_type = 'gw'
gw_info_file = f
break
        if self._calculation_type != 'gw':
return
sec_method = sec_run.m_create(Method)
sec_method_ref = self.archive.run[-1].method[0]
sec_method.starting_method_ref = sec_method_ref
sec_method.methods_ref = [sec_method_ref]
        # parse input xml file; there seem to be two naming variants, input_gw.xml and input-gw.xml
for f in ['input_gw.xml', 'input-gw.xml', 'input.xml']:
self.parse_file(f, sec_method)
xc_functional_name = ' '.join(self.info_parser.get_xc_functional_name())
sec_method.gw.starting_point = xc_functional_name
sec_scc = sec_run.m_create(Calculation)
sec_scc.method_ref = sec_method
if sec_run.system:
sec_scc.system_ref = sec_run.system[-1]
sec_scc_ref = sec_run.calculation[0]
sec_scc.starting_calculation_ref = sec_scc_ref
sec_scc.calculations_ref = [sec_scc_ref]
# parse properties
gw_info_files = self.get_exciting_files(gw_info_file)
if len(gw_info_files) > 1:
self.logger.warn('Found multiple GW info files, will read only first!')
self.info_gw_parser.mainfile = gw_info_files[0]
fermi_energy = self.info_gw_parser.get('fermi_energy', None)
if fermi_energy is not None:
sec_scc.energy = Energy(fermi=fermi_energy)
gw_files = ['EVALQP.DAT', 'EVALQP.TXT', 'TDOS-QP.OUT']
# Parse GW band structure from one of the files:
bs_files = ['bandstructure-qp.dat', 'BAND-QP.OUT']
for fname in bs_files:
if self.file_exists(fname):
gw_files.append(fname)
break
for f in gw_files:
self.parse_file(f, sec_scc)
frequency_data = self.info_gw_parser.get('frequency_data', None)
if frequency_data is not None:
number = frequency_data.get('number')
sec_method.gw.number_of_frequencies = len(number)
sec_method.gw.frequency_number = number
sec_method.gw.frequency_values = frequency_data.get('values')
sec_method.gw.frequency_weights = frequency_data.get('weights')
fundamental_band_gap = self.info_gw_parser.get('direct_band_gap', None)
if fundamental_band_gap is None:
fundamental_band_gap = self.info_gw_parser.get('fundamental_band_gap', None)
sec_gap = sec_scc.eigenvalues[-1].m_create(BandGap)
if fundamental_band_gap is not None:
sec_gap.value_fundamental = fundamental_band_gap
optical_band_gap = self.info_gw_parser.get('optical_band_gap', None)
if optical_band_gap is not None:
sec_gap.value_optical = optical_band_gap
def parse_miscellaneous(self):
        sec_workflow = self.archive.m_create(Workflow)
        sec_workflow.type = 'single_point'
        structure_optimization = self.info_parser.get('structure_optimization')
        if structure_optimization is not None:
            sec_workflow.type = 'geometry_optimization'
            sec_geometry_opt = sec_workflow.m_create(GeometryOptimization)
threshold_force = structure_optimization.get(
'optimization_step', [{}])[0].get('force_convergence', [0., 0.])[-1]
sec_geometry_opt.input_force_maximum_tolerance = threshold_force
def parse_method(self):
sec_run = self.archive.run[-1]
sec_method = sec_run.m_create(Method)
sec_method.basis_set.append(BasisSet(type='(L)APW+lo'))
sec_dft = sec_method.m_create(DFT)
sec_electronic = sec_method.m_create(Electronic)
sec_electronic.method = 'DFT'
smearing_kind_map = {
'Gaussian': 'gaussian', 'Methfessel-Paxton': 'methfessel-paxton',
'Fermi-Dirac': 'fermi', 'Extended': 'tetrahedra'}
sec_smearing = sec_electronic.m_create(Smearing)
smearing_kind = self.info_parser.get_initialization_parameter('smearing_kind')
if smearing_kind is not None:
if not isinstance(smearing_kind, str):
smearing_kind = smearing_kind[0]
smearing_kind = smearing_kind_map[smearing_kind]
sec_smearing.kind = smearing_kind
smearing_width = self.info_parser.get_initialization_parameter('smearing_width')
if smearing_width is not None:
smearing_width = (smearing_width * ureg.hartree).to('joule')
            # TODO smearing width should have units of energy
sec_smearing.width = smearing_width.magnitude
for name in self.info_parser._convergence_keys_mapping.keys():
threshold = self.info_parser.get_scf_threshold(name)
if threshold is None:
continue
metainfo_name = 'x_exciting_scf_threshold_%s_change' % name.split('_')[-2]
setattr(sec_method, metainfo_name, threshold)
            # additionally, mirror the energy threshold in the global scf metainfo
if metainfo_name == 'x_exciting_scf_threshold_energy_change':
sec_method.scf = Scf(threshold_energy_change=threshold)
xc_functional_names = self.info_parser.get_xc_functional_name()
if not xc_functional_names:
# get it from input.xml
input_file = self.get_exciting_files('input.xml')
for f in input_file:
self.input_xml_parser.mainfile = f
correlation = self.input_xml_parser.get('libxc/correlation', None)
xc_functional_names.append(correlation)
exchange = self.input_xml_parser.get('libxc/exchange', None)
xc_functional_names.append(exchange)
sec_xc_functional = sec_dft.m_create(XCFunctional)
for name in xc_functional_names:
if name is None:
continue
if '_X_' in name:
sec_xc_functional.exchange.append(Functional(name=name))
elif '_C_' in name:
sec_xc_functional.correlation.append(Functional(name=name))
elif 'HYB' in name:
sec_xc_functional.hybrid.append(Functional(name=name))
else:
sec_xc_functional.contributions.append(Functional(name=name))
if not xc_functional_names:
# simply write parameters
xc_functional = self.info_parser.get('initialization', {}).get('xc_functional')
if xc_functional is not None:
sec_xc_functional.name = xc_functional.get('name_reference', [None, None])[0]
sec_xc_functional.reference = xc_functional.get('name_reference', [None, None])[1]
sec_electronic.n_spin_channels = self.info_parser.get_number_of_spin_channels()
if self._calculation_type == 'volume_optimization':
sec_method.x_exciting_volume_optimization = True
def parse_scc(self, section):
sec_run = self.archive.run[-1]
final = section if section.get('energy_total') is not None else section.get('final')
if final is None:
# get it from last scf_iteration or optimization_step
final = section.get('scf_iteration', [None])[-1]
final = section.get('optimization_step', [None])[-1] if final is None else final
if final is None:
return
sec_scc = sec_run.m_create(Calculation)
def parse_scf(iteration, msection):
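            # map energies, charges, moments and convergence values of one SCF block onto msection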
energy_total = iteration.get('energy_total')
sec_energy = msection.m_create(Energy)
if energy_total is not None:
sec_energy.total = EnergyEntry(value=energy_total)
x_exciting_dos_fermi = iteration.get('x_exciting_dos_fermi')
if x_exciting_dos_fermi is not None:
setattr(msection, 'x_exciting_dos_fermi', x_exciting_dos_fermi)
# energy contributions
energy_contributions = iteration.get('energy_contributions', {})
for key, names in self._energy_keys_mapping.items():
val = None
for name in names:
val = energy_contributions.get(name, None)
if val is not None:
break
if val is None:
continue
if key.startswith('energy_'):
sec_energy.m_add_sub_section(getattr(
Energy, key.replace('energy_', '')), EnergyEntry(value=val))
else:
setattr(msection, key, val)
if key == 'x_exciting_fermi_energy':
sec_energy.fermi = val
# charge contributions
charge_contributions = iteration.get('charge_contributions', {})
for key, names in self._electron_charge_keys_mapping.items():
val = None
for name in names:
val = charge_contributions.get(name, None)
if val is not None:
break
if val is None:
continue
if key == 'x_exciting_section_MT_charge_atom':
for n in range(len(val)):
sec_mt_charge_atom = msection.m_create(x_exciting_section_MT_charge_atom)
sec_mt_charge_atom.x_exciting_MT_charge_atom_index = n + 1
sec_mt_charge_atom.x_exciting_MT_charge_atom_symbol = val[n][0]
sec_mt_charge_atom.x_exciting_MT_charge_atom_value = val[n][1]
sec_charges = msection.m_create(Charges)
sec_charges.value = [
val[n][1].magnitude for n in range(len(val))] * val[0][1].units
sec_charges.total = charge_contributions.get('total charge')
elif key == 'charge_total':
pass
else:
setattr(msection, key, val)
# moment contributions
moment_contributions = iteration.get('moment_contributions', {})
for key, names in self._moment_keys_mapping.items():
val = None
for name in names:
val = moment_contributions.get(name, None)
if val is not None:
break
if val is None:
continue
if key == 'x_exciting_section_MT_moment_atom':
for n in range(len(val)):
sec_mt_moment_atom = msection.m_create(x_exciting_section_MT_moment_atom)
sec_mt_moment_atom.x_exciting_MT_moment_atom_index = n + 1
sec_mt_moment_atom.x_exciting_MT_moment_atom_symbol = val[n][0]
sec_mt_moment_atom.x_exciting_MT_moment_atom_value = val[n][1]
else:
setattr(msection, key, val)
# convergence values
for name in self.info_parser._convergence_keys_mapping.keys():
val = iteration.get(name)
if val is None:
continue
setattr(msection, name, val)
# other metainfo
for name in self.info_parser._miscellaneous_keys_mapping.keys():
val = iteration.get(name)
if val is None:
continue
if name == 'time':
msection.time_calculation = val
else:
setattr(msection, name, val)
# energy, moment, charge contributions
parse_scf(final, sec_scc)
# forces
forces = section.get('forces')
if forces is not None:
sec_forces = sec_scc.m_create(Forces)
sec_forces.total = ForcesEntry(value=forces)
# scf iterations
scf_iterations = section.get('scf_iteration', [])
for scf_iteration in scf_iterations:
sec_scf_iteration = sec_scc.m_create(ScfIteration)
parse_scf(scf_iteration, sec_scf_iteration)
return sec_scc
def parse_system(self, section):
sec_run = self.archive.run[-1]
positions = self.info_parser.get_atom_positions(section.get('atomic_positions', {}))
lattice_vectors = self.info_parser.get_initialization_parameter('lattice_vectors')
atom_labels = self.info_parser.get_atom_labels(section.get('atomic_positions', {}))
input_file = self.get_exciting_files('input.xml')
if positions is None:
# get it from input.xml
for f in input_file:
self.input_xml_parser.mainfile = f
positions = self.input_xml_parser.get('structure/species/atom/coord')
lattice_vectors = self.input_xml_parser.get(
'structure/crystal/basevect', np.eye(3))
species = self.input_xml_parser.get('structure/species/speciesfile')
if positions is None or lattice_vectors is None or species is None:
continue
lattice_vectors = np.array(lattice_vectors, dtype=float)
lattice_vectors *= self.input_xml_parser.get('structure/crystal/scale', 1.0)
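                # coord entries are taken as fractional and converted to Cartesian positions in bohr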
positions = np.dot(positions, lattice_vectors) * ureg.bohr
lattice_vectors = lattice_vectors * ureg.bohr
atoms = self.input_xml_parser.get('structure/species/atom')
atom_labels = []
for n in range(len(atoms)):
atom_labels.extend([species[n].split('.')[0]] * len(atoms[n]))
if positions is None or atom_labels is None:
return
sec_system = sec_run.m_create(System)
sec_atoms = sec_system.m_create(Atoms)
sec_atoms.positions = positions
sec_atoms.labels = atom_labels
sec_atoms.periodic = [True] * 3
# TODO confirm no cell optimization in exciting
sec_atoms.lattice_vectors = lattice_vectors
lattice_vectors_reciprocal = self.info_parser.get_initialization_parameter(
'lattice_vectors_reciprocal')
sec_atoms.lattice_vectors_reciprocal = lattice_vectors_reciprocal
if len(sec_run.system) > 1:
return sec_system
for name in self.info_parser._system_keys_mapping.keys():
val = self.info_parser.get_initialization_parameter(name)
if val is None:
continue
if name == 'x_exciting_spin_treatment':
sub_sec = sec_system.m_create(x_exciting_section_spin)
sub_sec.x_exciting_spin_treatment = val
elif name == 'x_exciting_species_rtmin':
setattr(sec_system, name, ' '.join([str(v) for v in val]))
else:
try:
setattr(sec_system, name, val)
except Exception:
self.logger.warn('Error setting metainfo.')
# species
species = self.info_parser.get_initialization_parameter('species', [])
for specie in species:
sec_atoms_group = sec_system.m_create(x_exciting_section_atoms_group)
sec_atoms_group.x_exciting_geometry_atom_labels = specie.get('symbol')
sec_atoms_group.x_exciting_geometry_atom_number = str(specie.get('number'))
sec_atoms_group.x_exciting_muffin_tin_points = specie.get('radial_points')
sec_atoms_group.x_exciting_muffin_tin_radius = specie.get('muffin_tin_radius')
positions_format = specie.get('positions_format')
sec_atoms_group.x_exciting_atom_position_format = positions_format
positions = specie.get('positions')
positions = self.info_parser.get_atom_positions(
positions=positions, positions_format=positions_format).to('m')
sec_atoms_group.x_exciting_geometry_atom_positions = positions.magnitude
# clathrate info
clathrate_file = self.get_exciting_files('str.out')
if clathrate_file:
sec_system.x_exciting_clathrates = True
self.data_clathrate_parser.mainfile = clathrate_file[0]
if self.data_clathrate_parser.data:
data = np.transpose(self.data_clathrate_parser.data)
sec_system.x_exciting_clathrates_atom_coordinates = np.transpose(
np.array(data[:3], dtype=float))
sec_system.x_exciting_clathrates_atom_labels = list(data[3])
else:
sec_system.x_exciting_clathrates = False
potential_mixing = self.info_parser.get_initialization_parameter('potential_mixing')
if potential_mixing is not None:
sec_system.x_exciting_potential_mixing = potential_mixing
return sec_system
def parse_configurations(self):
sec_run = self.archive.run[-1]
def parse_configuration(section):
if not section:
return
sec_scc = self.parse_scc(section)
if sec_scc is None:
return
sec_system = self.parse_system(section)
if sec_system is not None:
sec_scc.system_ref = sec_system
sec_scc.method_ref = sec_run.method[-1]
return sec_scc
# groundstate and hybrids calculation
for module in ['groundstate', 'hybrids']:
sec_scc = parse_configuration(self.info_parser.get(module))
if sec_scc is None:
continue
# add data to scc
# TODO add support for more output files and properties
exciting_files = ['EIGVAL.OUT', 'FERMISURF.bxsf', 'FS.bxsf']
# Parse DFT DOS from one of the files
bs_files = ['dos.xml', 'TDOS.OUT']
for fname in bs_files:
if self.file_exists(fname):
exciting_files.append(fname)
break
# Parse DFT band structure from one of the files
bs_files = ['bandstructure.xml', 'BAND.OUT', 'bandstructure.dat']
for fname in bs_files:
if self.file_exists(fname):
exciting_files.append(fname)
break
for f in exciting_files:
self.parse_file(f, sec_scc)
# structure optimization
structure_optimization = self.info_parser.get('structure_optimization', {})
for optimization_step in structure_optimization.get('optimization_step', []):
sec_scc = parse_configuration(optimization_step)
if optimization_step.get('method') is not None:
sec_scc.x_exciting_geometry_optimization_method = optimization_step.get('method')
if optimization_step.get('step') is not None:
sec_scc.x_exciting_geometry_optimization_step = optimization_step.get('step')
force_convergence = optimization_step.get('force_convergence')
if force_convergence is not None:
sec_scc.x_exciting_maximum_force_magnitude = force_convergence[0]
sec_scc.x_exciting_geometry_optimization_threshold_force = force_convergence[1]
sec_scc = parse_configuration(structure_optimization)
if sec_scc is None:
return
# volume optimizations
volume_index = 1
while True:
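            # one INFO.OUT per volume-optimization step is expected under run_dir01, run_dir02, ...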
info_volume = self.get_exciting_files('run_dir%s/INFO.OUT' % str(volume_index).rjust(2, '0'))
if not info_volume:
break
            sec_scc.calculations_path.append(info_volume[0])
            volume_index += 1
def init_parser(self):
self.info_parser.mainfile = self.filepath
self.info_parser.logger = self.logger
self.dos_parser.logger = self.logger
self.bandstructure_parser.logger = self.logger
self.eigval_parser.logger = self.logger
self.fermisurf_parser.logger = self.logger
self.evalqp_parser.logger = self.logger
self.dos_out_parser.logger = self.logger
self.bandstructure_dat_parser.logger = self.logger
self.band_out_parser.logger = self.logger
self.info_gw_parser.logger = self.logger
self.input_xml_parser.logger = self.logger
self.data_xs_parser.logger = self.logger
self.data_clathrate_parser.logger = self.logger
def reuse_parser(self, parser):
self.info_parser.quantities = parser.info_parser.quantities
self.eigval_parser.quantities = parser.eigval_parser.quantities
self.fermisurf_parser.quantities = parser.fermisurf_parser.quantities
self.evalqp_parser.quantities = parser.evalqp_parser.quantities
self.info_gw_parser.quantities = parser.info_gw_parser.quantities
def parse(self, filepath, archive, logger):
self.filepath = filepath
self.archive = archive
self.logger = logger if logger is not None else logging
self._calculation_type = None
self.init_parser()
sec_run = self.archive.m_create(Run)
sec_run.program = Program(
name='exciting', version=self.info_parser.get('program_version', '').strip())
# method goes first since reference needed for sec_scc
self.parse_method()
self.parse_configurations()
self.parse_gw()
self.parse_xs()
self.parse_miscellaneous()
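# --- Editorial note: minimal usage sketch, not part of the original source ---
# The parser class name and the EntryArchive import path below are assumptions
# made for illustration; only the entry-point signature parse(filepath, archive,
# logger) is taken from the code above.
#
#     from nomad.datamodel import EntryArchive   # assumed import path
#
#     archive = EntryArchive()
#     parser = ExcitingParser()                   # assumed class name
#     parser.parse('path/to/INFO.OUT', archive, logger=None)
#     print(archive.run[-1].program.version)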
| 1.804688 | 2 |
services/storage/client-sdk/python/simcore_service_storage_sdk/api/users_api.py | KZzizzle/osparc-simcore | 0 | 2981 | # coding: utf-8
"""
simcore-service-storage API
API definition for simcore-service-storage service # noqa: E501
OpenAPI spec version: 0.1.0
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from simcore_service_storage_sdk.api_client import ApiClient
class UsersApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def check_action_post(self, action, **kwargs): # noqa: E501
"""Test checkpoint to ask server to fail or echo back the transmitted data # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.check_action_post(action, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str action: (required)
:param str data:
:param FakeType fake_type:
:return: FakeEnveloped
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.check_action_post_with_http_info(action, **kwargs) # noqa: E501
else:
(data) = self.check_action_post_with_http_info(action, **kwargs) # noqa: E501
return data
def check_action_post_with_http_info(self, action, **kwargs): # noqa: E501
"""Test checkpoint to ask server to fail or echo back the transmitted data # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.check_action_post_with_http_info(action, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str action: (required)
:param str data:
:param FakeType fake_type:
:return: FakeEnveloped
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['action', 'data', 'fake_type'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method check_action_post" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'action' is set
if ('action' not in local_var_params or
local_var_params['action'] is None):
raise ValueError("Missing the required parameter `action` when calling `check_action_post`") # noqa: E501
collection_formats = {}
path_params = {}
if 'action' in local_var_params:
path_params['action'] = local_var_params['action'] # noqa: E501
query_params = []
if 'data' in local_var_params:
query_params.append(('data', local_var_params['data'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'fake_type' in local_var_params:
body_params = local_var_params['fake_type']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/check/{action}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FakeEnveloped', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_file(self, file_id, location_id, user_id, **kwargs): # noqa: E501
"""Deletes File # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_file(file_id, location_id, user_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str file_id: (required)
:param str location_id: (required)
:param str user_id: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_file_with_http_info(file_id, location_id, user_id, **kwargs) # noqa: E501
else:
(data) = self.delete_file_with_http_info(file_id, location_id, user_id, **kwargs) # noqa: E501
return data
def delete_file_with_http_info(self, file_id, location_id, user_id, **kwargs): # noqa: E501
"""Deletes File # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_file_with_http_info(file_id, location_id, user_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str file_id: (required)
:param str location_id: (required)
:param str user_id: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['file_id', 'location_id', 'user_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_file" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'file_id' is set
if ('file_id' not in local_var_params or
local_var_params['file_id'] is None):
raise ValueError("Missing the required parameter `file_id` when calling `delete_file`") # noqa: E501
# verify the required parameter 'location_id' is set
if ('location_id' not in local_var_params or
local_var_params['location_id'] is None):
raise ValueError("Missing the required parameter `location_id` when calling `delete_file`") # noqa: E501
# verify the required parameter 'user_id' is set
if ('user_id' not in local_var_params or
local_var_params['user_id'] is None):
raise ValueError("Missing the required parameter `user_id` when calling `delete_file`") # noqa: E501
collection_formats = {}
path_params = {}
if 'file_id' in local_var_params:
path_params['fileId'] = local_var_params['file_id'] # noqa: E501
if 'location_id' in local_var_params:
path_params['location_id'] = local_var_params['location_id'] # noqa: E501
query_params = []
if 'user_id' in local_var_params:
query_params.append(('user_id', local_var_params['user_id'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/locations/{location_id}/files/{fileId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def download_file(self, file_id, location_id, user_id, **kwargs): # noqa: E501
"""Returns download link for requested file # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.download_file(file_id, location_id, user_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str file_id: (required)
:param str location_id: (required)
:param str user_id: (required)
:return: PresignedLinkEnveloped
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.download_file_with_http_info(file_id, location_id, user_id, **kwargs) # noqa: E501
else:
(data) = self.download_file_with_http_info(file_id, location_id, user_id, **kwargs) # noqa: E501
return data
def download_file_with_http_info(self, file_id, location_id, user_id, **kwargs): # noqa: E501
"""Returns download link for requested file # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.download_file_with_http_info(file_id, location_id, user_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str file_id: (required)
:param str location_id: (required)
:param str user_id: (required)
:return: PresignedLinkEnveloped
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['file_id', 'location_id', 'user_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method download_file" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'file_id' is set
if ('file_id' not in local_var_params or
local_var_params['file_id'] is None):
raise ValueError("Missing the required parameter `file_id` when calling `download_file`") # noqa: E501
# verify the required parameter 'location_id' is set
if ('location_id' not in local_var_params or
local_var_params['location_id'] is None):
raise ValueError("Missing the required parameter `location_id` when calling `download_file`") # noqa: E501
# verify the required parameter 'user_id' is set
if ('user_id' not in local_var_params or
local_var_params['user_id'] is None):
raise ValueError("Missing the required parameter `user_id` when calling `download_file`") # noqa: E501
collection_formats = {}
path_params = {}
if 'file_id' in local_var_params:
path_params['fileId'] = local_var_params['file_id'] # noqa: E501
if 'location_id' in local_var_params:
path_params['location_id'] = local_var_params['location_id'] # noqa: E501
query_params = []
if 'user_id' in local_var_params:
query_params.append(('user_id', local_var_params['user_id'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/locations/{location_id}/files/{fileId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PresignedLinkEnveloped', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_file_metadata(self, file_id, location_id, user_id, **kwargs): # noqa: E501
"""Get File Metadata # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_file_metadata(file_id, location_id, user_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str file_id: (required)
:param str location_id: (required)
:param str user_id: (required)
:return: FileMetaDataEnveloped
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_file_metadata_with_http_info(file_id, location_id, user_id, **kwargs) # noqa: E501
else:
(data) = self.get_file_metadata_with_http_info(file_id, location_id, user_id, **kwargs) # noqa: E501
return data
def get_file_metadata_with_http_info(self, file_id, location_id, user_id, **kwargs): # noqa: E501
"""Get File Metadata # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_file_metadata_with_http_info(file_id, location_id, user_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str file_id: (required)
:param str location_id: (required)
:param str user_id: (required)
:return: FileMetaDataEnveloped
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['file_id', 'location_id', 'user_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_file_metadata" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'file_id' is set
if ('file_id' not in local_var_params or
local_var_params['file_id'] is None):
raise ValueError("Missing the required parameter `file_id` when calling `get_file_metadata`") # noqa: E501
# verify the required parameter 'location_id' is set
if ('location_id' not in local_var_params or
local_var_params['location_id'] is None):
raise ValueError("Missing the required parameter `location_id` when calling `get_file_metadata`") # noqa: E501
# verify the required parameter 'user_id' is set
if ('user_id' not in local_var_params or
local_var_params['user_id'] is None):
raise ValueError("Missing the required parameter `user_id` when calling `get_file_metadata`") # noqa: E501
collection_formats = {}
path_params = {}
if 'file_id' in local_var_params:
path_params['fileId'] = local_var_params['file_id'] # noqa: E501
if 'location_id' in local_var_params:
path_params['location_id'] = local_var_params['location_id'] # noqa: E501
query_params = []
if 'user_id' in local_var_params:
query_params.append(('user_id', local_var_params['user_id'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/locations/{location_id}/files/{fileId}/metadata', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FileMetaDataEnveloped', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_files_metadata(self, location_id, user_id, **kwargs): # noqa: E501
"""Get Files Metadata # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_files_metadata(location_id, user_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str location_id: (required)
:param str user_id: (required)
:param str uuid_filter:
:return: FileMetaDataArrayEnveloped
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_files_metadata_with_http_info(location_id, user_id, **kwargs) # noqa: E501
else:
(data) = self.get_files_metadata_with_http_info(location_id, user_id, **kwargs) # noqa: E501
return data
def get_files_metadata_with_http_info(self, location_id, user_id, **kwargs): # noqa: E501
"""Get Files Metadata # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_files_metadata_with_http_info(location_id, user_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str location_id: (required)
:param str user_id: (required)
:param str uuid_filter:
:return: FileMetaDataArrayEnveloped
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['location_id', 'user_id', 'uuid_filter'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_files_metadata" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'location_id' is set
if ('location_id' not in local_var_params or
local_var_params['location_id'] is None):
raise ValueError("Missing the required parameter `location_id` when calling `get_files_metadata`") # noqa: E501
# verify the required parameter 'user_id' is set
if ('user_id' not in local_var_params or
local_var_params['user_id'] is None):
raise ValueError("Missing the required parameter `user_id` when calling `get_files_metadata`") # noqa: E501
collection_formats = {}
path_params = {}
if 'location_id' in local_var_params:
path_params['location_id'] = local_var_params['location_id'] # noqa: E501
query_params = []
if 'user_id' in local_var_params:
query_params.append(('user_id', local_var_params['user_id'])) # noqa: E501
if 'uuid_filter' in local_var_params:
query_params.append(('uuid_filter', local_var_params['uuid_filter'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/locations/{location_id}/files/metadata', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FileMetaDataArrayEnveloped', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_storage_locations(self, user_id, **kwargs): # noqa: E501
"""Get available storage locations # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_storage_locations(user_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str user_id: (required)
:return: FileLocationArrayEnveloped
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_storage_locations_with_http_info(user_id, **kwargs) # noqa: E501
else:
(data) = self.get_storage_locations_with_http_info(user_id, **kwargs) # noqa: E501
return data
def get_storage_locations_with_http_info(self, user_id, **kwargs): # noqa: E501
"""Get available storage locations # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_storage_locations_with_http_info(user_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str user_id: (required)
:return: FileLocationArrayEnveloped
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['user_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_storage_locations" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'user_id' is set
if ('user_id' not in local_var_params or
local_var_params['user_id'] is None):
raise ValueError("Missing the required parameter `user_id` when calling `get_storage_locations`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'user_id' in local_var_params:
query_params.append(('user_id', local_var_params['user_id'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/locations', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FileLocationArrayEnveloped', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def health_check(self, **kwargs): # noqa: E501
"""Service health-check endpoint # noqa: E501
Some general information on the API and state of the service behind # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.health_check(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: HealthCheckEnveloped
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.health_check_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.health_check_with_http_info(**kwargs) # noqa: E501
return data
def health_check_with_http_info(self, **kwargs): # noqa: E501
"""Service health-check endpoint # noqa: E501
Some general information on the API and state of the service behind # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.health_check_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: HealthCheckEnveloped
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method health_check" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='HealthCheckEnveloped', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def update_file_meta_data(self, file_id, location_id, **kwargs): # noqa: E501
"""Update File Metadata # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_file_meta_data(file_id, location_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str file_id: (required)
:param str location_id: (required)
:param FileMetaDataType file_meta_data_type:
:return: FileMetaDataEnveloped
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_file_meta_data_with_http_info(file_id, location_id, **kwargs) # noqa: E501
else:
(data) = self.update_file_meta_data_with_http_info(file_id, location_id, **kwargs) # noqa: E501
return data
def update_file_meta_data_with_http_info(self, file_id, location_id, **kwargs): # noqa: E501
"""Update File Metadata # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_file_meta_data_with_http_info(file_id, location_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str file_id: (required)
:param str location_id: (required)
:param FileMetaDataType file_meta_data_type:
:return: FileMetaDataEnveloped
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['file_id', 'location_id', 'file_meta_data_type'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_file_meta_data" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'file_id' is set
if ('file_id' not in local_var_params or
local_var_params['file_id'] is None):
raise ValueError("Missing the required parameter `file_id` when calling `update_file_meta_data`") # noqa: E501
# verify the required parameter 'location_id' is set
if ('location_id' not in local_var_params or
local_var_params['location_id'] is None):
raise ValueError("Missing the required parameter `location_id` when calling `update_file_meta_data`") # noqa: E501
collection_formats = {}
path_params = {}
if 'file_id' in local_var_params:
path_params['fileId'] = local_var_params['file_id'] # noqa: E501
if 'location_id' in local_var_params:
path_params['location_id'] = local_var_params['location_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'file_meta_data_type' in local_var_params:
body_params = local_var_params['file_meta_data_type']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/locations/{location_id}/files/{fileId}/metadata', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='FileMetaDataEnveloped', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def upload_file(self, file_id, location_id, user_id, **kwargs): # noqa: E501
"""Returns upload link or performs copy operation to datcore # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.upload_file(file_id, location_id, user_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str file_id: (required)
:param str location_id: (required)
:param str user_id: (required)
:param str extra_location:
:param str extra_source:
:return: PresignedLinkEnveloped
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.upload_file_with_http_info(file_id, location_id, user_id, **kwargs) # noqa: E501
else:
(data) = self.upload_file_with_http_info(file_id, location_id, user_id, **kwargs) # noqa: E501
return data
def upload_file_with_http_info(self, file_id, location_id, user_id, **kwargs): # noqa: E501
"""Returns upload link or performs copy operation to datcore # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.upload_file_with_http_info(file_id, location_id, user_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str file_id: (required)
:param str location_id: (required)
:param str user_id: (required)
:param str extra_location:
:param str extra_source:
:return: PresignedLinkEnveloped
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['file_id', 'location_id', 'user_id', 'extra_location', 'extra_source'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method upload_file" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'file_id' is set
if ('file_id' not in local_var_params or
local_var_params['file_id'] is None):
raise ValueError("Missing the required parameter `file_id` when calling `upload_file`") # noqa: E501
# verify the required parameter 'location_id' is set
if ('location_id' not in local_var_params or
local_var_params['location_id'] is None):
raise ValueError("Missing the required parameter `location_id` when calling `upload_file`") # noqa: E501
# verify the required parameter 'user_id' is set
if ('user_id' not in local_var_params or
local_var_params['user_id'] is None):
raise ValueError("Missing the required parameter `user_id` when calling `upload_file`") # noqa: E501
collection_formats = {}
path_params = {}
if 'file_id' in local_var_params:
path_params['fileId'] = local_var_params['file_id'] # noqa: E501
if 'location_id' in local_var_params:
path_params['location_id'] = local_var_params['location_id'] # noqa: E501
query_params = []
if 'user_id' in local_var_params:
query_params.append(('user_id', local_var_params['user_id'])) # noqa: E501
if 'extra_location' in local_var_params:
query_params.append(('extra_location', local_var_params['extra_location'])) # noqa: E501
if 'extra_source' in local_var_params:
query_params.append(('extra_source', local_var_params['extra_source'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/locations/{location_id}/files/{fileId}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PresignedLinkEnveloped', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
| 2.015625 | 2 |
reservation_management/migrations/0021_delete_greenpass.py | mattiolato98/reservation-ninja | 1 | 2982 | <filename>reservation_management/migrations/0021_delete_greenpass.py
# Generated by Django 3.2.7 on 2021-10-22 14:23
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('reservation_management', '0020_greenpass'),
]
operations = [
migrations.DeleteModel(
name='GreenPass',
),
]
| 1.429688 | 1 |
demos/iaf_pop_demo.py | bionet/ted.python | 4 | 2983 | <reponame>bionet/ted.python
#!/usr/bin/env python
"""
Demos of encoding and decoding algorithms using populations of
IAF neurons.
"""
# Copyright (c) 2009-2015, <NAME>
# All rights reserved.
# Distributed under the terms of the BSD license:
# http://www.opensource.org/licenses/bsd-license
import sys
import numpy as np
# Set matplotlib backend so that plots can be generated without a
# display:
import matplotlib
matplotlib.use('AGG')
from bionet.utils.misc import func_timer
import bionet.utils.band_limited as bl
import bionet.utils.plotting as pl
import bionet.ted.iaf as iaf
# For determining output plot file names:
output_name = 'iaf_pop_demo_'
output_count = 0
output_ext = '.png'
# Define algorithm parameters and input signal:
dur = 0.1
dt = 1e-6
f = 32
bw = 2*np.pi*f
t = np.arange(0, dur, dt)
np.random.seed(0)
noise_power = None
if noise_power == None:
fig_title = 'IAF Input Signal with No Noise'
else:
fig_title = 'IAF Input Signal with %d dB of Noise' % noise_power
print fig_title
u = func_timer(bl.gen_band_limited)(dur, dt, f, noise_power)
pl.plot_signal(t, u, fig_title,
output_name + str(output_count) + output_ext)
# Test leaky IAF algorithms:
b1 = 3.5 # bias
d1 = 0.7 # threshold
R1 = 10.0 # resistance
C1 = 0.01 # capacitance
try:
iaf.iaf_recoverable(u, bw, b1, d1, R1, C1)
except ValueError:  # reconstruction condition not satisfied
sys.exit()
b2 = 3.4 # bias
d2 = 0.8 # threshold
R2 = 9.0 # resistance
C2 = 0.01 # capacitance
try:
iaf.iaf_recoverable(u, bw, b2, d2, R2, C2)
except ValueError:  # reconstruction condition not satisfied
sys.exit()
b_list = np.array([b1, b2])
d_list = np.array([d1, d2])
R_list = np.array([R1, R2])
C_list = np.array([C1, C2])
output_count += 1
fig_title = 'Signal Encoded Using Leaky IAF Encoder'
print fig_title
s_list = func_timer(iaf.iaf_encode_pop)([u, u], dt, b_list, d_list, R_list, C_list)
pl.plot_encoded(t, u, s_list[0], fig_title + ' #1',
output_name + str(output_count) + output_ext)
output_count += 1
pl.plot_encoded(t, u, s_list[1], fig_title + ' #2',
output_name + str(output_count) + output_ext)
output_count += 1
fig_title = 'Signal Decoded Using Leaky IAF Population Decoder'
print fig_title
u_rec = func_timer(iaf.iaf_decode_pop)(s_list, dur, dt, bw,
b_list, d_list, R_list,
C_list)
pl.plot_compare(t, u, u_rec, fig_title,
output_name + str(output_count) + output_ext)
# Test ideal IAF algorithms:
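# (setting the leak resistance R to infinity reduces the leaky integrate-and-fire
# model to an ideal, non-leaky integrator)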
b1 = 3.5 # bias
d1 = 0.7 # threshold
R1 = np.inf # resistance
C1 = 0.01 # capacitance
try:
iaf.iaf_recoverable(u, bw, b1, d1, R1, C1)
except ValueError:  # reconstruction condition not satisfied
sys.exit()
b2 = 3.4 # bias
d2 = 0.8 # threshold
R2 = np.inf # resistance
C2 = 0.01 # capacitance
try:
iaf.iaf_recoverable(u, bw, b2, d2, R2, C2)
except ValueError:  # reconstruction condition not satisfied
sys.exit()
b_list = [b1, b2]
d_list = [d1, d2]
R_list = [R1, R2]
C_list = [C1, C2]
output_count += 1
fig_title = 'Signal Encoded Using Ideal IAF Encoder'
print fig_title
s_list = func_timer(iaf.iaf_encode_pop)([u, u], dt, b_list, d_list, R_list, C_list)
pl.plot_encoded(t, u, s_list[0], fig_title + ' #1',
output_name + str(output_count) + output_ext)
output_count += 1
pl.plot_encoded(t, u, s_list[1], fig_title + ' #2',
output_name + str(output_count) + output_ext)
output_count += 1
fig_title = 'Signal Decoded Using Ideal IAF Population Decoder'
print fig_title
u_rec = func_timer(iaf.iaf_decode_pop)(s_list, dur, dt, bw,
b_list, d_list, R_list,
C_list)
pl.plot_compare(t, u, u_rec, fig_title,
output_name + str(output_count) + output_ext)
| 2.46875 | 2 |
regtests/calling/function_expression.py | bpmbank/PythonJS | 319 | 2984 | """func expr"""
F = function( x,y ):
return x+y
def main():
TestError( F(1,2) == 3 )
| 3.09375 | 3 |
nadmin/plugins/sortable.py | A425/django-xadmin-1.8 | 1 | 2985 | #coding:utf-8
from nadmin.sites import site
from nadmin.views import BaseAdminPlugin, ListAdminView
SORTBY_VAR = '_sort_by'
class SortablePlugin(BaseAdminPlugin):
sortable_fields = ['sort']
# Media
def get_media(self, media):
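        # Only inject the drag-sort JS when sortable fields are configured and
        # the changelist is currently sorted (the _sort_by query parameter is set).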
if self.sortable_fields and self.request.GET.get(SORTBY_VAR):
media = media + self.vendor('nadmin.plugin.sortable.js')
return media
# Block Views
def block_top_toolbar(self, context, nodes):
if self.sortable_fields:
pass
# current_refresh = self.request.GET.get(REFRESH_VAR)
# context.update({
# 'has_refresh': bool(current_refresh),
# 'clean_refresh_url': self.admin_view.get_query_string(remove=(REFRESH_VAR,)),
# 'current_refresh': current_refresh,
# 'refresh_times': [{
# 'time': r,
# 'url': self.admin_view.get_query_string({REFRESH_VAR: r}),
# 'selected': str(r) == current_refresh,
# } for r in self.refresh_times],
# })
# nodes.append(loader.render_to_string('nadmin/blocks/refresh.html', context_instance=context))
site.register_plugin(SortablePlugin, ListAdminView)
| 1.828125 | 2 |
batch-tmp.py | texastribune/donations | 6 | 2986 | <filename>batch-tmp.py
import logging
from config import ACCOUNTING_MAIL_RECIPIENT, LOG_LEVEL, REDIS_URL, TIMEZONE
from datetime import datetime, timedelta
from pytz import timezone
import celery
import redis
from charges import amount_to_charge, charge, ChargeException
from npsp import Opportunity
from util import send_email
zone = timezone(TIMEZONE)
log_level = logging.getLevelName(LOG_LEVEL)
root = logging.getLogger()
root.setLevel(log_level)
class Log(object):
"""
    This encapsulates sending to the console/stdout and email all in one.
"""
def __init__(self):
self.log = list()
def it(self, string):
"""
Add something to the log.
"""
logging.debug(string)
self.log.append(string)
def send(self):
"""
Send the assembled log out as an email.
"""
body = "\n".join(self.log)
recipient = ACCOUNTING_MAIL_RECIPIENT
subject = "Batch run"
send_email(body=body, recipient=recipient, subject=subject)
class AlreadyExecuting(Exception):
"""
    Raised when more than one job of the same type is already running.
"""
pass
class Lock(object):
"""
    Claim an exclusive lock using Redis.
"""
def __init__(self, key):
self.key = key
self.connection = redis.from_url(REDIS_URL)
def acquire(self):
if self.connection.get(self.key):
raise AlreadyExecuting
self.connection.setex(name=self.key, value="bar", time=1200)
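        # setex gives the lock key a 1200-second (20-minute) TTL, so a crashed
        # worker cannot hold the lock forever.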
def release(self):
self.connection.delete(self.key)
# TODO stop sending this email and just rely on Sentry and logs?
@celery.task()
def charge_cards():
lock = Lock(key="charge-cards-lock")
lock.acquire()
log = Log()
log.it("---Starting batch job...")
three_days_ago = (datetime.now(tz=zone) - timedelta(days=10)).strftime("%Y-%m-%d")
today = datetime.now(tz=zone).strftime("%Y-%m-%d")
opportunities = Opportunity.list(begin=three_days_ago, end=today)
log.it("---Processing charges...")
log.it(f"Found {len(opportunities)} opportunities available to process.")
for opportunity in opportunities:
if not opportunity.stripe_customer:
continue
amount = amount_to_charge(opportunity)
log.it(
f"---- Charging ${amount} to {opportunity.stripe_customer} ({opportunity.name})"
)
try:
charge(opportunity)
except ChargeException as e:
logging.info("Batch charge error")
e.send_slack_notification()
log.send()
lock.release()
if __name__ == "__main__":
charge_cards()
| 2.328125 | 2 |
rotkehlchen/tests/integration/test_blockchain.py | coblee/rotki | 0 | 2987 | import operator
import os
from unittest.mock import patch
import pytest
import requests
from rotkehlchen.chain.ethereum.manager import NodeName
from rotkehlchen.constants.assets import A_BTC
from rotkehlchen.tests.utils.blockchain import mock_etherscan_query
from rotkehlchen.typing import SupportedBlockchain
@pytest.mark.skipif(
os.name == 'nt',
reason='Not testing running with geth in windows at the moment',
)
@pytest.mark.parametrize('have_blockchain_backend', [True])
def test_eth_connection_initial_balances(
blockchain,
inquirer, # pylint: disable=unused-argument
):
"""TODO for this test. Either:
1. Not use own chain but use a normal open node for this test.
2. If we use own chain, deploy the eth-scan contract there.
But probably (1) makes more sense
"""
msg = 'Should be connected to ethereum node'
assert blockchain.ethereum.web3_mapping.get(NodeName.OWN) is not None, msg
def test_query_btc_balances(blockchain):
blockchain.query_btc_balances()
assert 'BTC' not in blockchain.totals
account = '<KEY>'
blockchain.modify_btc_account(account, 'append', operator.add)
blockchain.query_btc_balances()
assert blockchain.totals[A_BTC].usd_value is not None
assert blockchain.totals[A_BTC].amount is not None
@pytest.mark.parametrize('number_of_eth_accounts', [0])
def test_add_remove_account_assure_all_balances_not_always_queried(blockchain):
"""Due to a programming mistake at addition and removal of blockchain accounts
after the first time all balances were queried every time. That slowed
everything down (https://github.com/rotki/rotki/issues/678).
This is a regression test for that behaviour
TODO: Is this still needed? Shouldn't it just be removed?
Had to add lots of mocks to make it not be a slow test
"""
addr1 = '0xe188c6BEBB81b96A65aa20dDB9e2aef62627fa4c'
addr2 = '<KEY>'
etherscan_patch = mock_etherscan_query(
eth_map={addr1: {'ETH': 1}, addr2: {'ETH': 2}},
etherscan=blockchain.ethereum.etherscan,
original_requests_get=requests.get,
original_queries=[],
)
ethtokens_max_chunks_patch = patch(
'rotkehlchen.chain.ethereum.tokens.ETHERSCAN_MAX_TOKEN_CHUNK_LENGTH',
new=800,
)
with etherscan_patch, ethtokens_max_chunks_patch:
blockchain.add_blockchain_accounts(
blockchain=SupportedBlockchain.ETHEREUM,
accounts=[addr1],
)
assert addr1 in blockchain.accounts.eth
with etherscan_patch, ethtokens_max_chunks_patch, patch.object(blockchain, 'query_balances') as mock: # noqa: E501
blockchain.remove_blockchain_accounts(
blockchain=SupportedBlockchain.ETHEREUM,
accounts=[addr1],
)
assert addr1 not in blockchain.accounts.eth
assert mock.call_count == 0, 'blockchain.query_balances() should not have been called'
addr2 = '0x78a087fCf440315b843632cFd6FDE6E5adcCc2C2'
with etherscan_patch, ethtokens_max_chunks_patch, patch.object(blockchain, 'query_balances') as mock: # noqa: E501
blockchain.add_blockchain_accounts(
blockchain=SupportedBlockchain.ETHEREUM,
accounts=[addr2],
)
| 2.15625 | 2 |
__init__.py | LaptopBiologist/ReferenceAnalyzer | 0 | 2988 | #-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: I am
#
# Created: 02/11/2017
# Copyright: (c) I am 2017
# Licence: <your licence>
#-------------------------------------------------------------------------------
def main():
pass
if __name__ == '__main__':
main()
| 1.8125 | 2 |
app/__init__.py | jimmybutton/moviedb | 0 | 2989 | from flask import Flask
from config import Config
from sqlalchemy import MetaData
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
from flask_moment import Moment
from flask_misaka import Misaka
from flask_bootstrap import Bootstrap
import os
import logging
from logging.handlers import RotatingFileHandler
from elasticsearch import Elasticsearch
convention = {
"ix": 'ix_%(column_0_label)s',
"uq": "uq_%(table_name)s_%(column_0_name)s",
"ck": "ck_%(table_name)s_%(constraint_name)s",
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
"pk": "pk_%(table_name)s"
}
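# Explicit constraint-naming convention so Alembic can refer to constraints by
# name, which its batch ("copy and move") migrations on SQLite (enabled below
# via render_as_batch=True) require.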
metadata = MetaData(naming_convention=convention)
db = SQLAlchemy(metadata=metadata)
migrate = Migrate()
login = LoginManager()
login.login_view = "auth.login"
moment = Moment()
md = Misaka()
bootstrap = Bootstrap()
def create_app(config_class=Config):
app = Flask(__name__)
app.config.from_object(Config)
db.init_app(app)
with app.app_context():
if db.engine.url.drivername == 'sqlite':
migrate.init_app(app, db, render_as_batch=True)
else:
migrate.init_app(app, db)
# migrate.init_app(app, db)
login.init_app(app)
moment.init_app(app)
md.init_app(app)
bootstrap.init_app(app)
from app.errors import bp as errors_bp
app.register_blueprint(errors_bp)
from app.auth import bp as auth_bp
app.register_blueprint(auth_bp, url_prefix='/auth')
from app.main import bp as main_bp
app.register_blueprint(main_bp)
from app.cli import bp as cli_bp
app.register_blueprint(cli_bp)
app.elasticsearch = Elasticsearch([app.config['ELASTICSEARCH_URL']]) \
if app.config['ELASTICSEARCH_URL'] else None
from app import models
if not app.debug and not app.testing:
if not os.path.exists("logs"):
os.mkdir("logs")
file_handler = RotatingFileHandler(
"logs/moviedb.log", maxBytes=10240, backupCount=10
)
file_handler.setFormatter(
logging.Formatter(
"%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]"
)
)
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
app.logger.setLevel(logging.INFO)
app.logger.info("Moviedb startup")
return app
| 2.28125 | 2 |
optimize.py | AranKomat/Sequential-Alpha-Zero | 7 | 2990 | <gh_stars>1-10
import numpy as np
import random
from time import time, sleep
import h5py
import torch
import torch.nn as nn
import torch.optim as optimizer
import glob
import os
#from scipy.stats import rankdata
from lstm import Model, initialize
from Optim import ScheduledOptim
# import _pickle as cPickle
# np.set_printoptions(threshold=np.nan)
def start(config):
model = Model(config)
model = model.to(config.device)
#optim = optimizer.SGD(model.parameters(), lr=2e-4, momentum=0.9, weight_decay=config.c)
#lr_scheduler = torch.optim.lr_scheduler.StepLR(optim, step_size=200, gamma=0.1) # 20M iters
optim = ScheduledOptim(
optimizer.Adam(
filter(lambda p: p.requires_grad, model.parameters()), lr=config.lr,
betas=(0.9, 0.98), eps=1e-09),
config.hidden_dim, 2000)
list_of_files = glob.glob(config.model_path + '/*')
latest_file = None
if list_of_files:
latest_file = max(list_of_files, key=os.path.getctime)
model_ckpt = latest_file
# model_ckpt = config.model_path + '/model-454.pth'
print(model_ckpt)
if model_ckpt:
checkpoint = torch.load(model_ckpt)
model.load_state_dict(checkpoint['state_dict'])
optim.optimizer.load_state_dict(checkpoint['optimizer'])
start_iter = model_ckpt.split('-')[-1].split('.')[0]
start_iter = int(start_iter)
else:
model.apply(initialize)
start_iter = 0
count = 0
for iter in range(start_iter, config.total_iterations):
print('iteration: %s' % iter)
#if (iter + 1) % 100000 == 0:
# lr_scheduler.step()
start_time = time()
optim.update_learning_rate(iter)
# reads the randomly sampled (s,pi,z)'s from the buffer
# ~ 0.1s
# TODO: if error, set a lock
# translate, _ = cPickle.load(open('save/vocab_cotra.pkl', 'rb'))
with h5py.File("buffer", "r") as f:
cur_row = int(f['/cur_row'][0])
s_buffer = f['/s']
pi_buffer = f['/pi']
z_buffer = f['/z']
s_tmp = []
pi_tmp = []
z_tmp = []
df = cur_row - count
'''x = np.bincount(s_buffer[:,1].astype(int)) / 500000
for i in range(len(x)):
if x[i] > 0.01:
print(i, x[i], translate[i])
break'''
if count == 0:
count = cur_row
t_inf = time()
if count != 0 and df >= 1000:
print('time required for 32 self-play games: ', 32 * (time() - t_inf) / df)
t_inf = time()
count = cur_row
if cur_row >= config.buffer_size:
r = np.sort(
np.random.choice(list(range(0, config.buffer_size)), (config.batch_size // 2), replace=False))
else:
r = np.sort(
np.random.choice(list(range(0, cur_row)), (config.batch_size // 2), replace=False))
tmp = []
# randomly sample rows 8 times for a dramatic speedup.
num_segments = 8
for i in range(num_segments):
tmp.append(
r[(config.batch_size // 2) // num_segments * i:(config.batch_size // 2) // num_segments * (i + 1)])
for i in range(num_segments):
s_tmp.append(s_buffer[tmp[i], :config.max_length])
pi_tmp.append(pi_buffer[tmp[i], :config.max_length, ...])
z_tmp.append(z_buffer[tmp[i], ...])
s = np.concatenate(s_tmp, 0)
pi = np.concatenate(pi_tmp, 0)
z = np.concatenate(z_tmp, 0)
# print('io time: ',time() - start_time)
# decompresses sampled pi's
# takes about 0.005s
new_pi = np.zeros(((config.batch_size // 2), config.max_length, config.vocab_size))
for i in range((config.batch_size // 2)):
for j in range(config.max_length):
if pi[i, j, 0] == -1: # meaning the terminal state; pi=0
new_pi[i, j, :] = 0
elif pi[i, j, 0] == -2 or sum(pi[i, j, :]) == 0: # meaning the padding; place -1 padding
new_pi[i, j, :] = -1
else:
# Beware that np.bincount's bin is [0,1,...min_length-1]
new_pi[i, j, :] = np.bincount(pi[i, j, :].astype(int),
minlength=config.vocab_size) / config.simulation_num_per_move
pi = new_pi
# creating a mask for loss function and preparing a minibatch
def generate_mask(array):
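            # Per-row validity mask: positions up to and including the first
            # period token are 1, positions from the first blank (padding)
            # token onward are 0; rows containing neither are fully valid.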
new_array = np.zeros_like(array)
for i in range(len(array)):
for j in range(len(array[i])):
if j == len(array[i]) - 1:
new_array[i, :] = 1
elif array[i, j] == config.period_token:
new_array[i, :j + 1] = 1
break
elif array[i, j] == config.blank_token:
new_array[i, :j] = 1
break
return new_array
def pi_mask(array):
array = array[:, 1:]
array = np.pad(array, ((0, 0), (0, 1)), 'constant')
return generate_mask(array)
# pi_tmp isn't modified here, since the mask will be modified appropriately
        pi_mask = pi_mask(s)
z_mask = generate_mask(s)
z_batch = np.concatenate(
[np.ones([(config.batch_size // 2), config.max_length]) * (-1),
np.ones([(config.batch_size // 2), config.max_length])])
def convert(x):
return torch.tensor(x.astype(np.float32), device=config.device)
t2 = time()
# gradient update
model.train()
cache = []
for i in range(config.depth // config.unit_depth):
cache += [torch.zeros(config.batch_size, config.hidden_dim,device=config.device),
torch.zeros(config.batch_size, config.hidden_dim,device=config.device)]
s_batch = convert(np.array(s)).long()
policy, v, cache = model(s_batch, tuple(cache))
def loss_policy(y_true, y_pred):
return torch.sum(-y_true * torch.log(y_pred + 1.0e-8), 2)
def loss_value(y_true, y_pred):
return (y_true - y_pred) ** 2
pi_mask = convert(pi_mask)
z_mask = convert(z_mask)
z = convert(z)
pi = convert(pi)
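        # AlphaZero-style objective: masked policy cross-entropy plus masked
        # value MSE, averaged over the valid (unmasked) positions per sequence.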
loss = torch.mean(torch.sum(loss_policy(pi, policy) * pi_mask +
loss_value(z, v) * z_mask
, 1) / torch.sum(z_mask, 1))
loss.backward()
gn = nn.utils.clip_grad_norm(model.parameters(), config.clip)
print(gn)
optim.step()
optim.zero_grad()
print("grad update: %s seconds" % (time() - t2))
print("iteration: %s seconds" % (time() - start_time))
checkpoint = {'state_dict': model.state_dict(),
'optimizer': optim.optimizer.state_dict()}
sleep(config.training_sleep_time)
        torch.save(checkpoint, config.model_path + '/model' + '-' + str(iter + 1) + '.pth')
| 1.820313 | 2 |
src/bin_expr.py | Command-Master/MCCC | 6 | 2991 | from c_int import Int
from casting import cast
from globals_consts import NAMESPACE
from temps import used_temps, get_temp, get_temp_func
def binary_expression(copy_strings, expression, target, variables_name, vtypes):
from expression import generate_expression
c1, t1, tt1 = generate_expression(None, expression.left, vtypes, variables_name, copy_strings, False)
c2, t2, tt2 = generate_expression(None, expression.right, vtypes, variables_name, copy_strings, False)
for ttt in tt1: used_temps.remove(ttt)
for ttt in tt2: used_temps.remove(ttt)
ot = cast(t1, t2)
rt = ot
if expression.op in ['<', '>', '<=', '>=', '==', '!=', '&&']:
rt = Int()
if target is None or target == []:
target = [get_temp() for _ in range(ot.size)]
used_temps.extend(target)
code = ''
if expression.op in ['&&', '||']:
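        # Short-circuit evaluation: the right operand is emitted into its own
        # mcfunction and only executed when the left result requires it
        # (non-zero for &&, zero for ||).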
if expression.op == '&&':
code += c1
code += t1.cast(ot, tt1, target)
f2 = get_temp_func()
f2h = open(f'{f2}.mcfunction', 'w')
f2h.write(c2)
f2h.write(t2.cast(ot, tt2, target))
f2h.close()
code += f'execute unless score {target[0]} {NAMESPACE} matches 0 run function {NAMESPACE}:{f2}\n'
elif expression.op == '||':
code += c1
code += t1.cast(ot, tt1, target)
f2 = get_temp_func()
f2h = open(f'{f2}.mcfunction', 'w')
f2h.write(c2)
f2h.write(t2.cast(ot, tt2, target))
f2h.close()
code += f'execute if score {target[0]} {NAMESPACE} matches 0 run function {NAMESPACE}:{f2}\n'
else:
if ot == t1:
code += c1
code += c2
code += t2.cast(ot, tt2, target)
code += ot.binary(expression.op, tt1, target, target)
else:
code += c1
code += t1.cast(ot, tt1, target)
code += c2
code += ot.binary(expression.op, target, tt2, target)
    return code, rt, target
| 2.5 | 2 |
tools/mkcodelet.py | bobmittmann/yard-ice | 2 | 2992 | <gh_stars>1-10
#!/usr/bin/python
from struct import *
from getopt import *
import sys
import os
import re
def usage():
global progname
print >> sys.stderr, ""
print >> sys.stderr, " Usage:", progname, "[options] fname"
print >> sys.stderr, ""
print >> sys.stderr, "Options"
print >> sys.stderr, " -h, --help show this help message and exit"
print >> sys.stderr, " -o FILENAME, --addr=FILENAME"
print >> sys.stderr, ""
def error(msg):
print >> sys.stderr, ""
print >> sys.stderr, "#error:", msg
usage()
sys.exit(2)
def mk_codelet(in_fname, out_fname, hdr_fname):
try:
in_file = open(in_fname, mode='r')
except:
print >> sys.stderr, "#error: can't open file: '%s'" % in_fname
sys.exit(1)
try:
c_file = open(out_fname, mode='w')
except:
print >> sys.stderr, "#error: can't create file: %s" % out_fname
sys.exit(1)
try:
h_file = open(hdr_fname, mode='w')
except:
print >> sys.stderr, "#error: can't create file: %s" % hdr_fname
sys.exit(1)
i = 0
for line in in_file:
if re.match("SYMBOL TABLE:", line):
break
s_pat = re.compile("([0-9a-f]{8}) ..*[0-9a-f]{8} ([.A-Za-z_][A-Za-z_0-9]*)")
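    # Matches objdump-style symbol-table rows: capture the 8-hex-digit address
    # and the symbol name, skipping the flags/section/size columns in between.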
sym = {}
for line in in_file:
m = s_pat.findall(line)
if m:
addr = int(m[0][0], 16)
name = m[0][1]
sym[addr] = name
else:
break
for line in in_file:
if re.match("Contents of section .text:", line):
break
token_pat = re.compile("([0-9a-f]{2})([0-9a-f]{2})([0-9a-f]{2})([0-9a-f]{2})")
c_file.write("#include <stdint.h>\n\n")
h_file.write("#include <stdint.h>\n\n")
addr = 0
i = 0
for line in in_file:
for a, b, c, d in token_pat.findall(line):
try:
sym[addr]
if (i > 0):
c_file.write("\n};\n\n")
c_file.write("const uint32_t %s[] = {" % sym[addr])
h_file.write("extern const uint32_t %s[];\n\n" % sym[addr])
i = 0
except KeyError:
pass
if ((i % 4) == 0):
if (i > 0):
c_file.write(",")
c_file.write("\n\t0x" + d + c + b + a)
else:
c_file.write(", 0x" + d + c + b + a )
i = i + 1;
addr = addr + 4
c_file.write("\n};\n")
in_file.close()
c_file.close()
h_file.close()
return
def main():
global progname
progname = sys.argv[0]
try:
opts, args = getopt(sys.argv[1:], "ho:", \
["help", "output="])
except GetoptError, err:
error(str(err))
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-o", "--output"):
out_fname = a
else:
assert False, "unhandled option"
if len(args) == 0:
error("missing fname")
if len(args) > 1:
error("too many arguments")
in_fname = args[0]
try:
out_fname
except NameError:
dirname, fname = os.path.split(in_fname)
basename, extension = os.path.splitext(fname)
out_fname = basename + '.' + 'c'
dirname, fname = os.path.split(out_fname)
basename, extension = os.path.splitext(fname)
hdr_fname = basename + '.' + 'h'
mk_codelet(in_fname, out_fname, hdr_fname)
if __name__ == "__main__":
main()
| 2.703125 | 3 |
utest/x3270/test_screenshot.py | MichaelSeeburger/Robot-Framework-Mainframe-3270-Library | 3 | 2993 | <reponame>MichaelSeeburger/Robot-Framework-Mainframe-3270-Library<gh_stars>1-10
import os
from pytest_mock import MockerFixture
from robot.api import logger
from Mainframe3270.x3270 import x3270
def test_set_screenshot_folder(under_test: x3270):
path = os.getcwd()
under_test.set_screenshot_folder(path)
assert under_test.imgfolder == os.getcwd()
def test_set_screenshot_folder_nonexistent(mocker: MockerFixture, under_test: x3270):
mocker.patch("robot.api.logger.error")
mocker.patch("robot.api.logger.warn")
path = os.path.join(os.getcwd(), "nonexistent")
under_test.set_screenshot_folder(path)
logger.error.assert_called_with('Given screenshots path "%s" does not exist' % path)
logger.warn.assert_called_with(
'Screenshots will be saved in "%s"' % under_test.imgfolder
)
def test_take_screenshot(mocker: MockerFixture, under_test: x3270):
mocker.patch("Mainframe3270.py3270.Emulator.save_screen")
mocker.patch("robot.api.logger.write")
mocker.patch("time.time", return_value=1.0)
under_test.take_screenshot(500, 500)
logger.write.assert_called_with(
'<iframe src="./screenshot_1000.html" height="500" width="500"></iframe>',
level="INFO",
html=True,
)
| 2.21875 | 2 |
splat/photometry.py | brackham/splat | 0 | 2994 | <filename>splat/photometry.py
# -*- coding: utf-8 -*-
from __future__ import print_function, division
"""
.. note::
These are the spectrophotometry functions for SPLAT
"""
# imports - internal
import copy
import os
# imports - external
import numpy
from astropy import units as u # standard units
from astropy import constants as const # physical constants in SI units
import matplotlib.patches as patches
import matplotlib.pyplot as plt
from scipy.integrate import trapz # for numerical integration
from scipy.interpolate import interp1d
# splat functions and constants
from .initialize import *
from .utilities import *
#####################################################
############### SPECTROPHOTOMETRY ###############
#####################################################
# this function has been obseleted
def checkFilter(filt,verbose=True):
output = False
f = copy.deepcopy(filt)
f = f.replace(' ','_').upper()
for k in list(FILTERS.keys()):
if f==k.upper() or f.lower() in FILTERS[k]['altnames']:
output = k
if output == False and verbose == True:
print('\nFilter '+filt+' not currently available for SPLAT; contact '+EMAIL+'\n')
filterInfo()
return output
def filterProfile(filt,**kwargs):
'''
:Purpose: Retrieve the filter profile for a SPLAT filter. Returns two arrays: the filter wavelength and filter transmission curve.
:param filter: String giving the name of one of the predefined filters listed in splat.FILTERS.keys() (required)
:param filterFolder: folder containing the filter transmission files (optional, default = splat.FILTER_FOLDER)
    :Example:
    >>> import splat.photometry as spphot
    >>> fwave, ftrans = spphot.filterProfile('MKO J')
'''
# keyword parameters
filterFolder = kwargs.get('filterFolder',SPLAT_PATH+FILTER_FOLDER)
if not os.path.exists(filterFolder):
filterFolder = SPLAT_URL+FILTER_FOLDER
# check that requested filter is in list
f0 = checkFilterName(filt, verbose=True)
if f0 == False: raise ValueError
filt = f0
# read in filter
fwave,ftrans = numpy.genfromtxt(os.path.normpath(filterFolder+FILTERS[filt]['file']), comments='#', unpack=True, missing_values = ('NaN','nan'), filling_values = (numpy.nan))
# print(type(fwave),type(ftrans),isinstance(fwave,numpy.ndarray),isinstance(ftrans,numpy.ndarray),not isinstance(fwave,numpy.ndarray) or not isinstance(ftrans,numpy.ndarray))
if not isinstance(fwave,numpy.ndarray) or not isinstance(ftrans,numpy.ndarray):
raise ValueError('\nProblem reading in {}'.format(filterFolder+FILTERS[filt]['file']))
fwave = fwave[~numpy.isnan(ftrans)]*u.micron
ftrans = ftrans[~numpy.isnan(ftrans)]
return fwave,ftrans
def filterMag(sp,filt,*args,**kwargs):
'''
:Purpose:
Determine the photometric magnitude of a source based on its
spectrum. Spectral fluxes are convolved with the filter profile specified by
the ``filter`` input. By default this filter is also
convolved with a model of Vega to extract Vega magnitudes,
but the user can also specify AB magnitudes, photon flux or energy flux.
:Required Parameters:
**sp**: Spectrum class object, which should contain wave, flux and noise array elements.
**filter**: String giving name of filter, which can either be one of the predefined filters listed in splat.FILTERS.keys() or a custom filter name
:Optional Parameters:
**custom** = None: A 2 x N vector array specifying the wavelengths and transmissions for a custom filter
**notch** = None: A 2 element array that specifies the lower and upper wavelengths for a notch filter (100% transmission within, 0% transmission without)
**vega** = True: compute Vega magnitudes (may be set by filter)
**ab** = False: compute AB magnitudes (may be set by filter)
**energy** = False: compute energy flux
**photon** = False: compute photon flux
**filterFolder** = splat.FILTER_FOLDER: folder containing the filter transmission files
    **vegaFile** = 'vega_kurucz.txt': name of the file containing the Vega flux data, must be within ``filterFolder``
**nsamples** = 100: number of samples to use in Monte Carlo error estimation
**info** = False: List the predefined filter names available
    **verbose** = False: print out warning messages
:Example:
>>> import splat
>>> import splat.photometry as spphot
>>> sp = splat.getSpectrum(shortname='1507-1627')[0]
>>> sp.fluxCalibrate('2MASS J',14.5)
>>> spphot.filterMag(sp,'MKO J')
(14.345894376898123, 0.027596454828421831)
'''
# keyword parameters
filterFolder = kwargs.get('filterFolder',SPLAT_PATH+FILTER_FOLDER)
if not os.path.exists(filterFolder):
filterFolder = SPLAT_URL+FILTER_FOLDER
vegaFile = kwargs.get('vegaFile',VEGAFILE)
info = kwargs.get('info',False)
custom = kwargs.get('custom',False)
notch = kwargs.get('notch',False)
vega = kwargs.get('vega',True)
ab = kwargs.get('ab',not vega)
rsr = kwargs.get('rsr',False)
nsamples = kwargs.get('nsamples',100)
verbose = kwargs.get('verbose',False)
# check that requested filter is in list
if isinstance(custom,bool) and isinstance(notch,bool):
f0 = checkFilterName(filt,verbose=True)
if f0 == False:
return numpy.nan, numpy.nan
filt = f0
# reset filter calculation methods based on filter design
if 'ab' in FILTERS[filt]['method']:
ab = kwargs.get('ab',True)
vega = not ab
if 'vega' in FILTERS[filt]['method']:
vega = kwargs.get('vega',True)
ab = not vega
rsr = FILTERS[filt]['rsr']
# other possibilities
photons = kwargs.get('photons',False)
photons = kwargs.get('photon',photons)
energy = kwargs.get('energy',False)
energy = kwargs.get('flux',energy)
if (photons or energy):
vega = False
ab = False
if photons: energy = False
if energy: photons = False
# Read in filter
if isinstance(custom,bool) and isinstance(notch,bool):
fwave,ftrans = filterProfile(filt,**kwargs)
# notch filter
elif isinstance(custom,bool) and isinstance(notch,list):
dn = (notch[1]-notch[0])/1000
fwave = numpy.arange(notch[0]-5.*dn,notch[1]+5.*dn,dn)
ftrans = numpy.zeros(len(fwave))
ftrans[numpy.where(numpy.logical_and(fwave >= notch[0],fwave <= notch[1]))] = 1.
# custom filter
else:
fwave,ftrans = custom[0],custom[1]
# units
if isinstance(fwave,u.quantity.Quantity) == True:
fwave = fwave.to(u.micron)
else:
fwave = fwave*u.micron
# check that spectrum and filter cover the same wavelength ranges
if numpy.nanmax(fwave) < numpy.nanmin(sp.wave) or numpy.nanmin(fwave) > numpy.nanmax(sp.wave):
if verbose==True: print('\nWarning: no overlap between spectrum for {} and filter {}'.format(sp.name,filt))
return numpy.nan, numpy.nan
if numpy.nanmin(fwave) < numpy.nanmin(sp.wave) or numpy.nanmax(fwave) > numpy.nanmax(sp.wave):
if verbose==True: print('\nWarning: spectrum for {} does not span full filter profile for {}'.format(sp.name,filt))
# interpolate spectrum onto filter wavelength function
wgood = numpy.where(~numpy.isnan(sp.noise))
if len(sp.wave[wgood]) > 0:
d = interp1d(sp.wave[wgood].value,sp.flux[wgood].value,bounds_error=False,fill_value=0.)
n = interp1d(sp.wave[wgood].value,sp.noise[wgood].value,bounds_error=False,fill_value=0)
# catch for models
else:
if verbose==True: print('\nWarning: data values in range of filter {} have no uncertainties'.format(filt))
d = interp1d(sp.wave.value,sp.flux.value,bounds_error=False,fill_value=0.)
n = interp1d(sp.wave.value,sp.flux.value*1.e-9,bounds_error=False,fill_value=0.)
result = []
if (vega):
# Read in Vega spectrum
vwave,vflux = numpy.genfromtxt(os.path.normpath(filterFolder+vegaFile), comments='#', unpack=True, \
missing_values = ('NaN','nan'), filling_values = (numpy.nan))
vwave = vwave[~numpy.isnan(vflux)]*u.micron
vflux = vflux[~numpy.isnan(vflux)]*(u.erg/(u.cm**2 * u.s * u.micron))
vflux.to(sp.flux_unit,equivalencies=u.spectral_density(vwave))
# interpolate Vega onto filter wavelength function
v = interp1d(vwave.value,vflux.value,bounds_error=False,fill_value=0.)
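        # Vega magnitude: m = -2.5 log10( int T(lam) f(lam) dlam / int T(lam) f_Vega(lam) dlam );
        # RSR (photon-counting) filters carry an extra factor of lam in each integrand.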
if rsr:
val = -2.5*numpy.log10(trapz(ftrans*fwave.value*d(fwave.value),fwave.value)/trapz(ftrans*fwave.value*v(fwave.value),fwave.value))
else:
val = -2.5*numpy.log10(trapz(ftrans*d(fwave.value),fwave.value)/trapz(ftrans*v(fwave.value),fwave.value))
for i in numpy.arange(nsamples):
# result.append(-2.5*numpy.log10(trapz(ftrans*numpy.random.normal(d(fwave),n(fwave))*sp.flux_unit,fwave)/trapz(ftrans*v(fwave)*sp.flux_unit,fwave)))
if rsr:
result.append(-2.5*numpy.log10(trapz(ftrans*fwave.value*(d(fwave.value)+numpy.random.normal(0,1.)*n(fwave.value)),fwave.value)/trapz(ftrans*fwave.value*v(fwave.value),fwave.value)))
else:
result.append(-2.5*numpy.log10(trapz(ftrans*(d(fwave.value)+numpy.random.normal(0,1.)*n(fwave.value)),fwave.value)/trapz(ftrans*v(fwave.value),fwave.value)))
outunit = 1.
elif (ab):
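        # AB magnitude: m_AB = -2.5 log10( <f_nu> / 3631 Jy ), where <f_nu> is the
        # mean flux density weighted by T(nu)/nu over the bandpass.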
nu = sp.wave.to('Hz',equivalencies=u.spectral())
fnu = sp.flux.to('Jy',equivalencies=u.spectral_density(sp.wave))
noisenu = sp.noise.to('Jy',equivalencies=u.spectral_density(sp.wave))
filtnu = fwave.to('Hz',equivalencies=u.spectral())
fconst = 3631*u.jansky
d = interp1d(nu.value,fnu.value,bounds_error=False,fill_value=0.)
n = interp1d(nu.value,noisenu.value,bounds_error=False,fill_value=0.)
b = trapz((ftrans/filtnu.value)*fconst.value,filtnu.value)
val = -2.5*numpy.log10(trapz(ftrans*d(filtnu.value)/filtnu.value,filtnu.value)/b)
for i in numpy.arange(nsamples):
a = trapz(ftrans*(d(filtnu.value)+numpy.random.normal(0,1)*n(filtnu.value))/filtnu.value,filtnu.value)
result.append(-2.5*numpy.log10(a/b))
outunit = 1.
elif (energy):
outunit = u.erg/u.s/u.cm**2
if rsr:
a = trapz(ftrans*fwave.value*d(fwave.value),fwave.value)*sp.wave.unit*sp.flux.unit
b = trapz(ftrans*fwave.value,fwave.value)*sp.wave.unit
c = trapz(ftrans*fwave.value*fwave.value,fwave.value)*sp.wave.unit*sp.wave.unit
val = (a/b * c/b).to(outunit).value
else:
a = trapz(ftrans*d(fwave.value),fwave.value)*sp.wave.unit*sp.flux.unit
b = trapz(ftrans,fwave.value)*sp.wave.unit
c = trapz(ftrans*fwave.value,fwave.value)*sp.wave.unit*sp.wave.unit
val = (a/b * c/b).to(outunit).value
for i in numpy.arange(nsamples):
if rsr:
result.append((trapz(ftrans*fwave.value*(d(fwave.value)+numpy.random.normal(0,1.)*n(fwave.value)),fwave.value)*sp.wave.unit*sp.flux.unit).to(outunit).value)
else:
result.append((trapz(ftrans*(d(fwave.value)+numpy.random.normal(0,1.)*n(fwave.value)),fwave.value)*sp.wave.unit*sp.flux.unit).to(outunit).value)
elif (photons):
outunit = 1./u.s/u.cm**2
convert = const.h.to('erg s')*const.c.to('micron/s')
val = (trapz(ftrans*fwave.value*convert.value*d(fwave.value),fwave.value)*sp.wave.unit*sp.flux.unit*convert.unit).to(outunit).value
for i in numpy.arange(nsamples):
result.append((trapz(ftrans*fwave.value*convert.value*(d(fwave.value)+numpy.random.normal(0,1.)*n(fwave.value)),fwave.value)*sp.wave.unit*sp.flux.unit*convert.unit).to(outunit).value)
else:
raise NameError('\nfilterMag not given a correct physical quantity (vega, ab, energy, photons) to compute photometry\n\n')
# val = numpy.nanmean(result)*outunit
err = numpy.nanstd(result)
if len(sp.wave[wgood]) == 0:
err = 0.
return val*outunit,err*outunit
def vegaToAB(filt,vegafile=VEGAFILE,filterfolder=SPLAT_PATH+FILTER_FOLDER,custom=False,notch=False,rsr=False,**kwargs):
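    # Returns the AB magnitude of Vega through the bandpass, i.e. the Vega-to-AB
    # offset such that m_AB is approximately m_Vega plus this value.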
# check that requested filter is in list
if isinstance(custom,bool) and isinstance(notch,bool):
f0 = checkFilterName(filt,verbose=True)
if f0 == False:
return numpy.nan, numpy.nan
filt = f0
rsr = FILTERS[filt]['rsr']
# Read in filter
if isinstance(custom,bool) and isinstance(notch,bool):
fwave,ftrans = filterProfile(filt,**kwargs)
# notch filter
elif isinstance(custom,bool) and isinstance(notch,list):
dn = (notch[1]-notch[0])/1000
fwave = numpy.arange(notch[0]-5.*dn,notch[1]+5.*dn,dn)
ftrans = numpy.zeros(len(fwave))
ftrans[numpy.where(numpy.logical_and(fwave >= notch[0],fwave <= notch[1]))] = 1.
# custom filter
else:
fwave,ftrans = custom[0],custom[1]
# Read in Vega spectrum
vwave,vflux = numpy.genfromtxt(os.path.normpath(filterfolder+vegafile), comments='#', unpack=True, \
missing_values = ('NaN','nan'), filling_values = (numpy.nan))
vwave = vwave[~numpy.isnan(vflux)]*u.micron
vflux = vflux[~numpy.isnan(vflux)]*(u.erg/(u.cm**2 * u.s * u.micron))
# trim spectrum
vflux = vflux[vwave>=numpy.nanmin(fwave)]
vwave = vwave[vwave>=numpy.nanmin(fwave)]
vflux = vflux[vwave<=numpy.nanmax(fwave)]
vwave = vwave[vwave<=numpy.nanmax(fwave)]
# convert to fnu
nu = vwave.to('Hz',equivalencies=u.spectral())
fnu = vflux.to('Jy',equivalencies=u.spectral_density(vwave))
filtnu = fwave.to('Hz',equivalencies=u.spectral())
fconst = 3631*u.jansky
d = interp1d(nu.value,fnu.value,bounds_error=False,fill_value=0.)
b = trapz((ftrans/filtnu.value)*fconst.value,filtnu.value)
return -2.5*numpy.log10(trapz(ftrans*d(filtnu.value)/filtnu.value,filtnu.value)/b)
def filterInfo(*args,**kwargs):
'''
    :Purpose: Prints out summary properties for one or more filters in the SPLAT reference library and returns them as a dictionary.
'''
verbose = kwargs.get('verbose',True)
if len(args) > 0:
fname = list(args)
elif kwargs.get('filter',False) != False:
fname = kwargs['filter']
else:
fname = sorted(list(FILTERS.keys()))
if isinstance(fname,list) == False:
fname = [fname]
output = {}
for k in fname:
f = checkFilterName(k)
if f != False:
output[f] = {}
output[f]['description'] = FILTERS[f]['description']
output[f]['zeropoint'] = FILTERS[f]['zeropoint']
fwave,ftrans = filterProfile(f,**kwargs)
try:
fwave = fwave.to(u.micron)
except:
fwave = fwave*u.micron
fw = fwave[numpy.where(ftrans > 0.01*numpy.nanmax(ftrans))]
ft = ftrans[numpy.where(ftrans > 0.01*numpy.nanmax(ftrans))]
fw05 = fwave[numpy.where(ftrans > 0.5*numpy.nanmax(ftrans))]
output[f]['lambda_mean'] = trapz(ft*fw,fw)/trapz(ft,fw)
output[f]['lambda_pivot'] = numpy.sqrt(trapz(fw*ft,fw)/trapz(ft/fw,fw))
output[f]['lambda_central'] = 0.5*(numpy.max(fw)+numpy.min(fw))
output[f]['lambda_fwhm'] = numpy.max(fw05)-numpy.min(fw05)
output[f]['lambda_min'] = numpy.min(fw)
output[f]['lambda_max'] = numpy.max(fw)
if verbose ==True:
print(f.replace('_',' ')+': '+output[f]['zeropoint'])
print('Zeropoint = {} Jy'.format(output[f]['zeropoint']))
print('Central wavelength: = {:.3f}'.format(output[f]['lambda_central']))
print('Mean wavelength: = {:.3f}'.format(output[f]['lambda_mean']))
print('Pivot point: = {:.3f}'.format(output[f]['lambda_pivot']))
print('FWHM = {:.3f}'.format(output[f]['lambda_fwhm']))
print('Wavelength range = {:.3f} to {:.3f}\n'.format(output[f]['lambda_min'],output[f]['lambda_max']))
else:
if verbose ==True: print(' Filter {} not in SPLAT filter list'.format(k))
kys = list(output.keys())
if len(kys) == 1: return output[kys[0]]
else: return output
def filterProperties(filt,**kwargs):
'''
:Purpose: Returns a dictionary containing key parameters for a particular filter.
:param filter: name of filter, must be one of the specifed filters given by splat.FILTERS.keys()
:type filter: required
:param verbose: print out information about filter to screen
:type verbose: optional, default = True
:Example:
>>> import splat
>>> data = splat.filterProperties('2MASS J')
Filter 2MASS J: 2MASS J-band
Zeropoint = 1594.0 Jy
Pivot point: = 1.252 micron
FWHM = 0.323 micron
Wavelength range = 1.066 to 1.442 micron
>>> data = splat.filterProperties('2MASS X')
Filter 2MASS X not among the available filters:
2MASS H: 2MASS H-band
2MASS J: 2MASS J-band
2MASS KS: 2MASS Ks-band
BESSEL I: Bessel I-band
FOURSTAR H: FOURSTAR H-band
FOURSTAR H LONG: FOURSTAR H long
FOURSTAR H SHORT: FOURSTAR H short
...
'''
filterFolder = kwargs.get('filterFolder',SPLAT_PATH+FILTER_FOLDER)
if not os.path.exists(filterFolder):
filterFolder = SPLAT_URL+FILTER_FOLDER
# check that requested filter is in list
filt = checkFilterName(filt)
if filt == False: return None
report = {}
report['name'] = filt
report['description'] = FILTERS[filt]['description']
report['zeropoint'] = FILTERS[filt]['zeropoint']
report['method'] = FILTERS[filt]['method']
report['rsr'] = FILTERS[filt]['rsr']
fwave,ftrans = filterProfile(filt,**kwargs)
try:
fwave = fwave.to(u.micron)
except:
fwave = fwave*u.micron
fw = fwave[numpy.where(ftrans > 0.01*numpy.nanmax(ftrans))]
ft = ftrans[numpy.where(ftrans > 0.01*numpy.nanmax(ftrans))]
fw05 = fwave[numpy.where(ftrans > 0.5*numpy.nanmax(ftrans))]
# print(trapz(ft,fw))
# print(trapz(fw*ft,fw))
report['lambda_mean'] = trapz(ft*fw,fw)/trapz(ft,fw)
report['lambda_pivot'] = numpy.sqrt(trapz(fw*ft,fw)/trapz(ft/fw,fw))
report['lambda_central'] = 0.5*(numpy.max(fw)+numpy.min(fw))
report['lambda_fwhm'] = numpy.max(fw05)-numpy.min(fw05)
report['lambda_min'] = numpy.min(fw)
report['lambda_max'] = numpy.max(fw)
report['wave'] = fwave
report['transmission'] = ftrans
# report values out
if kwargs.get('verbose',False):
print('\nFilter '+filt+': '+report['description'])
print('Zeropoint = {} Jy'.format(report['zeropoint']))
print('Pivot point: = {:.3f}'.format(report['lambda_pivot']))
print('FWHM = {:.3f}'.format(report['lambda_fwhm']))
print('Wavelength range = {:.3f} to {:.3f}\n'.format(report['lambda_min'],report['lambda_max']))
return report
def magToFlux(mag,filt,**kwargs):
'''
:Purpose: Converts a magnitude into an energy, and vice versa.
:param mag: magnitude on whatever system is defined for the filter or provided (required)
:param filter: name of filter, must be one of the specifed filters given by splat.FILTERS.keys() (required)
:param reverse: convert energy into magnitude instead (optional, default = False)
:param ab: magnitude is on the AB system (optional, default = filter preference)
:param vega: magnitude is on the Vega system (optional, default = filter preference)
    :param rsr: treat the filter as a relative spectral response (photon-counting) curve (optional, default = filter preference)
:param units: units for energy as an astropy.units variable; if this conversion does not work, the conversion is ignored (optional, default = erg/cm2/s)
:param verbose: print out information about filter to screen (optional, default = False)
WARNING: THIS CODE IS ONLY PARTIALLY COMPLETE
'''
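    # Example (hypothetical values): magToFlux(14.5, '2MASS J', uncertainty=0.03)
    # returns the band-integrated energy flux and its uncertainty as astropy
    # quantities, in erg / (cm2 s) by default.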
# keyword parameters
filterFolder = kwargs.get('filterFolder',SPLAT_PATH+FILTER_FOLDER)
if not os.path.exists(filterFolder):
filterFolder = SPLAT_URL+FILTER_FOLDER
vegaFile = kwargs.get('vegaFile','vega_kurucz.txt')
vega = kwargs.get('vega',True)
ab = kwargs.get('ab',not vega)
rsr = kwargs.get('rsr',False)
nsamples = kwargs.get('nsamples',100)
custom = kwargs.get('custom',False)
notch = kwargs.get('notch',False)
base_unit = u.erg/(u.cm**2 * u.s)
return_unit = kwargs.get('unit',base_unit)
e_mag = kwargs.get('uncertainty',0.)
e_mag = kwargs.get('unc',e_mag)
e_mag = kwargs.get('e_mag',e_mag)
if not isinstance(mag,u.quantity.Quantity): mag=mag*u.s/u.s
if not isinstance(e_mag,u.quantity.Quantity): e_mag=e_mag*mag.unit
# check that requested filter is in list
filt = checkFilterName(filt)
if filt == False: return numpy.nan, numpy.nan
# reset filter calculation methods based on filter design
if 'ab' in FILTERS[filt]['method']:
ab = kwargs.get('ab',True)
vega = not ab
if 'vega' in FILTERS[filt]['method']:
vega = kwargs.get('vega',True)
ab = not vega
if 'rsr' in FILTERS[filt]['method']:
rsr = kwargs.get('rsr',True)
# Read in filter
if isinstance(custom,bool) and isinstance(notch,bool):
fwave,ftrans = filterProfile(filt,**kwargs)
# notch filter
elif isinstance(custom,bool) and isinstance(notch,list):
dn = (notch[1]-notch[0])/1000
fwave = numpy.arange(notch[0]-5.*dn,notch[1]+5.*dn,dn)*u.micron
ftrans = numpy.zeros(len(fwave))
ftrans[numpy.where(numpy.logical_and(fwave >= notch[0],fwave <= notch[1]))] = 1.
# custom filter
else:
fwave,ftrans = custom[0],custom[1]
if isinstance(fwave,u.quantity.Quantity) == False: fwave=fwave*u.micron
if isinstance(ftrans,u.quantity.Quantity) == True: ftrans=ftrans.value
fwave = fwave[~numpy.isnan(ftrans)]
ftrans = ftrans[~numpy.isnan(ftrans)]
result = []
err = 0.
# magnitude -> energy
if kwargs.get('reverse',False) == False:
if vega == True:
# Read in Vega spectrum
vwave,vflux = numpy.genfromtxt(os.path.normpath(filterFolder+vegaFile), comments='#', unpack=True, \
missing_values = ('NaN','nan'), filling_values = (numpy.nan))
vwave = vwave[~numpy.isnan(vflux)]*u.micron
vflux = vflux[~numpy.isnan(vflux)]*(u.erg/(u.cm**2 * u.s * u.micron))
# interpolate Vega onto filter wavelength function
v = interp1d(vwave.value,vflux.value,bounds_error=False,fill_value=0.)
if rsr: fact = trapz(ftrans*fwave.value*v(fwave.value),fwave.value)
else: fact = trapz(ftrans*v(fwave.value),fwave.value)
val = 10.**(-0.4*mag.value)*fact*u.erg/(u.cm**2 * u.s)
# calculate uncertainty
if e_mag.value > 0.:
for i in numpy.arange(nsamples): result.append(10.**(-0.4*(mag.value+numpy.random.normal(0,1.)*e_mag.value))*fact)
err = (numpy.nanstd(result))*u.erg/(u.cm**2 * u.s)
else: err = 0.*u.erg/(u.cm**2 * u.s)
elif ab == True:
fconst = 3631*u.jansky
ftrans = (ftrans*fconst).to(u.erg/(u.cm**2 * u.s * u.micron),equivalencies=u.spectral_density(fwave))
if rsr: fact = trapz(ftrans.value*fwave.value,fwave.value)
else: fact = trapz(ftrans.value,fwave.value)
val = (10.**(-0.4*mag.value)*fact)*u.erg/(u.cm**2 * u.s)
# calculate uncertainty
if e_mag.value > 0.:
for i in numpy.arange(nsamples): result.append(10.**(-0.4*(mag.value+numpy.random.normal(0,1.)*e_mag.value))*fact)
err = (numpy.nanstd(result))*u.erg/(u.cm**2 * u.s)
else: err = 0.*u.erg/(u.cm**2 * u.s)
else:
raise ValueError('\nmagToFlux needs vega or ab method specified')
# convert to desired energy units
# try:
val.to(return_unit)
err.to(return_unit)
# except:
# print('\nWarning: unit {} is not an energy flux unit'.format(return_unit))
try:
val.to(base_unit)
err.to(base_unit)
except:
print('\nWarning: cannot convert result to an energy flux unit'.format(base_unit))
return numpy.nan, numpy.nan
return val, err
# energy -> magnitude
# THIS NEEDS TO BE COMPLETED
else:
print('passed')
pass
# check that input is an energy flux
# try:
# mag.to(base_unit)
# e_mag.to(base_unit)
# except:
# raise ValueError('\nInput quantity unit {} is not a flux unit'.format(mag.unit))
def visualizeFilter(filters,verbose=True,xra=[],yra=[0,1.2],**kwargs):
'''
:Purpose: Plots a filter profile or set of filter profiles, optionally on top of a spectrum
WARNING: THIS CODE IS CURRENTLY UNDER DEVELOPMENT, BUGS MAY BE COMMON
'''
filt = copy.deepcopy(filters)
wave_unit = kwargs.get('wave_unit',DEFAULT_WAVE_UNIT)
# single filter name
if isinstance(filt,str):
filt = [filt]
if isinstance(filt,list):
# list of filter names
if isinstance(filt[0],str):
for f in filt:
fc = checkFilterName(f)
filt.remove(f)
if fc == False:
if verbose==True: print('Removed filter {}: not included in SPLAT'.format(f))
else:
filt.insert(len(filt),fc)
if len(filt) == 0:
raise ValueError('Did not recognize any of the input filters {}'.format(filters))
# prep parameters
fwave,ftrans = filterProfile(f,**kwargs)
if isUnit(fwave): wave_unit = kwargs.get('wave_unit',fwave.unit)
xl = kwargs.get('xlabel','Wavelength ({})'.format(wave_unit))
yl = kwargs.get('ylabel','Transmission Curve')
legend = []
fig = plt.figure(figsize=kwargs.get('figsize',[5,4]))
for i,f in enumerate(filt):
fwave,ftrans = filterProfile(f,**kwargs)
if isUnit(fwave): fwave.to(wave_unit)
else: fwave = fwave*wave_unit
if kwargs.get('normalize',False): ftrans = ftrans/numpy.nanmax(ftrans)
plt.plot(fwave,ftrans)
if len(xra) == 0: xra = [numpy.nanmin(fwave.value),numpy.nanmax(fwave.value)]
xra = [numpy.nanmin([xra[0],numpy.nanmin(fwave.value)]),numpy.nanmax([xra[1],numpy.nanmax(fwave.value)])]
yra = [yra[0],numpy.nanmax([yra[1],numpy.nanmax(ftrans)])]
legend.append(FILTERS[f]['description'])
if FILTERS[f]['rsr'] == True: yl = kwargs.get('ylabel','Transmission Curve')
# list of notch ranges
if isinstance(filt[0],int) or isinstance(filt[0],float):
filt = [filt]
# list of notch ranges
if isinstance(filt[0],list):
xl = kwargs.get('xlabel','Wavelength ({})'.format(wave_unit))
yl = kwargs.get('ylabel','Transmission Curve')
legend = []
fig = plt.figure(figsize=kwargs.get('figsize',[5,4]))
for i,f in enumerate(filt):
fwave,ftrans = numpy.linspace(f[0],f[1],1000)*wave_unit,numpy.ones(1000)
plt.plot(fwave,ftrans)
if len(xra) == 0: xra = [numpy.nanmin(fwave.value),numpy.nanmax(fwave.value)]
xra = [numpy.nanmin([xra[0],numpy.nanmin(fwave.value)]),numpy.nanmax([xra[1],numpy.nanmax(fwave.value)])]
yra = [yra[0],numpy.nanmax([yra[1],numpy.nanmax(ftrans)])]
legend.append('Filter {}'.format(i+1))
else:
raise ValueError('Could not parse input {}'.format(filt))
# add a comparison spectrum
sp = kwargs.get('spectrum',None)
sp = kwargs.get('comparison',sp)
if isinstance(sp,splat.core.Spectrum) == True:
print(xra)
sp.normalize(xra)
sp.scale(numpy.nanmax(ftrans)*kwargs.get('comparison_scale',0.8))
plt.plot(sp.wave,sp.flux,color=kwargs.get('comparison_color','k'),alpha=kwargs.get('comparison_alpha',0.5))
legend.append(sp.name)
yra = [yra[0],yra[1]*1.1]
# finish up
plt.xlim(xra)
plt.ylim(yra)
plt.xlabel(xl)
plt.ylabel(yl)
plt.legend(legend)
# save if desired
file = kwargs.get('file','')
file = kwargs.get('filename',file)
file = kwargs.get('output',file)
if file != '': plt.savefig(file)
return fig
#########################################
######## SED FITTING TOOLS #########
### WARNING: THESE ARE EXPERIMENTAL!! ###
#########################################
# plan:
def modelMagnitudes(verbose=True):
'''
this will be a code that calculates a set of magnitudes for a model set's SED models
saves to file that could be uploaded
pre-save some model magnitudes
'''
pass
def interpolateMagnitudes(verbose=True):
'''
produces an interpolated value for a grid set of model magnitudes
'''
pass
def compareMagnitudes(mags1,mags2,unc=None,unc2=None,ignore=[],verbose=True):
'''
this code compares a set of magnitudes using one of several statistics
'''
chi = 0.
dm,em = [],[]
for f in list(mags1.keys()):
if f in list(mags2.keys()) and f in list(unc.keys()) and f not in ignore:
dm.append(mags1[f]-mags2[f])
em.append(unc[f])
# find best scale factor
dm = numpy.array(dm)
em = numpy.array(em)
    offset = numpy.sum(dm/em**2)/numpy.sum(1./em**2)
dmo = numpy.array([m-offset for m in dm])
return numpy.sum((dmo/em)**2), offset
def SEDFitGrid(verbose=True):
'''
this code will compare a set of magnitudes to a grid of model magnitudes and choose the
closest match based on various statistics
'''
pass
def SEDFitMCMC(verbose=True):
'''
this code will conduct a comparison of a set of magnitudes to model magnitudes using an
MCMC wrapper, and choose best/average/distribution of parameters
'''
pass
def SEDFitAmoeba(verbose=True):
'''
this code will conduct a comparison of a set of magnitudes to model magnitudes using an
Amoeba (Nelder-Mead) wrapper, and choose the closest match
'''
pass
def SEDVisualize(verbose=True):
'''
Visualizes magnitudes on SED scale (flux = lam x F_lam), with option of also comparing to SED spectrum
'''
pass
#####################################################
############### MAGNITUDE CLASS ###############
#####################################################
class Magnitude(object):
'''
:Description:
This is a class data structure for a magnitude value
'''
def __init__(self, magnitude, filt, uncertainty=0., magtype='apparent', verbose=False,**kwargs):
self.magnitude = magnitude
self.uncertainty = uncertainty
self.type = magtype
# check filter and rename if necessary
self.knownfilter = True
fflag = checkFilterName(filt,verbose=verbose)
if fflag == False:
if verbose== True: print('filter {} is not a standard filter; some functions may not work'.format(filt))
self.knownfilter = False
else: filt = fflag
self.filter = filt
# some things that are based on presets
if self.knownfilter == True:
self.wave,self.transmission = filterProfile(self.filter)
info = filterProperties(self.filter)
for k in info.keys(): setattr(self,k,info[k])
def __copy__(self):
'''
:Purpose: Make a copy of a Magnitude object
'''
s = type(self)(self.magnitude,self.filter,uncertainty=self.uncertainty)
s.__dict__.update(self.__dict__)
return s
# backup version
def copy(self):
'''
:Purpose: Make a copy of a Magnitude object
'''
s = type(self)(self.magnitude,self.filter,uncertainty=self.uncertainty)
s.__dict__.update(self.__dict__)
return s
def __repr__(self):
'''
        :Purpose: A simple representation of the Magnitude object
'''
if self.uncertainty != 0. and numpy.isfinite(self.uncertainty):
return '{} magnitude of {}+/-{}'.format(self.filter,self.magnitude,self.uncertainty)
else:
return '{} magnitude of {}'.format(self.filter,self.magnitude)
def __add__(self,other,samp=1000):
'''
:Purpose:
A representation of addition for Magnitude classes that takes into account uncertainties
:Output:
A new Magnitude object equal to the sum of values
'''
# make a copy and fill in combined magnitude
out = copy.deepcopy(self)
out.magnitude = self.magnitude+other.magnitude
out.uncertainty = self.uncertainty+other.uncertainty
# combine noises
if self.uncertainty != 0 and other.uncertainty != 0:
m1 = numpy.random.normal(self.magnitude,self.uncertainty,samp)
m2 = numpy.random.normal(other.magnitude,other.uncertainty,samp)
val = m1+m2
out.uncertainty = numpy.nanstd(val)
# check filter agreement
if self.filter != other.filter:
out.filter = '{}+{}'.format(self.filter,other.filter)
return out
def __sub__(self,other,samp=1000):
'''
:Purpose:
A representation of subtraction for Magnitude classes that takes into account uncertainties
:Output:
            A new Magnitude object equal to the difference of values
'''
# make a copy and fill in combined magnitude
out = copy.deepcopy(self)
out.magnitude = self.magnitude-other.magnitude
out.uncertainty = self.uncertainty+other.uncertainty
# combine noises
if self.uncertainty != 0 and other.uncertainty != 0:
m1 = numpy.random.normal(self.magnitude,self.uncertainty,samp)
m2 = numpy.random.normal(other.magnitude,other.uncertainty,samp)
val = m1-m2
out.uncertainty = numpy.nanstd(val)
# check filter agreement
if self.filter != other.filter:
out.filter = '{}-{}'.format(self.filter,other.filter)
return out
def flux(self,type='fnu',samp=1000):
'''
:Purpose:
Report the equivalent flux density of a magnitude
:Output:
astropy quantity in flux density units (default = erg/cm2/s/micron)
'''
pass
def addFlux(self,other,samp=1000):
'''
:Purpose:
A representation of addition for magnitudes (addition of fluxes)
:Output:
A new magnitude object equal to the equivalent sum of fluxes
'''
# check filter agreement
if self.filter != other.filter:
raise ValueError('magnitudes filters {} and {} are not the same'.format(self.filter,other.filter))
# make a copy and fill in combined magnitude
out = copy.deepcopy(self)
out.magnitude = self.magnitude-2.5*numpy.log10(1.+10.**(-0.4*(other.magnitude-self.magnitude)))
out.uncertainty = self.uncertainty+other.uncertainty
# combine noises
if self.uncertainty != 0 and other.uncertainty != 0:
m1 = numpy.random.normal(self.magnitude,self.uncertainty,samp)
m2 = numpy.random.normal(other.magnitude,other.uncertainty,samp)
val = m1-2.5*numpy.log10(1.+10.**(-0.4*(m2-m1)))
out.uncertainty = numpy.nanstd(val)
return out
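# A self-contained, illustrative check of the flux-space combination used by
# Magnitude.addFlux above: two magnitudes in the same filter are summed as fluxes
# via m_tot = m1 - 2.5*log10(1 + 10**(-0.4*(m2 - m1))), and the uncertainty is
# propagated with a simple Monte Carlo draw. The numerical values are made up for
# demonstration and do not require the filter machinery.
def _flux_addition_demo(m1=14.0, e1=0.05, m2=14.5, e2=0.08, samp=1000):
    combined = m1 - 2.5 * numpy.log10(1. + 10.**(-0.4 * (m2 - m1)))
    s1 = numpy.random.normal(m1, e1, samp)
    s2 = numpy.random.normal(m2, e2, samp)
    combined_unc = numpy.nanstd(s1 - 2.5 * numpy.log10(1. + 10.**(-0.4 * (s2 - s1))))
    return combined, combined_unc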
| 2.65625 | 3 |
helpers/time_utils.py | mandalorian-101/badger-system | 0 | 2995 | <reponame>mandalorian-101/badger-system
import datetime
ONE_MINUTE = 60
ONE_HOUR = 3600
ONE_DAY = 24 * ONE_HOUR
ONE_YEAR = 1 * 365 * ONE_DAY
def days(days):
return int(days * 86400.0)
def hours(hours):
return int(hours * 3600.0)
def minutes(minutes):
return int(minutes * 60.0)
def to_utc_date(timestamp):
return datetime.datetime.utcfromtimestamp(timestamp).strftime("%Y-%m-%dT%H:%M:%SZ")
def to_timestamp(date):
print(date.timestamp())
return int(date.timestamp())
def to_minutes(duration):
return duration / ONE_MINUTE
def to_days(duration):
return duration / ONE_DAY
def to_hours(duration):
return duration / ONE_HOUR | 3.296875 | 3 |
example_scripts/profile_validation/plot_validation_gridded_data.py | British-Oceanographic-Data-Centre/NEMO-ENTRUST | 0 | 2996 | """
Plot up surface or bottom (or any fixed level) errors from a profile object
with no z_dim (vertical dimension). Provide an array of netcdf files and
mess with the options to get a figure you like.
You can define how many rows and columns the plot will have. This script will
plot the provided list of netcdf datasets from left to right and top to bottom.
A colorbar will be placed right of the figure.
"""
import xarray as xr
import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.append("/Users/dbyrne/code/COAsT")
import coast
import pandas as pd
#%% File settings
run_name = "test"
# List of analysis output files. Profiles from each will be plotted
# on each axis of the plot
fn_list = [
"~/transfer/test_grid.nc",
"~/transfer/test_grid.nc",
]
# Filename for the output
fn_out = "/Users/dbyrne/transfer/surface_gridded_errors_{0}.png".format(run_name)
#%% General Plot Settings
var_name = "abs_diff_temperature" # Variable name in analysis file to plot
# If you used var modified to make gridded data
# then this is where to select season etc.
save_plot = False
# Masking out grid cells that don't contain many points
min_points_in_average = 5
name_of_count_variable = "grid_N"
# Subplot axes settings
n_r = 2 # Number of subplot rows
n_c = 2 # Number of subplot columns
figsize = (10, 5) # Figure size
lonbounds = [-15, 9.5] # Longitude bounds
latbounds = [45, 64] # Latitude bounds
subplot_padding = 0.5 # Amount of vertical and horizontal padding between plots
fig_pad = (0.075, 0.075, 0.1, 0.1) # Figure padding (left, top, right, bottom)
# Leave some space on right for colorbar
# Scatter opts
marker_size = 3 # Marker size
cmap = "bwr" # Colormap for normal points
clim = (-1, 1) # Color limits for normal points
discrete_cmap = True # Discretize colormap
cmap_levels = 14
# Labels and Titles
fig_title = "SST Errors" # Whole figure title
title_fontsize = 13 # Fontsize of title
title_fontweight = "bold" # Fontweight to use for title
dataset_names = ["CO9p0", "CO9p0", "CO9p0"] # Names to use for labelling plots
subtitle_fontsize = 11 # Fontsize for dataset subtitles
subtitle_fontweight = "normal" # Fontweight for dataset subtitles
# PLOT SEASONS. Make sure n_r = 2 and n_c = 2
# If this option is true, only the first dataset will be plotted, with seasonal
# variables on each subplot. The season_suffixes will be added to var_name
# for each subplot panel.
plot_seasons = True
season_suffixes = ["DJF", "MAM", "JJA", "SON"]
#%% Read and plot data
# Read all datasets into list
ds_list = [xr.open_dataset(dd) for dd in fn_list]
n_ds = len(ds_list)
n_ax = n_r * n_c
# Create plot and flatten axis array
f, a = coast.plot_util.create_geo_subplots(lonbounds, latbounds, n_r, n_c, figsize=figsize)
a_flat = a.flatten()
# Discretize the colormap if requested
if discrete_cmap:
cmap = plt.cm.get_cmap(cmap, cmap_levels)
# Determine if we will extend the colormap or not
extend_cbar = []
# Loop over dataset
for ii in range(n_ax):
ur_index = np.unravel_index(ii, (n_r, n_c))
# Select season if required
if plot_seasons:
ds = ds_list[0]
var_ii = var_name + "_{0}".format(season_suffixes[ii])
N_var = "{0}_{1}".format(name_of_count_variable, season_suffixes[ii])
a_flat[ii].text(0.05, 1.02, season_suffixes[ii], transform=a_flat[ii].transAxes, fontweight="bold")
else:
ds = ds_list[ii]
var_ii = var_name
a_flat[ii].set_title(dataset_names[ii], fontsize=subtitle_fontsize, fontweight=subtitle_fontweight)
N_var = name_of_count_variable
data = ds[var_ii].values
count_var = ds[N_var]
data[count_var < min_points_in_average] = np.nan
# Scatter and set title
pc = a_flat[ii].pcolormesh(
ds.longitude,
ds.latitude,
data,
cmap=cmap,
vmin=clim[0],
vmax=clim[1],
)
# Will we extend the colorbar for this dataset?
extend_cbar.append(coast.plot_util.determine_colorbar_extension(data, clim[0], clim[1]))
# Set Figure title
f.suptitle(fig_title, fontsize=title_fontsize, fontweight=title_fontweight)
# Set tight figure layout
f.tight_layout(w_pad=subplot_padding, h_pad=subplot_padding)
f.subplots_adjust(left=(fig_pad[0]), bottom=(fig_pad[1]), right=(1 - fig_pad[2]), top=(1 - fig_pad[3]))
# Handle colorbar -- will we extend it?
if "both" in extend_cbar:
extend = "both"
elif "max" in extend_cbar and "min" in extend_cbar:
extend = "both"
elif "max" in extend_cbar:
extend = "max"
elif "min" in extend_cbar:
extend = "min"
else:
extend = "neither"
cbar_ax = f.add_axes([(1 - fig_pad[2] + fig_pad[2] * 0.15), 0.15, 0.025, 0.7])
f.colorbar(pc, cax=cbar_ax, extend=extend)
# Save plot maybe
if save_plot:
f.savefig(fn_out)
| 2.90625 | 3 |
feature-engineering/samples/statistical_features.py | jeury301/text-classifier | 0 | 2997 | from sklearn.feature_extraction.text import TfidfVectorizer
def compute_tf_idf(corpus):
"""Computing term frequency (tf) - inverse document frequency (idf).
:param corpus: List of documents.
:returns: tf-idf of corpus.
"""
return TfidfVectorizer().fit_transform(corpus)
if __name__ == '__main__':
sample_corpus = [
'This is sample document.',
'another random document.',
'third sample document text'
]
print(compute_tf_idf(sample_corpus))
| 3.5 | 4 |
Gds/src/fprime_gds/executables/tcpserver.py | hunterpaulson/fprime | 0 | 2998 | <gh_stars>0
#!/usr/bin/env python3
from __future__ import print_function
import socket
import threading
try:
import socketserver
except ImportError:
import SocketServer as socketserver
import time
import os
import signal
import sys
import struct
import errno
from fprime.constants import DATA_ENCODING
from optparse import OptionParser
__version__ = 0.1
__date__ = "2015-04-03"
__updated__ = "2016-04-07"
# Universal server id global
SERVER = None
LOCK = None
shutdown_event = threading.Event()
FSW_clients = []
GUI_clients = []
FSW_ids = []
GUI_ids = []
def signal_handler(*_):
print("Ctrl-C received, server shutting down.")
shutdown_event.set()
def now():
return time.ctime(time.time())
class ThreadedTCPRequestHandler(socketserver.StreamRequestHandler):
"""
    Derived from the original Stable demo during R&TD and adapted
    for use in the new FSW gse.py application.

    TCP socket server for commands, log events, and telemetry data.
    Later this will handle other things such as sequence files and parameters.

    A handler is instantiated in its own thread for each client.
    Registration is done by sending the string "Register <name>".
    Sending a message to destination <name> is done as
    "A5A5 <name> <data>". Note only <data> is sent.
    Any client that sends a "List" command makes the server display all
    registered clients.
"""
socketserver.StreamRequestHandler.allow_reuse_address = True
socketserver.StreamRequestHandler.timeout = 1
def handle(self): # on each client connect
"""
The function that is invoked upon a new client. This function listens
for data on the socket. Packets for now are assumed to be separated
by a newline. For each packet, call processPkt.
"""
self.partial = b""
self.cmdQueue = []
self.registered = False
self.name = b""
self.id = 0
# print self.client_address, now() # show this client's address
# Read the data from the socket
data = self.recv(13)
# Connection was closed by the client
if not data:
print("Client exited.")
return
else:
# Process the data into the cmdQueue
self.getCmds(data)
# Process the cmdQueue
self.processQueue()
if self.registered:
print("Registration complete waiting for message.")
self.getNewMsg()
else:
print("Unable to register client.")
return
LOCK.acquire()
del SERVER.dest_obj[self.name]
if self.name in FSW_clients:
FSW_clients.remove(self.name)
FSW_ids.remove(self.id)
elif self.name in GUI_clients:
GUI_clients.remove(self.name)
GUI_ids.remove(self.id)
LOCK.release()
print("Closed %s connection." % self.name.decode(DATA_ENCODING))
self.registered = False
self.request.close()
def getCmds(self, inputString, end_of_command=b"\n"):
"""
Build a command from partial or full socket input
"""
commands = inputString.split(end_of_command)
if len(self.partial):
commands[0] = self.partial + commands[0]
self.partial = b""
if len(commands[-1]):
self.partial = commands[-1]
self.cmdQueue.extend(commands[:-1])
else:
self.cmdQueue.extend(commands[:-1])
def processQueue(self):
for cmd in self.cmdQueue:
self.processRegistration(cmd)
self.cmdQueue = []
def processRegistration(self, cmd):
params = cmd.split()
process_id = 0
if params[0] == b"Register":
LOCK.acquire()
name = params[1]
if b"FSW" in name:
if FSW_clients:
process_id = sorted(FSW_ids)[-1] + 1
name = params[1] + b"_" + bytes(process_id)
FSW_clients.append(name)
FSW_ids.append(process_id)
elif b"GUI" in name:
if GUI_clients:
process_id = sorted(GUI_ids)[-1] + 1
name = params[1] + b"_" + bytes(process_id)
GUI_clients.append(name)
GUI_ids.append(process_id)
SERVER.dest_obj[name] = DestObj(name, self.request)
LOCK.release()
self.registered = True
self.name = name
self.id = process_id
print("Registered client " + self.name.decode(DATA_ENCODING))
#################################################
# New Routines to process the command messages
#################################################
def getNewMsg(self):
"""
After registration wait for an incoming message
The first part must always be an "A5A5 " or a "List "
"""
# Loop while the connected client has packets to send/receive
while not shutdown_event.is_set():
# Read the header data from the socket either A5A5 or List
header = self.readHeader()
# If the received header is an empty string, connection closed, exit loop
if not header:
break
elif header == b"Quit":
LOCK.acquire()
print("Quit received!")
SERVER.dest_obj[self.name].put(struct.pack(">I", 0xA5A5A5A5))
shutdown_event.set()
time.sleep(1)
print("Quit processed!")
SERVER.shutdown()
SERVER.server_close()
LOCK.release()
break
# Got the header data so read the data of the message here...
data = self.readData(header)
# Process and send the packet of the message here...
self.processNewPkt(header, data)
def recv(self, l):
"""
Read l bytes from socket.
"""
chunk = b""
msg = b""
n = 0
while l > n:
try:
chunk = self.request.recv(l - n)
if chunk == b"":
print("read data from socket is empty!")
return b""
msg = msg + chunk
n = len(msg)
except socket.timeout:
if shutdown_event.is_set():
print("socket timed out and shutdown is requested")
return b"Quit\n"
continue
except socket.error as err:
if err.errno == errno.ECONNRESET:
print(
"Socket error "
+ str(err.errno)
+ " (Connection reset by peer) occurred on recv()."
)
else:
print("Socket error " + str(err.errno) + " occurred on recv().")
return msg
def readHeader(self):
"""
Read the 9 byte header (e.g. "A5A5 GUI " or "A5A5 FSW "),
or just read the "List\n" command.
"""
header = self.recv(5)
if len(header) == 0:
print(
"Header information is empty, client "
+ self.name.decode(DATA_ENCODING)
+ " exiting."
)
return header
if header == b"List\n":
return b"List"
elif header == b"Quit\n":
return b"Quit"
elif header[:-1] == b"A5A5":
header2 = self.recv(4)
return header + header2
else:
return
def readData(self, header):
"""
Read the data part of the message sent to either GUI or FSW.
GUI receives telemetry.
FSW receives commands of various lengths.
"""
data = b""
if header == b"List":
return b""
elif header == b"Quit":
return b""
dst = header.split(b" ")[1].strip(b" ")
if dst == b"FSW":
# Read variable length command data here...
desc = self.recv(4)
sizeb = self.recv(4)
size = struct.unpack(">I", sizeb)[0]
data = desc + sizeb + self.recv(size)
elif dst == b"GUI":
# Read telemetry data here...
tlm_packet_size = self.recv(4)
size = struct.unpack(">I", tlm_packet_size)[0]
data = tlm_packet_size + self.recv(size)
else:
raise RuntimeError("unrecognized client %s" % dst.decode(DATA_ENCODING))
return data
def processNewPkt(self, header, data):
"""
        Process a single command's header and data here.
        The command must always start with A5A5, except if it is a List.
        Once the entire header string is processed, send the data on the queue.
        If something goes wrong, report it and shut down the server.
"""
dest_list = []
if header == b"List":
print("List of registered clients: ")
LOCK.acquire()
for d in list(SERVER.dest_obj.keys()):
print("\t" + SERVER.dest_obj[d].name.decode(DATA_ENCODING))
reg_client_str = b"List " + SERVER.dest_obj[d].name
l = len(reg_client_str)
reg_client_str = struct.pack("i%ds" % l, l, reg_client_str)
self.request.send(reg_client_str)
LOCK.release()
return 0
# Process data here...
head, dst = header.strip(b" ").split(b" ")
if head == b"A5A5": # Packet Header
# print "Received Packet: %s %s...\n" % (head,dst)
if data == b"":
print(" Data is empty, returning.")
if b"GUI" in dst:
dest_list = GUI_clients
elif b"FSW" in dst:
dest_list = FSW_clients
for dest_elem in dest_list:
# print "Locking TCP"
LOCK.acquire()
if dest_elem in list(SERVER.dest_obj.keys()):
# Send the message here....
# print "Sending TCP msg to ", dest_elem
SERVER.dest_obj[dest_elem].put(data)
LOCK.release()
else:
raise RuntimeError("Packet missing A5A5 header")
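# A minimal sketch of a client of the registration protocol described in the
# ThreadedTCPRequestHandler docstring. It is illustrative only: the host/port
# defaults and the "GUI" name below are assumptions, and a real GUI/FSW client
# would also frame any subsequent data exactly as readData() expects.
def _example_register_client(host="127.0.0.1", port=50007, name=b"GUI"):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((host, port))
    # Register under the given name; the server appends a numeric id if needed.
    sock.sendall(b"Register " + name + b"\n")
    # Ask the server to report its registered clients back to us.
    sock.sendall(b"List\n")
    return sock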
class ThreadedUDPRequestHandler(socketserver.BaseRequestHandler):
"""
    Derived from the original Stable demo during R&TD and adapted
    for use in the new FSW gse.py application.

    UDP socket server for receiving telemetry packets.
    Each datagram is expected to carry an "A5A5 <dest> " header followed by a
    telemetry packet, and is forwarded to the GUI clients that registered via
    the TCP server. A handler instance is created for each received packet.
"""
socketserver.BaseRequestHandler.allow_reuse_address = True
def handle(self): # on each packet
"""
The function that is invoked when a packet is received. This function listens
for data on the socket. Packets for now are assumed to be separated
by a newline. For each packet, call processPkt.
"""
self.getNewMsg(self.request[0])
#################################################
# New Routines to process the command messages
#################################################
def getNewMsg(self, packet):
"""
After registration wait for an incoming message
The first part must always be an "A5A5 " or a "List "
"""
# Read the header data from the socket either A5A5 or List
(header, packet) = self.readHeader(packet)
# If the received header is an empty string, connection closed, exit loop
if not header:
return
# Got the header data so read the data of the message here...
data = self.readData(header, packet)
# Process and send the packet of the message here...
self.processNewPkt(header, data)
def readHeader(self, packet):
"""
Read the 9 byte header (e.g. "A5A5 GUI " or "A5A5 FSW "),
or just read the "List\n" command.
"""
header = packet[:4]
header2 = packet[4:9]
packet = packet[9:]
return (header + header2, packet)
def readData(self, header, packet):
"""
Read the data part of the message sent to either GUI or FSW.
GUI receives telemetry.
FSW receives commands of various lengths.
"""
data = ""
dst = header.split(b" ")[1].strip(b" ")
# Read telemetry data here...
tlm_packet_size = packet[:4]
size = struct.unpack(">I", tlm_packet_size)[0]
data = tlm_packet_size + packet[4 : 4 + size]
return data
def processNewPkt(self, header, data):
"""
        Process a single command's header and data here.
        The command must always start with A5A5, except if it is a List.
        Once the entire header string is processed, send the data on the queue.
        If something goes wrong, report it and shut down the server.
"""
dest_list = []
# Process data here...
head, dst = header.strip(b" ").split(b" ")
if head == b"A5A5": # Packet Header
# print "Received Packet: %s %s...\n" % (head,dst)
if data == "":
print(" Data is empty, returning.")
if b"GUI" in dst:
dest_list = GUI_clients
else:
print("dest? %s" % dst.decode(DATA_ENCODING))
for dest_elem in dest_list:
LOCK.acquire()
if dest_elem in list(SERVER.dest_obj.keys()):
# Send the message here....
# print "Sending UDP msg to ", dest_elem
SERVER.dest_obj[dest_elem].put(data)
LOCK.release()
else:
raise RuntimeError("Telemetry missing A5A5 header")
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
"""
TCP Socket server.
Keep a dictionary of destination objects containing queues and
    socket ids for writing to destinations.
"""
dest_obj = dict()
lock_obj = threading.Lock()
class ThreadedUDPServer(socketserver.ThreadingMixIn, socketserver.UDPServer):
"""
UDP Socket server.
"""
class DestObj:
"""
Destination object for all clients registered.
"""
def __init__(self, name, request):
"""
Constructor
"""
self.name = name
self.socket = request
self.packet = b""
def put(self, msg):
"""
Write out the message to the destination socket
"""
try:
# print "about to send data to " + self.name
self.socket.send(msg)
except socket.error as err:
print("Socket error " + str(err.errno) + " occurred on send().")
def fileno(self):
"""
"""
return self.socket
def main(argv=None):
global SERVER, LOCK
program_name = os.path.basename(sys.argv[0])
program_license = "Copyright 2015 user_name (California Institute of Technology) \
ALL RIGHTS RESERVED. U.S. Government Sponsorship acknowledged."
program_version = "v0.1"
program_build_date = "%s" % __updated__
program_version_string = "%%prog %s (%s)" % (program_version, program_build_date)
program_longdesc = (
"""""" # optional - give further explanation about what the program does
)
if argv is None:
argv = sys.argv[1:]
try:
parser = OptionParser(
version=program_version_string,
epilog=program_longdesc,
description=program_license,
)
parser.add_option(
"-p",
"--port",
dest="port",
action="store",
type="int",
help="Set threaded tcp socket server port [default: %default]",
default=50007,
)
parser.add_option(
"-i",
"--host",
dest="host",
action="store",
type="string",
help="Set threaded tcp socket server ip [default: %default]",
default="127.0.0.1",
)
# process options
(opts, args) = parser.parse_args(argv)
HOST = opts.host
PORT = opts.port
server = ThreadedTCPServer((HOST, PORT), ThreadedTCPRequestHandler)
udp_server = ThreadedUDPServer((HOST, PORT), ThreadedUDPRequestHandler)
# Hopefully this will allow address reuse and server to restart immediately
server.allow_reuse_address = True
SERVER = server
LOCK = server.lock_obj
ip, port = server.server_address
print("TCP Socket Server listening on host addr %s, port %s" % (HOST, PORT))
# Start a thread with the server -- that thread will then start one
# more thread for each request
server_thread = threading.Thread(target=server.serve_forever)
udp_server_thread = threading.Thread(target=udp_server.serve_forever)
signal.signal(signal.SIGINT, signal_handler)
server_thread.daemon = False
server_thread.start()
udp_server_thread.daemon = False
udp_server_thread.start()
while not shutdown_event.is_set():
server_thread.join(timeout=5.0)
udp_server_thread.join(timeout=5.0)
print("shutdown from main thread")
SERVER.shutdown()
SERVER.server_close()
udp_server.shutdown()
udp_server.server_close()
time.sleep(1)
except Exception as e:
indent = len(program_name) * " "
sys.stderr.write(program_name + ": " + repr(e) + "\n")
sys.stderr.write(indent + " for help use --help\n")
return 2
if __name__ == "__main__":
sys.exit(main())
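# Typical invocation, using the OptionParser flags defined above:
#   python tcpserver.py --host 127.0.0.1 --port 50007
# TCP clients then register with "Register GUI" or "Register FSW" (newline
# terminated) before exchanging "A5A5 ..." framed packets.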
| 2.5 | 2 |
btb_manager_telegram/__init__.py | haivle/BTB-manager-telegram | 3 | 2999 | import logging
import sched
import time
(
MENU,
EDIT_COIN_LIST,
EDIT_USER_CONFIG,
DELETE_DB,
UPDATE_TG,
UPDATE_BTB,
PANIC_BUTTON,
CUSTOM_SCRIPT,
) = range(8)
BOUGHT, BUYING, SOLD, SELLING = range(4)
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
)
logger = logging.getLogger("btb_manager_telegram_logger")
scheduler = sched.scheduler(time.time, time.sleep)
| 2.359375 | 2 |