Dataset columns (name, type, and min/max observed values):

  max_stars_repo_path   string, length 3 to 269
  max_stars_repo_name   string, length 4 to 119
  max_stars_count       int64, 0 to 191k
  id                    string, length 1 to 7
  content               string, length 6 to 1.05M
  score                 float64, 0.23 to 5.13
  int_score             int64, 0 to 5
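Each record below follows this schema: repository path, repository name, star count, id, file content, and the two quality scores. As a minimal illustration (the file name "code_quality_sample.jsonl" and the JSON-lines export format are assumptions, not part of this dump), records with this schema could be filtered on the score columns like so:

import pandas as pd

# Hypothetical export: a JSON-lines file with exactly the columns listed above.
df = pd.read_json("code_quality_sample.jsonl", lines=True)

# Keep well-scored files from repositories that have at least one star.
selected = df[(df["int_score"] >= 3) & (df["max_stars_count"] > 0)]

for _, row in selected.iterrows():
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["score"])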
ui_mant_libros.py
edzzn/Manejo_Liberia
0
3100
from PyQt4 import QtGui

from ui_mant_libros_new import NewLibrosWindow
from ui_mant_libros_edit import EditLibrosWindow
from ui_mant_libros_id_edit import GetIdEditWindow

# Debug only
import inspect


class MenuLibros(QtGui.QWidget):
    """Menu window for editing Libros (books)."""

    def __init__(self):
        super(MenuLibros, self).__init__()
        self.createButtons()
        self.setWindowTitle('Mantenimiento Libros')
        self.setWindowIcon(QtGui.QIcon('images/user-plus.png'))
        self.setGeometry(650, 300, 150, 100)

    def createButtons(self):
        btn_new_libros = QtGui.QPushButton('Nuevo')
        btn_new_libros.clicked.connect(self.open_new_libros_window)

        btn_edit_libros = QtGui.QPushButton('Editar')
        btn_edit_libros.clicked.connect(self.open_edit_libros_window)

        # Listing and deleting are not implemented yet; these buttons only close the window.
        btn_list_libros = QtGui.QPushButton('Listar')
        btn_list_libros.clicked.connect(self.close)

        btn_delete_libros = QtGui.QPushButton('Eliminar')
        btn_delete_libros.clicked.connect(self.close)

        hbox = QtGui.QHBoxLayout()
        hbox.addWidget(btn_new_libros)
        hbox.addWidget(btn_edit_libros)
        hbox.addWidget(btn_list_libros)
        hbox.addWidget(btn_delete_libros)

        vbox = QtGui.QVBoxLayout()
        vbox.addLayout(hbox)

        self.setLayout(vbox)

    def open_new_libros_window(self):
        self.new_libros_view = NewLibrosWindow()
        self.new_libros_view.show()
        print(inspect.stack()[0][3])
        self.close()

    def open_edit_libros_window(self):
        self.edit_libros_view = GetIdEditWindow()
        self.edit_libros_view.show()
        print(inspect.stack()[0][3])
        self.close()

    def open_list_reserva_window(self):
        # self.new_reserva_view = NewReserva()
        # self.new_reserva_view.show()
        print(inspect.stack()[0][3])
        self.close()

    def open_delete_reserva_window(self):
        # self.new_reserva_view = NewReserva()
        # self.new_reserva_view.show()
        print(inspect.stack()[0][3])
        self.close()


if __name__ == '__main__':
    import sys

    app = QtGui.QApplication(sys.argv)
    mainWin = MenuLibros()
    mainWin.show()
    sys.exit(app.exec_())
2.28125
2
env/gym_poker_ai/envs/tests/holdem_calc/holdem_argparser.py
MrStonkus/PokerAi
0
3101
<reponame>MrStonkus/PokerAi<gh_stars>0 import argparse import re import holdem_calc.holdem_functions as holdem_functions # Wrapper class which holds the arguments for library calls # Mocks actual argparse object class LibArgs: def __init__(self, board, exact, num, input_file, hole_cards): self.board = board self.cards = hole_cards self.n = num self.input = input_file self.exact = exact # Parses arguments passed to holdem_calc as a library call def parse_lib_args(args): error_check_arguments(args) # Parse hole cards and board hole_cards, board = None, None if not args.input: hole_cards, board = parse_cards(args.cards, args.board) return hole_cards, args.n, args.exact, board, args.input # Parses command line arguments to holdem_calc def parse_args(): # Define possible command line arguments parser = argparse.ArgumentParser( description="Find the odds that a Texas Hold'em hand will win. Note " "that cards must be given in the following format: As, Jc, Td, 3h.") parser.add_argument("cards", nargs="*", type=str, metavar="hole card", help="Hole cards you want to find the odds for.") parser.add_argument("-b", "--board", nargs="*", type=str, metavar="card", help="Add board cards") parser.add_argument("-e", "--exact", action="store_true", help="Find exact odds by enumerating every possible " "board") parser.add_argument("-n", type=int, default=100000, help="Run N Monte Carlo simulations") parser.add_argument("-i", "--input", type=str, help="Read hole cards and boards from an input file. " "Commandline arguments for hole cards and board will " "be ignored") # Parse command line arguments and check for errors args = parser.parse_args() error_check_arguments(args) # Parse hole cards and board hole_cards, board = None, None if not args.input: hole_cards, board = parse_cards(args.cards, args.board) return hole_cards, args.n, args.exact, board, args.input # Parses a line taken from the input file and returns the hole cards and board def parse_file_args(line): if line is None or len(line) == 0: print(line) print("Invalid format") exit() values = line.split("|") if len(values) > 2 or len(values) < 1: print(line) print("Invalid format") exit() hole_cards = values[0].split() all_cards = list(hole_cards) board = None if len(values) == 2: board = values[1].split() all_cards.extend(board) error_check_cards(all_cards) return parse_cards(hole_cards, board) # Parses hole cards and board def parse_cards(cards, board): hole_cards = create_hole_cards(cards) if board: board = parse_board(board) return hole_cards, board # Error check the command line arguments def error_check_arguments(args): # Check that the number of Monte Carlo simulations is a positive number if args.n <= 0: print("Number of Monte Carlo simulations must be positive.") exit() # Check that we can open the specified input file if args.input: file_name = args.input try: input_file = open(file_name, 'r') input_file.close() except IOError: print("Error opening file " + file_name) exit() # Check to make sure all cards are of a valid format all_cards = list(args.cards) if args.board: all_cards.extend(args.board) error_check_cards(all_cards) # Error check the command line arguments def error_check_arguments(args): # Check that the number of Monte Carlo simulations is a positive number if args.n <= 0: print("Number of Monte Carlo simulations must be positive.") exit() # Check that we can open the specified input file if args.input: file_name = args.input try: input_file = open(file_name, 'r') input_file.close() except IOError: print("Error opening file " + 
file_name) exit() # Check to make sure all cards are of a valid format all_cards = list(args.cards) if args.board: all_cards.extend(args.board) error_check_cards(all_cards) # Checking that the hole cards + board are formatted properly and unique def error_check_cards(all_cards): card_re = re.compile('[AKQJT98765432][scdh]') for card in all_cards: if card != "?" and not card_re.match(card): print("Invalid card given.") exit() else: if all_cards.count(card) != 1 and card != "?": print("The cards given must be unique.") exit() # Returns tuple of two-tuple hole_cards: e.g. ((As, Ks), (Ad, Kd), (Jh, Th)) def create_hole_cards(raw_hole_cards): # Checking that there are an even number of hole cards if (raw_hole_cards is None or len(raw_hole_cards) < 2 or len(raw_hole_cards) % 2): print("You must provide a non-zero even number of hole cards") exit() # Create two-tuples out of hole cards hole_cards, current_hole_cards = [], [] for hole_card in raw_hole_cards: if hole_card != "?": current_card = holdem_functions.Card(hole_card) current_hole_cards.append(current_card) else: current_hole_cards.append(None) if len(current_hole_cards) == 2: if None in current_hole_cards: if (current_hole_cards[0] is not None or current_hole_cards[1] is not None): print("Unknown hole cards must come in pairs") exit() hole_cards.append((current_hole_cards[0], current_hole_cards[1])) current_hole_cards = [] if hole_cards.count((None, None)) > 1: print("Can only have one set of unknown hole cards") return tuple(hole_cards) # Returns list of board cards: e.g. [As Ks Ad Kd] def parse_board(board): if len(board) > 5 or len(board) < 3: print("Board must have a length of 3, 4, or 5.") exit() if "?" in board: print("Board cannot have unknown cards") exit() return create_cards(board) # Instantiates new cards from the arguments and returns them in a tuple def create_cards(card_strings): return [holdem_functions.Card(arg) for arg in card_strings]
3.296875
3
qbay/controllers.py
KarlDorogy/Cisc-327-Course-Project-Group-20
0
3102
from flask import render_template, request, session, redirect from qbay.models import * from datetime import date from qbay import app def authenticate(inner_function): """ :param inner_function: any python function that accepts a user object Wrap any python function and check the current session to see if the user has logged in. If login, it will call the inner_function with the logged in user object. To wrap a function, we can put a decoration on that function. Example: @authenticate def home_page(user): pass """ def wrapped_inner(): # check did we store the key in the session if 'logged_in' in session: email = session['logged_in'] try: user = User.query.filter_by(email=email).one_or_none() if user: # if the user exists, call the inner_function # with user as parameter return inner_function(user) except Exception: return redirect('/login') else: # else, redirect to the login page return redirect('/login') # return the wrapped version of the inner_function: return wrapped_inner @app.route('/login', methods=['GET']) def login_get(): return render_template('login.html', message='Please login') @app.route('/login', methods=['POST']) def login_post(): email = request.form.get('email') password = request.form.get('password') user = login(email, password) if user: session['logged_in'] = user.email """ Session is an object that contains sharing information between a user's browser and the end server. Typically it is packed and stored in the browser cookies. They will be past along between every request the browser made to this services. Here we store the user object into the session, so we can tell if the client has already login in the following sessions. """ # success! go back to the home page # code 303 is to force a 'GET' request return redirect('/', code=303) else: return render_template('login.html', message='login failed') @app.route('/') @authenticate def home(user): # gets a list of products that the logged in user owns user_products = get_products(user.email) # gets list of user purchased products products = get_transaction(user.email) return render_template('index.html', user=user, owned_products=user_products, orders=products) @app.route('/register', methods=['GET']) def register_get(): # templates are stored in the templates folder return render_template('register.html', message='') @app.route('/register', methods=['POST']) def register_post(): email = request.form.get('email') name = request.form.get('name') password = request.form.get('password') password2 = request.form.get('password2') error_message = None if password != <PASSWORD>: error_message = "The passwords do not match" else: # use backend api to register the user success = register(name, email, password) if not success: error_message = "Registration Failed." # if there is any error messages when registering new user # at the backend, go back to the register page. 
if error_message: return render_template('register.html', message=error_message) else: return redirect('/login') @app.route('/updateuser', methods=['Get']) def update_user_get(): return render_template('updateuser.html', message='Please enter new info below:') @app.route('/updateuser', methods=['POST']) def update_user_post(): # retrieves current logged in user's email user_email = session['logged_in'] name = request.form.get('name') shipping_address = request.form.get('shippingaddress') postal_code = request.form.get('postalcode') error_message = None # use backend api to update the user attributes success = update_user(user_email, name, shipping_address, postal_code) if not success: error_message = "Updating of User Profile Failed." # if there is any error messages when updateing user profile # at the backend, go back to the update page. if error_message: return render_template('updateuser.html', message=error_message) else: return redirect('/', code=303) @app.route('/updateproduct', methods=['Get']) def update_product_get(): return render_template('updateproduct.html', message="Please enter new product info below:", pName=request.args.get('pName')) @app.route('/updateproduct', methods=['POST']) def update_product_post(): new_price = int(request.form.get('new_price')) new_title = request.form.get('new_title') new_description = request.form.get('new_description') title = request.form.get('title') # use backend api to update the user attributes success = update_product(new_price, new_title, new_description, title) error_message = None if not success: error_message = "Product Update Failed" # if there is any error messages when creating a product # at the backend, go back to the create product page. if error_message: return render_template('updateproduct.html', message=error_message, pName=request.args.get('pName')) else: return redirect('/', code=303) @app.route('/createproduct', methods=['Get']) def create_product_get(): return render_template('createproduct.html', message='Please enter product info below:') @app.route('/createproduct', methods=['POST']) def create_product_post(): # retrieves current logged in user's email owner_email = session['logged_in'] today = date.today() current_date = today.strftime("%d/%m/%Y") last_modified_date = (current_date[6:10] + "-" + current_date[3:5] + "-" + current_date[0:2]) price = int(request.form.get('price')) title = request.form.get('title') description = request.form.get('description') error_message = None # use backend api to update the user attributes success = create_product(price, title, description, last_modified_date, owner_email) if not success: error_message = "Product Creation Failed." # if there is any error messages when creating a product # at the backend, go back to the create product page. 
if error_message: return render_template('createproduct.html', message=error_message) else: return redirect('/', code=303) @app.route('/listings', methods=['GET']) def available_products_get(): # retrieves current logged in user's email user_email = session['logged_in'] # gets other user products that are available to purchase products = get_listings(user_email) return render_template('available_products.html', available_products=products) @app.route('/placeorder', methods=['GET']) def place_order_get(): return render_template('placeorder.html', message="Please confirm the purchase below:", pTitle=request.args.get('pTitle'), pPrice=request.args.get('pPrice')) @app.route('/placeorder', methods=['POST']) def place_order_post(): new_owner = session['logged_in'] product_title = request.args.get('pTitle') # use backend api to place the product order success = place_order(new_owner, product_title) error_message = None if not success: error_message = "Placing Order Failed" # if there is any error messages when ordering product # at the backend, go back to the available product listings page. if error_message: return render_template('available_products.html', message=error_message) else: return redirect('/', code=303) @app.route('/logout') def logout(): if 'logged_in' in session: session.pop('logged_in', None) return redirect('/')
3.484375
3
gbfs/serializers.py
stadtulm/cykel
80
3103
<gh_stars>10-100 from datetime import timedelta from django.utils.timezone import now from preferences import preferences from rest_framework import fields, serializers from bikesharing.models import Bike, Station, VehicleType from cykel.serializers import EnumFieldSerializer class TimestampSerializer(fields.CharField): def to_representation(self, value): return value.timestamp() class GbfsFreeBikeStatusSerializer(serializers.HyperlinkedModelSerializer): bike_id = serializers.CharField(source="non_static_bike_uuid", read_only=True) vehicle_type_id = serializers.CharField(read_only=True) last_reported = TimestampSerializer(read_only=True) class Meta: model = Bike fields = ( "bike_id", "vehicle_type_id", "current_range_meters", "last_reported", ) def to_representation(self, instance): representation = super().to_representation(instance) # defined by GBFS 2.1: Only if the vehicle has a motor the field is required if ( instance.vehicle_type is not None and instance.vehicle_type.propulsion_type == VehicleType.PropulsionType.HUMAN ): representation.pop("current_range_meters") # Default to False TODO: maybe configuration later representation["is_reserved"] = False # Default to False TODO: maybe configuration later representation["is_disabled"] = False public_geolocation = instance.public_geolocation() if public_geolocation is not None: pos = public_geolocation.geo if pos and pos.x and pos.y: representation["lat"] = pos.y representation["lon"] = pos.x return representation # only return bikes with public geolocation class GbfsVehicleOnStationSerializer(GbfsFreeBikeStatusSerializer): def to_representation(self, instance): representation = super().to_representation(instance) if representation is None: return None representation.pop("lat") representation.pop("lon") return representation class GbfsStationInformationSerializer(serializers.HyperlinkedModelSerializer): name = serializers.CharField(source="station_name", read_only=True) capacity = serializers.IntegerField(source="max_bikes", read_only=True) station_id = serializers.CharField(source="id", read_only=True) class Meta: model = Station fields = ( "name", "capacity", "station_id", ) def to_representation(self, instance): representation = super().to_representation(instance) if ( instance.location is not None and instance.location.x and instance.location.y ): representation["lat"] = instance.location.y representation["lon"] = instance.location.x return representation class GbfsStationStatusSerializer(serializers.HyperlinkedModelSerializer): station_id = serializers.CharField(source="id", read_only=True) vehicles = serializers.SerializerMethodField() def get_vehicles(self, obj): # if configured filter vehicles, where time report # is older than configure allowed silent timeperiod bsp = preferences.BikeSharePreferences if bsp.gbfs_hide_bikes_after_location_report_silence: available_bikes = obj.bike_set.filter( availability_status=Bike.Availability.AVAILABLE, last_reported__gte=now() - timedelta(hours=bsp.gbfs_hide_bikes_after_location_report_hours), ) else: available_bikes = obj.bike_set.filter( availability_status=Bike.Availability.AVAILABLE ) vehicles = GbfsVehicleOnStationSerializer(available_bikes, many=True).data return list(filter(lambda val: val is not None, vehicles)) class Meta: model = Station fields = ( "station_id", "vehicles", ) def to_representation(self, instance): representation = super().to_representation(instance) representation["num_bikes_available"] = len(representation["vehicles"]) representation["num_docks_available"] = ( 
instance.max_bikes - representation["num_bikes_available"] ) if representation["num_bikes_available"] > 0: representation["last_reported"] = max( ( vehicle["last_reported"] if vehicle["last_reported"] is not None else 0 ) for vehicle in representation["vehicles"] ) else: # if no bike is at the station, last_report is the current time # not sure if this is the intended behavior of the field # or it should be the timestamp of the last bike removed # but it is not so easy to implement representation["last_reported"] = int(now().timestamp()) def drop_last_reported(obj): obj.pop("last_reported") return obj representation["vehicles"] = list( map(drop_last_reported, representation["vehicles"]) ) status = (instance.status == Station.Status.ACTIVE) or False representation["is_installed"] = status representation["is_renting"] = status representation["is_returning"] = status return representation class GbfsVehicleTypeSerializer(serializers.HyperlinkedModelSerializer): vehicle_type_id = serializers.CharField(source="id", read_only=True) form_factor = EnumFieldSerializer( read_only=True, mapping={ VehicleType.FormFactor.BIKE: "bicycle", VehicleType.FormFactor.ESCOOTER: "scooter", VehicleType.FormFactor.CAR: "car", VehicleType.FormFactor.MOPED: "moped", VehicleType.FormFactor.OTHER: "other", }, ) propulsion_type = EnumFieldSerializer( read_only=True, mapping={ VehicleType.PropulsionType.HUMAN: "human", VehicleType.PropulsionType.ELECTRIC_ASSIST: "electric_assist", VehicleType.PropulsionType.ELECTRIC: "electric", VehicleType.PropulsionType.COMBUSTION: "combustion", }, ) def to_representation(self, instance): data = super(GbfsVehicleTypeSerializer, self).to_representation(instance) # defined by GBFS 2.1: Only if the vehicle has a motor the field is required if instance.propulsion_type == VehicleType.PropulsionType.HUMAN: data.pop("max_range_meters") return data class Meta: model = VehicleType fields = ( "vehicle_type_id", "form_factor", "propulsion_type", "max_range_meters", "name", )
2.140625
2
anime_downloader/extractors/vidstream.py
ngomile/anime-downloader
2
3104
import logging
import re

from anime_downloader.extractors.base_extractor import BaseExtractor
from anime_downloader.sites import helpers

logger = logging.getLogger(__name__)


class VidStream(BaseExtractor):
    def _get_data(self):
        QUALITIES = {
            "360": [],
            "480": [],
            "720": [],
            "1080": [],
        }

        url = self.url.replace('https:////', 'https://')
        soup = helpers.get(url).text
        regex = r'https://vidstreaming\.io/download\?[^"]*'
        download = re.search(regex, soup).group()
        soup = helpers.soupify(helpers.get(download))
        links = soup.select('div.mirror_link')[0].select('div.dowload > a')

        for a in QUALITIES:
            for b in links:
                if a in b.text:
                    QUALITIES[a].append(b.get('href'))

        stream_url = QUALITIES[self.quality[:-1]][0] if QUALITIES != {"360": [], "480": [], "720": [], "1080": []} else links[0].get('href')  # In case nothing is found

        return {
            'stream_url': stream_url,
            'referer': download
        }
2.71875
3
gui/sum_v1/views.py
time-crunched/nlp-toolbox
0
3105
<filename>gui/sum_v1/views.py
import time
import os

from django.shortcuts import render, redirect
from django.http import JsonResponse
from django.views import View
from django.conf import settings

from .forms import File_uploadForm
from .models import File_upload, SummaryRes
from sim_v1.textsummary import TEXTSummary

summary_document_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'media', 'sum_v1', 'upload')
# summary_document_dir = r'C:\Users\ERDIG\Dropbox\Python\nlp_v1\media\sum_v1\upload'
summary_extraction_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'media', 'sum_v1', 'temp')
# summary_extraction_dir = r'C:\Users\ERDIG\Dropbox\Python\nlp_v1\media\sum_v1\temp'
summary_ratio = 0.01


class Upload(View):
    def post(self, request):
        time.sleep(1)  # You don't need this line. This is just to delay the process so you can see the progress bar testing locally.
        form = File_uploadForm(self.request.POST, self.request.FILES)
        print(form.errors)
        if form.is_valid():
            document = form.save()
            data = {'is_valid': True, 'name': document.file.name, 'url': document.file.url}
        else:
            data = {'is_valid': False}
        return JsonResponse(data)

    def get(self, request):
        for document in File_upload.objects.all():
            document.file.delete()
            document.delete()
        doc_list = File_upload.objects.all()
        form = File_uploadForm()
        return render(self.request, 'upload.html', {'documents': doc_list, 'form': form})


def sum_words(request):
    if request.method == 'POST':
        form = File_uploadForm(request.POST)
        if form.is_valid():
            form.save()
            sum_words = form.cleaned_data['sum_words']
            request.session['sum_words'] = sum_words
    return redirect('sum_v1:summarize')


def clear_database(request):
    for document in File_upload.objects.all():
        document.file.delete()
        document.delete()
    return redirect(request.POST.get('next'))


def Summarize(request):
    SummaryRes.objects.all().delete()
    summary_word_count = request.session['sum_words']
    for document in os.listdir(summary_document_dir):
        for filename in os.listdir(summary_extraction_dir):
            os.remove(os.path.join(summary_extraction_dir, filename))
        text_dir = os.path.join(summary_document_dir, document)
        summary = TEXTSummary(text_dir, summary_extraction_dir, summary_ratio, summary_word_count)
        summary.textextraction()
        summary.summary()
        SummaryRes.objects.create(doc=document, summary=summary.summary)
    results = SummaryRes.objects.all()
    return render(request, 'summarize.html', {'results': results})
2.171875
2
homeassistant/components/websocket_api/__init__.py
dannyqwertz/home-assistant
4
3106
<reponame>dannyqwertz/home-assistant
"""
Websocket based API for Home Assistant.

For more details about this component, please refer to the documentation at
https://developers.home-assistant.io/docs/external_api_websocket.html
"""
from homeassistant.core import callback
from homeassistant.loader import bind_hass

from . import commands, connection, const, decorators, http, messages

DOMAIN = const.DOMAIN

DEPENDENCIES = ('http',)

# Backwards compat / Make it easier to integrate
# pylint: disable=invalid-name
ActiveConnection = connection.ActiveConnection
BASE_COMMAND_MESSAGE_SCHEMA = messages.BASE_COMMAND_MESSAGE_SCHEMA
error_message = messages.error_message
result_message = messages.result_message
async_response = decorators.async_response
require_admin = decorators.require_admin
ws_require_user = decorators.ws_require_user
# pylint: enable=invalid-name


@bind_hass
@callback
def async_register_command(hass, command, handler, schema):
    """Register a websocket command."""
    handlers = hass.data.get(DOMAIN)

    if handlers is None:
        handlers = hass.data[DOMAIN] = {}

    handlers[command] = (handler, schema)


async def async_setup(hass, config):
    """Initialize the websocket API."""
    hass.http.register_view(http.WebsocketAPIView)
    commands.async_register_commands(hass)
    return True
2.203125
2
test_app/settings.py
Lenders-Cooperative/Django-DocuSign
0
3107
<reponame>Lenders-Cooperative/Django-DocuSign<filename>test_app/settings.py
#
# Created on Tue Dec 21 2021
#
# Copyright (c) 2021 Lenders Cooperative, a division of Summit Technology Group, Inc.
#
"""
Django settings for test_app project.

Generated by 'django-admin startproject' using Django 3.1.7.

For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path

import environ

env = environ.Env()

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "<KEY>"

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "los_docusign.apps.LosDocusignConfig",
    "test_app.test_organization.apps.TestOrganizationConfig",
    "django_lc_utils",
]

MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]

ROOT_URLCONF = "test_app.urls"

TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
        },
    },
]

WSGI_APPLICATION = "test_app.wsgi.application"

# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases

DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.postgresql_psycopg2",
        "NAME": "docusign_new_poc",
        "USER": "postgres",
        "PASSWORD": "admin",
        "HOST": "localhost",
        "PORT": "5432",
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/

LANGUAGE_CODE = "en-us"

TIME_ZONE = "UTC"

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/

STATIC_URL = "/static/"

BASE_DIR = Path(__file__).resolve().parent

DOCUSIGN_API_ACCOUNT_ID = env(
    "DOCUSIGN_API_ACCOUNT_ID", default="<Docusign API Account Id >"
)
DOCUSIGN_CLIENT_ID = env("DOCUSIGN_CLIENT_ID", default="<Docusign Client Id>")
DOCUSIGN_API_ENDPOINT = env(
    "DOCUSIGN_API_ENDPOINT", default="https://demo.docusign.net/restapi/v2.1/accounts/"
)
DOCUSIGN_TOKEN_EXPIRY_IN_SECONDS = env("DOCUSIGN_TOKEN_EXPIRY_IN_SECONDS", default=3600)
DOCUSIGN_AUTHORIZATION_SERVER = env(
    "DOCUSIGN_AUTHORIZATION_SERVER", default="account-d.docusign.com"
)
DOCUSIGN_PRIVATE_KEY_FILE = env(
    "DOCUSIGN_PRIVATE_KEY_FILE",
    default="<Private Key file data>",
)
DOCUSIGN_ENABLE_KBA = env("DOCUSIGN_ENABLE_KBA", default=False)
1.710938
2
tests/unit/ppr/test_search_query.py
doug-lovett/test-schemas-dl
0
3108
# Copyright © 2020 Province of British Columbia # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Test Suite to ensure the PPR Search Query schema is valid. """ import copy from registry_schemas import validate from registry_schemas.example_data.ppr import SEARCH_QUERY def test_valid_search_query_ind_debtor(): """Assert that the schema is performing as expected for a search by individual debtor.""" query = copy.deepcopy(SEARCH_QUERY) query['type'] = 'INDIVIDUAL_DEBTOR' del query['criteria']['debtorName']['business'] del query['criteria']['value'] del query['clientReferenceId'] del query['startDateTime'] del query['endDateTime'] is_valid, errors = validate(query, 'searchQuery', 'ppr') if errors: for err in errors: print(err.message) print(errors) assert is_valid def test_valid_search_query_bus_debtor(): """Assert that the schema is performing as expected for a search by business debtor.""" query = copy.deepcopy(SEARCH_QUERY) query['type'] = 'BUSINESS_DEBTOR' del query['criteria']['debtorName']['first'] del query['criteria']['debtorName']['second'] del query['criteria']['debtorName']['last'] del query['criteria']['value'] is_valid, errors = validate(query, 'searchQuery', 'ppr') if errors: for err in errors: print(err.message) print(errors) assert is_valid def test_valid_search_query_airdot(): """Assert that the schema is performing as expected for a search by aircraft DOT.""" query = copy.deepcopy(SEARCH_QUERY) query['type'] = 'AIRCRAFT_DOT' del query['criteria']['debtorName'] query['criteria']['value'] = 'CFYXW' is_valid, errors = validate(query, 'searchQuery', 'ppr') if errors: for err in errors: print(err.message) print(errors) assert is_valid def test_valid_search_query_regnum(): """Assert that the schema is performing as expected for a search by registration number.""" query = copy.deepcopy(SEARCH_QUERY) query['type'] = 'REGISTRATION_NUMBER' del query['criteria']['debtorName'] query['criteria']['value'] = '023001B' is_valid, errors = validate(query, 'searchQuery', 'ppr') if errors: for err in errors: print(err.message) print(errors) assert is_valid def test_valid_search_query_mhrnum(): """Assert that the schema is performing as expected for a search by MHR number.""" query = copy.deepcopy(SEARCH_QUERY) query['type'] = 'MHR_NUMBER' del query['criteria']['debtorName'] query['criteria']['value'] = '21324' is_valid, errors = validate(query, 'searchQuery', 'ppr') if errors: for err in errors: print(err.message) print(errors) assert is_valid def test_valid_search_query_serialnum(): """Assert that the schema is performing as expected for a search by serial number.""" query = copy.deepcopy(SEARCH_QUERY) query['type'] = 'SERIAL_NUMBER' del query['criteria']['debtorName'] query['criteria']['value'] = 'KM8J3CA46JU622994' is_valid, errors = validate(query, 'searchQuery', 'ppr') if errors: for err in errors: print(err.message) print(errors) assert is_valid def test_invalid_search_query_missing_type(): """Assert that an invalid search query fails - type is missing.""" query = copy.deepcopy(SEARCH_QUERY) del 
query['type'] del query['criteria']['debtorName']['business'] del query['criteria']['value'] is_valid, errors = validate(query, 'searchQuery', 'ppr') if errors: for err in errors: print(err.message) print(errors) assert not is_valid def test_invalid_search_query_missing_criteria(): """Assert that an invalid search query fails - criteria is missing.""" query = copy.deepcopy(SEARCH_QUERY) del query['criteria'] is_valid, errors = validate(query, 'searchQuery', 'ppr') if errors: for err in errors: print(err.message) print(errors) assert not is_valid def test_invalid_search_query_type(): """Assert that an invalid search query fails - type is invalid.""" query = copy.deepcopy(SEARCH_QUERY) query['type'] = 'XXXXXXXX' del query['criteria']['debtorName']['business'] del query['criteria']['value'] is_valid, errors = validate(query, 'searchQuery', 'ppr') if errors: for err in errors: print(err.message) print(errors) assert not is_valid def test_invalid_search_query_criteria(): """Assert that an invalid search query fails - criteria is invalid.""" query = copy.deepcopy(SEARCH_QUERY) del query['criteria']['debtorName']['business'] is_valid, errors = validate(query, 'searchQuery', 'ppr') if errors: for err in errors: print(err.message) print(errors) assert not is_valid def test_invalid_search_query_busname(): """Assert that an invalid search query fails - business name is too short.""" query = copy.deepcopy(SEARCH_QUERY) del query['criteria']['debtorName']['first'] del query['criteria']['debtorName']['second'] del query['criteria']['debtorName']['last'] del query['criteria']['value'] query['criteria']['debtorName']['business'] = 'XXXX' is_valid, errors = validate(query, 'searchQuery', 'ppr') if errors: for err in errors: print(err.message) print(errors) assert not is_valid def test_invalid_search_query_value(): """Assert that an invalid search query fails - value is too long.""" query = copy.deepcopy(SEARCH_QUERY) del query['criteria']['debtorName'] query['criteria']['value'] = 'XxxxxxxxxxxxxxxxxxxxXxxxxxxxxxxxxxxxxxxxXxxxxxxxxxx' is_valid, errors = validate(query, 'searchQuery', 'ppr') if errors: for err in errors: print(err.message) print(errors) assert not is_valid def test_invalid_search_query_debtor(): """Assert that an invalid search query fails - debtor name is invalid.""" query = copy.deepcopy(SEARCH_QUERY) del query['criteria']['value'] is_valid, errors = validate(query, 'searchQuery', 'ppr') if errors: for err in errors: print(err.message) print(errors) assert not is_valid def test_invalid_search_query_firstname(): """Assert that an invalid search query fails - debtor first name is too long.""" query = copy.deepcopy(SEARCH_QUERY) del query['criteria']['value'] del query['criteria']['debtorName']['business'] query['criteria']['debtorName']['first'] = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX' is_valid, errors = validate(query, 'searchQuery', 'ppr') if errors: for err in errors: print(err.message) print(errors) assert not is_valid def test_invalid_search_query_secondname(): """Assert that an invalid search query fails - debtor second name is too long.""" query = copy.deepcopy(SEARCH_QUERY) del query['criteria']['value'] del query['criteria']['debtorName']['business'] query['criteria']['debtorName']['second'] = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX' is_valid, errors = validate(query, 'searchQuery', 'ppr') if errors: for err in errors: print(err.message) print(errors) assert not is_valid def test_invalid_search_query_lastname(): """Assert that an invalid search query fails - debtor last name is too long.""" 
query = copy.deepcopy(SEARCH_QUERY) del query['criteria']['value'] del query['criteria']['debtorName']['business'] query['criteria']['debtorName']['last'] = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX' is_valid, errors = validate(query, 'searchQuery', 'ppr') if errors: for err in errors: print(err.message) print(errors) assert not is_valid def test_invalid_search_query_clientref(): """Assert that an invalid search query fails - client reference id is too long.""" query = copy.deepcopy(SEARCH_QUERY) del query['criteria']['value'] del query['criteria']['debtorName']['business'] query['clientReferenceId'] = 'XxxxxxxxxxXxxxxxxxxxX' is_valid, errors = validate(query, 'searchQuery', 'ppr') if errors: for err in errors: print(err.message) print(errors) assert not is_valid def test_invalid_search_query_startts(): """Assert that an invalid search query fails - start date time format is invalid.""" query = copy.deepcopy(SEARCH_QUERY) del query['criteria']['value'] del query['criteria']['debtorName']['business'] query['startDateTime'] = 'Xxxxxxxxxx' is_valid, errors = validate(query, 'searchQuery', 'ppr') if errors: for err in errors: print(err.message) print(errors) assert not is_valid def test_invalid_search_query_endts(): """Assert that an invalid search query fails - end date time format is invalid.""" query = copy.deepcopy(SEARCH_QUERY) del query['criteria']['value'] del query['criteria']['debtorName']['business'] query['endDateTime'] = 'Xxxxxxxxxx' is_valid, errors = validate(query, 'searchQuery', 'ppr') if errors: for err in errors: print(err.message) print(errors) assert not is_valid
2.03125
2
devopsipy/decorators.py
kharnam/devopsipy
0
3109
""" Module to contain Pywork decorators """ __author__ = '<NAME>' import re import time import itertools import logging log = logging.getLogger(__name__)
2.375
2
tests/test_decorators.py
stephenfin/django-rest-framework
1
3110
from __future__ import unicode_literals import pytest from django.test import TestCase from rest_framework import status from rest_framework.authentication import BasicAuthentication from rest_framework.decorators import ( action, api_view, authentication_classes, detail_route, list_route, parser_classes, permission_classes, renderer_classes, schema, throttle_classes ) from rest_framework.parsers import JSONParser from rest_framework.permissions import IsAuthenticated from rest_framework.renderers import JSONRenderer from rest_framework.response import Response from rest_framework.schemas import AutoSchema from rest_framework.test import APIRequestFactory from rest_framework.throttling import UserRateThrottle from rest_framework.views import APIView class DecoratorTestCase(TestCase): def setUp(self): self.factory = APIRequestFactory() def _finalize_response(self, request, response, *args, **kwargs): response.request = request return APIView.finalize_response(self, request, response, *args, **kwargs) def test_api_view_incorrect(self): """ If @api_view is not applied correct, we should raise an assertion. """ @api_view def view(request): return Response() request = self.factory.get('/') self.assertRaises(AssertionError, view, request) def test_api_view_incorrect_arguments(self): """ If @api_view is missing arguments, we should raise an assertion. """ with self.assertRaises(AssertionError): @api_view('GET') def view(request): return Response() def test_calling_method(self): @api_view(['GET']) def view(request): return Response({}) request = self.factory.get('/') response = view(request) assert response.status_code == status.HTTP_200_OK request = self.factory.post('/') response = view(request) assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED def test_calling_put_method(self): @api_view(['GET', 'PUT']) def view(request): return Response({}) request = self.factory.put('/') response = view(request) assert response.status_code == status.HTTP_200_OK request = self.factory.post('/') response = view(request) assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED def test_calling_patch_method(self): @api_view(['GET', 'PATCH']) def view(request): return Response({}) request = self.factory.patch('/') response = view(request) assert response.status_code == status.HTTP_200_OK request = self.factory.post('/') response = view(request) assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED def test_renderer_classes(self): @api_view(['GET']) @renderer_classes([JSONRenderer]) def view(request): return Response({}) request = self.factory.get('/') response = view(request) assert isinstance(response.accepted_renderer, JSONRenderer) def test_parser_classes(self): @api_view(['GET']) @parser_classes([JSONParser]) def view(request): assert len(request.parsers) == 1 assert isinstance(request.parsers[0], JSONParser) return Response({}) request = self.factory.get('/') view(request) def test_authentication_classes(self): @api_view(['GET']) @authentication_classes([BasicAuthentication]) def view(request): assert len(request.authenticators) == 1 assert isinstance(request.authenticators[0], BasicAuthentication) return Response({}) request = self.factory.get('/') view(request) def test_permission_classes(self): @api_view(['GET']) @permission_classes([IsAuthenticated]) def view(request): return Response({}) request = self.factory.get('/') response = view(request) assert response.status_code == status.HTTP_403_FORBIDDEN def test_throttle_classes(self): class 
OncePerDayUserThrottle(UserRateThrottle): rate = '1/day' @api_view(['GET']) @throttle_classes([OncePerDayUserThrottle]) def view(request): return Response({}) request = self.factory.get('/') response = view(request) assert response.status_code == status.HTTP_200_OK response = view(request) assert response.status_code == status.HTTP_429_TOO_MANY_REQUESTS def test_schema(self): """ Checks CustomSchema class is set on view """ class CustomSchema(AutoSchema): pass @api_view(['GET']) @schema(CustomSchema()) def view(request): return Response({}) assert isinstance(view.cls.schema, CustomSchema) class ActionDecoratorTestCase(TestCase): def test_defaults(self): @action(detail=True) def test_action(request): """Description""" assert test_action.mapping == {'get': 'test_action'} assert test_action.detail is True assert test_action.url_path == 'test_action' assert test_action.url_name == 'test-action' assert test_action.kwargs == { 'name': 'Test action', 'description': 'Description', } def test_detail_required(self): with pytest.raises(AssertionError) as excinfo: @action() def test_action(request): raise NotImplementedError assert str(excinfo.value) == "@action() missing required argument: 'detail'" def test_method_mapping_http_methods(self): # All HTTP methods should be mappable @action(detail=False, methods=[]) def test_action(): raise NotImplementedError for name in APIView.http_method_names: def method(): raise NotImplementedError # Python 2.x compatibility - cast __name__ to str method.__name__ = str(name) getattr(test_action.mapping, name)(method) # ensure the mapping returns the correct method name for name in APIView.http_method_names: assert test_action.mapping[name] == name def test_view_name_kwargs(self): """ 'name' and 'suffix' are mutually exclusive kwargs used for generating a view's display name. """ # by default, generate name from method @action(detail=True) def test_action(request): raise NotImplementedError assert test_action.kwargs == { 'description': None, 'name': '<NAME>', } # name kwarg supersedes name generation @action(detail=True, name='<NAME>') def test_action(request): raise NotImplementedError assert test_action.kwargs == { 'description': None, 'name': '<NAME>', } # suffix kwarg supersedes name generation @action(detail=True, suffix='Suffix') def test_action(request): raise NotImplementedError assert test_action.kwargs == { 'description': None, 'suffix': 'Suffix', } # name + suffix is a conflict. with pytest.raises(TypeError) as excinfo: action(detail=True, name='test name', suffix='Suffix') assert str(excinfo.value) == "`name` and `suffix` are mutually exclusive arguments." def test_method_mapping(self): @action(detail=False) def test_action(request): raise NotImplementedError @test_action.mapping.post def test_action_post(request): raise NotImplementedError # The secondary handler methods should not have the action attributes for name in ['mapping', 'detail', 'url_path', 'url_name', 'kwargs']: assert hasattr(test_action, name) and not hasattr(test_action_post, name) def test_method_mapping_already_mapped(self): @action(detail=True) def test_action(request): raise NotImplementedError msg = "Method 'get' has already been mapped to '.test_action'." with self.assertRaisesMessage(AssertionError, msg): @test_action.mapping.get def test_action_get(request): raise NotImplementedError def test_method_mapping_overwrite(self): @action(detail=True) def test_action(): raise NotImplementedError msg = ("Method mapping does not behave like the property decorator. 
You " "cannot use the same method name for each mapping declaration.") with self.assertRaisesMessage(AssertionError, msg): @test_action.mapping.post def test_action(): raise NotImplementedError def test_detail_route_deprecation(self): with pytest.warns(DeprecationWarning) as record: @detail_route() def view(request): raise NotImplementedError assert len(record) == 1 assert str(record[0].message) == ( "`detail_route` is deprecated and will be removed in " "3.10 in favor of `action`, which accepts a `detail` bool. Use " "`@action(detail=True)` instead." ) def test_list_route_deprecation(self): with pytest.warns(DeprecationWarning) as record: @list_route() def view(request): raise NotImplementedError assert len(record) == 1 assert str(record[0].message) == ( "`list_route` is deprecated and will be removed in " "3.10 in favor of `action`, which accepts a `detail` bool. Use " "`@action(detail=False)` instead." ) def test_route_url_name_from_path(self): # pre-3.8 behavior was to base the `url_name` off of the `url_path` with pytest.warns(DeprecationWarning): @list_route(url_path='foo_bar') def view(request): raise NotImplementedError assert view.url_path == 'foo_bar' assert view.url_name == 'foo-bar'
2.1875
2
tamilmorse/morse_encode.py
CRE2525/open-tamil
1
3111
<reponame>CRE2525/open-tamil<filename>tamilmorse/morse_encode.py
## -*- coding: utf-8 -*-
# (C) 2018 <NAME>
# This file is part of Open-Tamil project
# You may use or distribute this file under terms of MIT license

import codecs
import json
import tamil
import sys
import os

# e.g. python morse_encode.py கலைஞர்
CURRDIR = os.path.dirname(os.path.realpath(__file__))


def encode(text):
    with codecs.open(os.path.join(CURRDIR, "data", "madurai_tamilmorse.json"), "r", "utf-8") as fp:
        codebook = json.loads(fp.read())
    output = [codebook.get(l, l) for l in tamil.utf8.get_letters(text)]
    return u" ".join(output)


if __name__ == u"__main__":
    encode(u" ".join([i.decode("utf-8") for i in sys.argv[1:]]))
2.703125
3
Leetcode/Python/_1721.py
Xrenya/algorithms
0
3112
<filename>Leetcode/Python/_1721.py<gh_stars>0
# Definition for singly-linked list.
# class ListNode:
#     def __init__(self, val=0, next=None):
#         self.val = val
#         self.next = next
class Solution:
    def swapNodes(self, head: Optional[ListNode], k: int) -> Optional[ListNode]:
        temp = head
        array = []
        while temp:
            array.append(temp.val)
            temp = temp.next
        array[k - 1], array[len(array) - k] = array[len(array) - k], array[k - 1]
        head = ListNode(0)
        dummy = head
        for num in array:
            dummy.next = ListNode(num)
            dummy = dummy.next
        return head.next


# Definition for singly-linked list.
# class ListNode:
#     def __init__(self, val=0, next=None):
#         self.val = val
#         self.next = next
class Solution:
    def swapNodes(self, head: Optional[ListNode], k: int) -> Optional[ListNode]:
        if head is None or head.next is None:
            return head
        slow = fast = cnt = head
        counter = 0
        while cnt:
            counter += 1
            cnt = cnt.next
        for _ in range(k - 1):
            slow = slow.next
        for _ in range(counter - k):
            fast = fast.next
        slow.val, fast.val = fast.val, slow.val
        return head
3.75
4
contrib/functional_tests/functional/test_reorg.py
electrumsv/electrumsv
136
3113
<reponame>electrumsv/electrumsv<filename>contrib/functional_tests/functional/test_reorg.py
"""
Warning - this will reset all components back to a blank state before running the simulation

Runs node1, electrumx1 and electrumsv1 and loads the default wallet on the daemon (so that
newly submitted blocks will be synchronized by ElectrumSV

reorged txid: 'a1fa9460ca105c1396cd338f7fa202bf79a9d244d730e91e19f6302a05b2f07a'
"""
import asyncio
import os
from pathlib import Path

import pytest
import pytest_asyncio
from electrumsv_node import electrumsv_node
from electrumsv_sdk import utils
import logging
import requests

from contrib.functional_tests.websocket_client import TxStateWSClient

MODULE_DIR = os.path.dirname(os.path.abspath(__file__))

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("simulate-fresh-reorg")


async def wait_for_reog_transaction_update(reorged_txids, reorg_height):
    MAX_WAIT_TIME = 10  # seconds
    async with TxStateWSClient() as ws_client:
        try:
            await asyncio.wait_for(ws_client.block_until_confirmed_and_height_updated(
                reorged_txids, reorg_height), MAX_WAIT_TIME)
        except asyncio.TimeoutError:
            logger.exception(f"timed out after {MAX_WAIT_TIME} seconds")
            raise


class TestReorg:
    @classmethod
    def setup_class(cls):
        pass

    @classmethod
    def teardown_class(cls):
        pass

    @pytest.mark.asyncio
    def test_reorg(self, event_loop):
        async def test_reorg():
            payload = {
                "password": "<PASSWORD>"
            }
            REORGED_TXIDS = "a1fa9460ca105c1396cd338f7fa202bf79a9d244d730e91e19f6302a05b2f07a"

            # Load the default wallet on ElectrumSV daemon
            url = f"http://127.0.0.1:9999/v1/regtest/dapp/wallets/worker1.sqlite/load_wallet"
            result = requests.post(url, json=payload)
            result.raise_for_status()

            # Submit node1 blocks to node
            if electrumsv_node.is_node_running():
                utils.submit_blocks_from_file(node_id='node1',
                    filepath=Path(MODULE_DIR).joinpath('../reorg_blocks/node1_blocks.dat'))
            else:
                logger.exception("node unavailable")

            try:
                await wait_for_reog_transaction_update([REORGED_TXIDS], 201)

                # Todo check state of get_balance; get_coin_state; get_transaction_history

                # Submit node2 blocks to node
                if electrumsv_node.is_node_running():
                    utils.submit_blocks_from_file(node_id='node1',
                        filepath=Path(MODULE_DIR).joinpath('../reorg_blocks/node2_blocks.dat'))
                else:
                    logger.exception("node unavailable")

                await wait_for_reog_transaction_update([REORGED_TXIDS], 202)
            except asyncio.TimeoutError:
                pytest.xfail("work in progress alongside refactoring changes...")

            # Todo check state of get_balance; get_coin_state; get_transaction_history

        event_loop.run_until_complete(test_reorg())
2.078125
2
Pyrado/pyrado/environments/mujoco/wam_bic.py
KhanhThiVo/SimuRLacra
0
3114
# Copyright (c) 2020, <NAME>, Honda Research Institute Europe GmbH, and # Technical University of Darmstadt. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of <NAME>, Honda Research Institute Europe GmbH, # or Technical University of Darmstadt, nor the names of its contributors may # be used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL <NAME>, HONDA RESEARCH INSTITUTE EUROPE GMBH, # OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER # IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. import mujoco_py import numpy as np import os.path as osp from init_args_serializer import Serializable from typing import Optional import pyrado from pyrado.environments.barrett_wam import ( goal_pos_init_sim_4dof, goal_pos_init_sim_7dof, init_qpos_des_4dof, init_qpos_des_7dof, act_space_bic_4dof, act_space_bic_7dof, wam_q_limits_up_7dof, wam_q_limits_lo_7dof, torque_space_wam_4dof, torque_space_wam_7dof, wam_pgains_7dof, wam_dgains_7dof, wam_pgains_4dof, wam_dgains_4dof, ) from pyrado.environments.mujoco.base import MujocoSimEnv from pyrado.spaces.base import Space from pyrado.spaces.box import BoxSpace from pyrado.spaces.singular import SingularStateSpace from pyrado.tasks.base import Task from pyrado.tasks.condition_only import ConditionOnlyTask from pyrado.tasks.desired_state import DesStateTask from pyrado.tasks.final_reward import BestStateFinalRewTask, FinalRewTask, FinalRewMode from pyrado.tasks.goalless import GoallessTask from pyrado.tasks.masked import MaskedTask from pyrado.tasks.parallel import ParallelTasks from pyrado.tasks.reward_functions import ZeroPerStepRewFcn, ExpQuadrErrRewFcn, QuadrErrRewFcn from pyrado.tasks.sequential import SequentialTasks from pyrado.utils.data_types import EnvSpec from pyrado.utils.input_output import print_cbt class WAMBallInCupSim(MujocoSimEnv, Serializable): """ WAM robotic arm from Barrett technologies for the ball-in-the-cup task, controlled by a PD controller. .. note:: When using the `reset()` function, always pass a meaningful `init_state` .. 
seealso:: [1] https://github.com/psclklnk/self-paced-rl/tree/master/sprl/envs/ball_in_a_cup.py """ name: str = "wam-bic" def __init__( self, num_dof: int, frame_skip: int = 4, dt: Optional[float] = None, max_steps: int = pyrado.inf, fixed_init_state: bool = True, stop_on_collision: bool = True, observe_ball: bool = False, observe_cup: bool = False, task_args: Optional[dict] = None, ): """ Constructor :param num_dof: number of degrees of freedom (4 or 7), depending on which Barrett WAM setup being used :param frame_skip: number of simulation frames for which the same action is held, results in a multiplier of the time step size `dt` :param dt: by default the time step size is the one from the mujoco config file multiplied by the number of frame skips (legacy from OpenAI environments). By passing an explicit `dt` value, this can be overwritten. Possible use case if if you know that you recorded a trajectory with a specific `dt`. :param max_steps: max number of simulation time steps :param fixed_init_state: enables/disables deterministic, fixed initial state :param stop_on_collision: set the `failed` flag in the `dict` returned by `_mujoco_step()` to true, if the ball collides with something else than the desired parts of the cup. This causes the episode to end. Keep in mind that in case of a negative step reward and no final cost on failing, this might result in undesired behavior. :param observe_ball: if `True`, include the 2-dim (x-z plane) cartesian ball position into the observation :param observe_cup: if `True`, include the 2-dim (x-z plane) cartesian cup position into the observation :param task_args: arguments for the task construction """ Serializable._init(self, locals()) self.fixed_init_state = fixed_init_state self.observe_ball = observe_ball self.observe_cup = observe_cup # Initialize num DoF specific variables self._num_dof = num_dof if num_dof == 4: graph_file_name = "wam_4dof_bic.xml" self.qpos_des_init = init_qpos_des_4dof self.p_gains = wam_pgains_4dof self.d_gains = wam_dgains_4dof init_ball_pos = np.array([0.723, 0.0, 1.168]) init_cup_goal = goal_pos_init_sim_4dof elif num_dof == 7: graph_file_name = "wam_7dof_bic.xml" self.qpos_des_init = init_qpos_des_7dof self.p_gains = wam_pgains_7dof self.d_gains = wam_dgains_7dof init_ball_pos = np.array([0.828, 0.0, 1.131]) init_cup_goal = goal_pos_init_sim_7dof else: raise pyrado.ValueErr(given=num_dof, eq_constraint="4 or 7") model_path = osp.join(pyrado.MUJOCO_ASSETS_DIR, graph_file_name) super().__init__(model_path, frame_skip, dt, max_steps, task_args) # Actual initial joint position (when the WAM moved to the home position) if num_dof == 4: self.init_qpos[:4] = np.array([0.0, 0.63, 0.0, 1.27]) self.init_qpos[4] = -0.34 # angle of the first rope segment relative to the cup bottom plate else: self.init_qpos[:7] = np.array([0.0, 0.65, 0.0, 1.41, 0.0, -0.28, -1.57]) self.init_qpos[7] = -0.21 # angle of the first rope segment relative to the cup bottom plate # Set the actual stable initial position. This position would be reached after some time using the internal # PD controller to stabilize at self._qpos_des_init. 
# The initial position of the ball in cartesian coordinates self._init_state = np.concatenate([self.init_qpos, self.init_qvel, init_ball_pos, init_cup_goal]) if self.fixed_init_state: self._init_space = SingularStateSpace(self._init_state) else: # Add plus/minus one degree to each motor joint and the first rope segment joint init_state_up = self._init_state.copy() init_state_up[: self._num_dof] += np.pi / 180 * np.array([0.1, 1, 0.5, 1.0, 0.1, 1.0, 1.0])[: self._num_dof] init_state_lo = self._init_state.copy() init_state_lo[: self._num_dof] -= np.pi / 180 * np.array([0.1, 1, 0.5, 1.0, 0.1, 1.0, 1.0])[: self._num_dof] self._init_space = BoxSpace(init_state_lo, init_state_up) # Bodies to check fo collision self._collision_bodies = [ "wam/base_link", "wam/shoulder_yaw_link", "wam/shoulder_pitch_link", "wam/upper_arm_link", "wam/forearm_link", "wrist_palm_link", "wam/wrist_pitch_link", "wam/wrist_yaw_link", ] if self._num_dof == 4: self._collision_bodies = self._collision_bodies[:6] # We access a private attribute since a method like 'model.geom_names[geom_id]' cannot be used because # not every geom has a name self._collision_geom_ids = [self.model._geom_name2id[name] for name in ["cup_geom1", "cup_geom2"]] self.stop_on_collision = stop_on_collision self.camera_config = dict( distance=2.7, trackbodyid=0, # id of the body to track elevation=-30, # camera rotation around the axis in the plane azimuth=-90, # camera rotation around the camera's vertical axis ) @property def num_dof(self) -> int: """ Get the number of degrees of freedom. """ return self._num_dof @property def torque_space(self) -> Space: """ Get the space of joint torques. """ return torque_space_wam_7dof if self._num_dof == 7 else torque_space_wam_4dof @property def state_space(self) -> Space: # The state space has the same shape as the init space (including ball and cup) state_shape = np.concatenate([self.init_qpos, self.init_qvel, np.empty(3), np.empty(3)]).shape state_lo, state_up = np.full(state_shape, -pyrado.inf), np.full(state_shape, pyrado.inf) # Ensure that joint limits of the arm are not reached (5 deg safety margin) state_lo[: self._num_dof] = wam_q_limits_lo_7dof[: self._num_dof] state_up[: self._num_dof] = wam_q_limits_up_7dof[: self._num_dof] return BoxSpace(state_lo, state_up) @property def obs_space(self) -> Space: # Observing the normalized time and optionally the cup and ball position obs_lo, obs_up, labels = [0.0], [1.0], ["t"] if self.observe_ball: obs_lo.extend([-3.0, -3.0]) obs_up.extend([3.0, 3.0]) labels.extend(["ball_x", "ball_z"]) if self.observe_cup: obs_lo.extend([-3.0, -3.0]) obs_up.extend([3.0, 3.0]) labels.extend(["cup_x", "cup_z"]) return BoxSpace(obs_lo, obs_up, labels=labels) @property def act_space(self) -> Space: # Running a PD controller on joint positions and velocities return act_space_bic_7dof if self._num_dof == 7 else act_space_bic_4dof @classmethod def get_nominal_domain_param(cls, num_dof: int = 7) -> dict: if num_dof == 7: return dict( cup_scale=1.0, # scaling factor for the radius of the cup [-] (should be >0.65) rope_length=0.41, # length of the rope [m] ball_mass=0.024, # mass of the ball [kg] joint_1_damping=0.05, # damping of motor joints [N/s] (default value is small) joint_2_damping=0.05, # damping of motor joints [N/s] (default value is small) joint_3_damping=0.05, # damping of motor joints [N/s] (default value is small) joint_4_damping=0.05, # damping of motor joints [N/s] (default value is small) joint_5_damping=0.05, # damping of motor joints [N/s] (default value is small) 
joint_6_damping=0.05, # damping of motor joints [N/s] (default value is small) joint_7_damping=0.05, # damping of motor joints [N/s] (default value is small) joint_1_dryfriction=0.4, # dry friction coefficient of motor joint 1 [-] joint_2_dryfriction=0.4, # dry friction coefficient of motor joint 2 [-] joint_3_dryfriction=0.4, # dry friction coefficient of motor joint 3 [-] joint_4_dryfriction=0.4, # dry friction coefficient of motor joint 4 [-] joint_5_dryfriction=0.4, # dry friction coefficient of motor joint 5 [-] joint_6_dryfriction=0.4, # dry friction coefficient of motor joint 6 [-] joint_7_dryfriction=0.4, # dry friction coefficient of motor joint 7 [-] rope_damping=1e-4, # damping of rope joints [N/s] (reasonable values are 6e-4 to 1e-6) ) elif num_dof == 4: return dict( cup_scale=1.0, # scaling factor for the radius of the cup [-] (should be >0.65) rope_length=0.41, # length of the rope [m] ball_mass=0.024, # mass of the ball [kg] joint_1_damping=0.05, # damping of motor joints [N/s] (default value is small) joint_2_damping=0.05, # damping of motor joints [N/s] (default value is small) joint_3_damping=0.05, # damping of motor joints [N/s] (default value is small) joint_4_damping=0.05, # damping of motor joints [N/s] (default value is small) joint_1_dryfriction=0.4, # dry friction coefficient of motor joint 1 [-] joint_2_dryfriction=0.4, # dry friction coefficient of motor joint 2 [-] joint_3_dryfriction=0.4, # dry friction coefficient of motor joint 3 [-] joint_4_dryfriction=0.4, # dry friction coefficient of motor joint 4 [-] rope_damping=1e-4, # damping of rope joints [N/s] (reasonable values are 6e-4 to 1e-6) ) else: raise pyrado.ValueErr(given=num_dof, eq_constraint="4 or 7") def _create_task(self, task_args: dict) -> Task: if task_args.get("sparse_rew_fcn", False): # Create a task with binary reward return self._create_main_task(task_args) else: # Create two (or three) parallel running task. # 1.) Main task: Desired state task for the cartesian ball distance # 2.) Deviation task: Desired state task for the cartesian- and joint deviation from the init position # 3.) Binary Bonus: Adds a binary bonus when ball is catched [inactive by default] return ParallelTasks( [ self._create_main_task(task_args), self._create_deviation_task(task_args), self._create_main_task( dict( sparse_rew_fcn=True, success_bonus=task_args.get("success_bonus", 0), ) ), ] ) def _create_main_task(self, task_args: dict) -> Task: # Create a DesStateTask that masks everything but the ball position idcs = list(range(self.state_space.flat_dim - 6, self.state_space.flat_dim - 3)) # Cartesian ball position spec = EnvSpec( self.spec.obs_space, self.spec.act_space, self.spec.state_space.subspace(self.spec.state_space.create_mask(idcs)), ) # If we do not use copy(), state_des coming from MuJoCo is a reference and updates automatically at each step. 
# Note: sim.forward() + get_body_xpos() results in wrong output for state_des, as sim has not been updated to # init_space.sample(), which is first called in reset() if task_args.get("sparse_rew_fcn", False): factor = task_args.get("success_bonus", 1) # Binary final reward task main_task = FinalRewTask( ConditionOnlyTask( spec, condition_fcn=self.check_ball_in_cup, is_success_condition=True, ), mode=FinalRewMode(always_positive=True), factor=factor, ) # Yield -1 on fail after the main task ist done (successfully or not) dont_fail_after_succ_task = FinalRewTask( GoallessTask(spec, ZeroPerStepRewFcn()), mode=FinalRewMode(always_negative=True), factor=factor, ) # Augment the binary task with an endless dummy task, to avoid early stopping task = SequentialTasks((main_task, dont_fail_after_succ_task)) return MaskedTask(self.spec, task, idcs) else: state_des = self.sim.data.get_site_xpos("cup_goal") # this is a reference # state_des_ball = self.sim.data.get_site_xpos("cup_goal") # this is a reference # state_des_cup = np.array([0.82521, 0, 1.4469]) if self._num_dof == 7 else np.array([0.758, 0, 1.5]) # state_des = np.concatenate([state_des_ball, state_des_cup]) R_default = np.diag([0, 0, 1, 1e-2, 1e-2, 1e-1]) if self._num_dof == 7 else np.diag([0, 0, 1e-2, 1e-2]) rew_fcn = ExpQuadrErrRewFcn( Q=task_args.get("Q", np.diag([2e1, 1e-4, 2e1])), # distance ball - cup; shouldn't move in y-direction R=task_args.get("R", R_default), # last joint is really unreliable for 7 dof, thus punish more ) task = DesStateTask(spec, state_des, rew_fcn) # Wrap the masked DesStateTask to add a bonus for the best state in the rollout return BestStateFinalRewTask( MaskedTask(self.spec, task, idcs), factor=task_args.get("final_factor", 0.05 * self.max_steps), ) def _create_deviation_task(self, task_args: dict) -> Task: idcs = list(range(self.state_space.flat_dim - 3, self.state_space.flat_dim)) # Cartesian cup goal position spec = EnvSpec( self.spec.obs_space, self.spec.act_space, self.spec.state_space.subspace(self.spec.state_space.create_mask(idcs)), ) # init cup goal position state_des = goal_pos_init_sim_7dof if self._num_dof == 7 else goal_pos_init_sim_4dof rew_fcn = QuadrErrRewFcn( Q=task_args.get("Q_dev", np.diag([2e-1, 1e-6, 5e0])), # Cartesian distance from init cup position R=task_args.get( "R_dev", np.zeros((self.act_space.shape[0], self.act_space.shape[0])) ), # joint space distance from init pose, interferes with R_default from _create_main_task ) task = DesStateTask(spec, state_des, rew_fcn) return MaskedTask(self.spec, task, idcs) def _adapt_model_file(self, xml_model: str, domain_param: dict) -> str: # First replace special domain parameters cup_scale = domain_param.pop("cup_scale", None) rope_length = domain_param.pop("rope_length", None) if cup_scale is not None: # See [1, l.93-96] xml_model = xml_model.replace("[scale_mesh]", str(cup_scale * 0.001)) xml_model = xml_model.replace("[pos_mesh]", str(0.055 - (cup_scale - 1.0) * 0.023)) xml_model = xml_model.replace("[pos_goal]", str(0.1165 + (cup_scale - 1.0) * 0.0385)) xml_model = xml_model.replace("[size_cup]", str(cup_scale * 0.038)) xml_model = xml_model.replace("[size_cup_inner]", str(cup_scale * 0.03)) if rope_length is not None: # The rope consists of 30 capsules xml_model = xml_model.replace("[pos_capsule]", str(rope_length / 30)) # Each joint is at the top of each capsule (therefore negative direction from center) xml_model = xml_model.replace("[pos_capsule_joint]", str(-rope_length / 60)) # Pure visualization component xml_model = 
xml_model.replace("[size_capsule_geom]", str(rope_length / 72)) # Resolve mesh directory and replace the remaining domain parameters return super()._adapt_model_file(xml_model, domain_param) def _mujoco_step(self, act: np.ndarray) -> dict: assert self.act_space.contains(act, verbose=True) # Get the desired positions and velocities for the selected joints qpos_des = self.qpos_des_init.copy() # the desired trajectory is relative to self._qpos_des_init qvel_des = np.zeros_like(qpos_des) if self._num_dof == 4: np.add.at(qpos_des, [1, 3], act[:2]) np.add.at(qvel_des, [1, 3], act[2:]) elif self._num_dof == 7: np.add.at(qpos_des, [1, 3, 5], act[:3]) np.add.at(qvel_des, [1, 3, 5], act[3:]) # Compute the position and velocity errors err_pos = qpos_des - self.state[: self._num_dof] err_vel = qvel_des - self.state[self.model.nq : self.model.nq + self._num_dof] # Compute the torques for the PD controller and clip them to their max values torque = self.p_gains * err_pos + self.d_gains * err_vel torque = self.torque_space.project_to(torque) # Apply the torques to the robot self.sim.data.qfrc_applied[: self._num_dof] = torque # Call MuJoCo try: self.sim.step() mjsim_crashed = False except mujoco_py.builder.MujocoException: # When MuJoCo recognized instabilities in the simulation, it simply kills it. # Instead, we want the episode to end with a failure. mjsim_crashed = True qpos, qvel = self.sim.data.qpos.copy(), self.sim.data.qvel.copy() ball_pos = self.sim.data.get_body_xpos("ball").copy() cup_goal = self.sim.data.get_site_xpos("cup_goal").copy() self.state = np.concatenate([qpos, qvel, ball_pos, cup_goal]) # If desired, check for collisions of the ball with the robot ball_collided = self.check_ball_collisions() if self.stop_on_collision else False # If state is out of bounds (this is normally checked by the task, but does not work because of the mask) state_oob = False if self.state_space.contains(self.state) else True return dict( qpos_des=qpos_des, qvel_des=qvel_des, qpos=qpos[: self._num_dof], qvel=qvel[: self._num_dof], ball_pos=ball_pos, cup_pos=cup_goal, failed=mjsim_crashed or ball_collided or state_oob, ) def check_ball_collisions(self, verbose: bool = False) -> bool: """ Check if an undesired collision with the ball occurs. :param verbose: print messages on collision :return: `True` if the ball collides with something else than the central parts of the cup """ for i in range(self.sim.data.ncon): # Get current contact object contact = self.sim.data.contact[i] # Extract body-id and body-name of both contact geoms body1 = self.model.geom_bodyid[contact.geom1] body1_name = self.model.body_names[body1] body2 = self.model.geom_bodyid[contact.geom2] body2_name = self.model.body_names[body2] # Evaluate if the ball collides with part of the WAM (collision bodies) # or the connection of WAM and cup (geom_ids) c1 = body1_name == "ball" and ( body2_name in self._collision_bodies or contact.geom2 in self._collision_geom_ids ) c2 = body2_name == "ball" and ( body1_name in self._collision_bodies or contact.geom1 in self._collision_geom_ids ) if c1 or c2: if verbose: print_cbt( f"Undesired collision of {body1_name} and {body2_name} detected!", "y", ) return True return False def check_ball_in_cup(self, *args, verbose: bool = False): """ Check if the ball is in the cup. 
:param verbose: print messages when ball is in the cup :return: `True` if the ball is in the cup """ for i in range(self.sim.data.ncon): # Get current contact object contact = self.sim.data.contact[i] # Extract body-id and body-name of both contact geoms body1 = self.model.geom_bodyid[contact.geom1] body1_name = self.model.body_names[body1] body2 = self.model.geom_bodyid[contact.geom2] body2_name = self.model.body_names[body2] # Evaluate if the ball collides with part of the WAM (collision bodies) # or the connection of WAM and cup (geom_ids) cup_inner_id = self.model._geom_name2id["cup_inner"] c1 = body1_name == "ball" and contact.geom2 == cup_inner_id c2 = body2_name == "ball" and contact.geom1 == cup_inner_id if c1 or c2: if verbose: print_cbt(f"The ball is in the cup at time step {self.curr_step}.", "y") return True return False def observe(self, state: np.ndarray) -> np.ndarray: # TODO: Debug print-outs, should be removed in future... # if self._curr_step == 0: # print_cbt(f'cup xpos: {self.sim.data.get_body_xpos("cup").copy()}', 'b') # center of frame # print_cbt(f'cup xipos: {self.sim.data.get_body_xipos("cup").copy()}', 'b') # center of mass # Observe the normalized time obs = [self._curr_step / self.max_steps] # Extract the (x, z) cartesian position of cup and ball (the robot operates in the x-z plane). # Note: the cup_goal is the mujoco site object marking the goal position for the ball. It is not identical # to the coordinate system origin of the rigid body object 'cup' if self.observe_ball: obs.extend([state[-3], state[-1]]) if self.observe_cup: obs.extend([state[-6], state[-4]]) return np.array(obs)
1.101563
1
pyRasp.py
ToninoTarsi/pyRasp
0
3115
# pyRasp
# Copyright (c) <NAME> 2020. Licensed under MIT.
# Requirements:
# Python 3
# pip install pyyaml
# pip install requests
# pip install f90nml

from downloadGFSA import downloadGFSA
from prepare_wps import prepare_wps
from ungrib import ungrib
from metgrid import metgrid
from prepare_wrf import prepare_wrf
from real import real
from wrf import wrf

result = downloadGFSA(True)
prepare_wps(result)
ungrib()
metgrid()
prepare_wrf(result)
real()
wrf()
1.617188
2
app/strategies/ema_bb_alligator_strategy.py
namuan/crypto-rider
1
3116
import pandas as pd
import ta

from app.common import reshape_data
from app.strategies.base_strategy import BaseStrategy

pd.set_option("display.max_columns", None)
pd.set_option("display.width", None)


class EMABBAlligatorStrategy(BaseStrategy):
    BUY_SIGNAL = "buy_signal"
    SELL_SIGNAL = "sell_signal"

    def calculate_indicators(self):
        df = self.load_df(limit=1000)
        _ = df["close_3_ema"]
        _ = df["boll"]
        ao = ta.momentum.AwesomeOscillatorIndicator(high=df["high"], low=df["low"])
        df["AO"] = ao.ao()
        return df

    def can_sell(self, df):
        prev_candle = self.candle(df)
        last_ema = prev_candle["close_3_ema"]
        last_bb = prev_candle["boll"]
        return [
            last_ema < last_bb,
            (self.candle(df, rewind=-2)["AO"] > 0)
            & (self.candle(df, rewind=-1)["AO"] < 0),
            prev_candle["volume"] > 0,
        ]

    def can_buy(self, df):
        prev_candle = self.candle(df)
        last_ema = prev_candle["close_3_ema"]
        last_bb = prev_candle["boll"]
        return [
            last_ema > last_bb,
            (self.candle(df, rewind=-2)["AO"] < 0)
            & (self.candle(df, rewind=-1)["AO"] > 0),
            prev_candle["volume"] > 0,
        ]

    def alert_message(self, df):
        prev_candle = self.candle(df)
        last_close = prev_candle["close"]
        last_ao = prev_candle["AO"]
        return (
            "Close: {:.2f}, Awesome Oscillator value: {:.2f}".format(
                last_close, last_ao
            ),
        )
2.46875
2
BasicScripts/basics.py
TomasBelskis/PythonAutomation
0
3117
# Python Basics

# String concatenation
added_strings = str(32) + "_342"

# Getting input
input_from_user = input()

# Basic print function
print(input_from_user)

# Mixing boolean and comparison operations
if (4 < 5) and (5 < 6):
    print("True")

# Basic if & if else flow
if name == 'Alice':
    print('Hi, Alice.')
elif age < 12:
    print("You are not Alice, kiddo.")
elif age > 2000:
    print('Unlike you, Alice is not an undead, immortal vampire.')
elif age > 100:
    print('You are not Alice, grannie.')

# Loops in Python 3
spam = 0
while spam < 5:
    print('Spam, spam!')
    spam = spam + 1

# Access loop
while True:
    print('Who are you?')
    name = input()
    if name != 'Joe':
        continue
    print('Hello, Joe. What is the password? (It is a fish.)')
    password = input()
    if password == '<PASSWORD>':
        break
print('Access granted.')

# For loops using range function
print("My name is")
for i in range(5):
    print('<NAME> (' + str(i) + ')')

# Using starting range
for i in range(12, 16):
    print(i)

# Importing modules
import random
for i in range(5):
    print(random.randint(1, 10))

# Exiting a python program
import sys
while True:
    print('Type exit to exit.')
    response = input()
    if response == 'exit':
        sys.exit()
    print('You typed ' + response + '.')
4
4
env.example.py
wilcoln/klazor
8
3118
<filename>env.example.py
DATABASE_OPTIONS = {
    'database': 'klazor',
    'user': 'root',
    'password': '',
    'charset': 'utf8mb4',
}

HOSTS = ['127.0.0.1', '172.16.58.3']
1.34375
1
misc/_local_settings.py
lzantal/djskell
4
3119
<gh_stars>1-10
"""
Django settings.

Generated by 'django-admin startproject' using Django 2.2.4.

For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""

#DEBUG = False
DEBUG = True
SERVE_STATIC = DEBUG

ALLOWED_HOSTS = []

# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
    'default': {
        #'ENGINE': 'django.db.backends.oracle'
        #'ENGINE': 'django.db.backends.mysql',
        #'ENGINE': 'django.db.backends.sqlite3',
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'mydatabase',
        'USER': 'mydatabaseuser',
        'PASSWORD': '<PASSWORD>',
        'HOST': '127.0.0.1',
        'PORT': '5432',
    }
}
1.328125
1
contacts/forms.py
pedrohd21/Agenda-Django
1
3120
<gh_stars>1-10
from django import forms

from .models import Contact


class ContactForm(forms.ModelForm):
    class Meta:
        model = Contact
        fields = ('name', 'number', 'email', 'category', 'description')
2.140625
2
awx/api/urls/ad_hoc_command.py
ziegenberg/awx
0
3121
<reponame>ziegenberg/awx<filename>awx/api/urls/ad_hoc_command.py
# Copyright (c) 2017 Ansible, Inc.
# All Rights Reserved.

from django.urls import re_path

from awx.api.views import (
    AdHocCommandList,
    AdHocCommandDetail,
    AdHocCommandCancel,
    AdHocCommandRelaunch,
    AdHocCommandAdHocCommandEventsList,
    AdHocCommandActivityStreamList,
    AdHocCommandNotificationsList,
    AdHocCommandStdout,
)


urls = [
    re_path(r'^$', AdHocCommandList.as_view(), name='ad_hoc_command_list'),
    re_path(r'^(?P<pk>[0-9]+)/$', AdHocCommandDetail.as_view(), name='ad_hoc_command_detail'),
    re_path(r'^(?P<pk>[0-9]+)/cancel/$', AdHocCommandCancel.as_view(), name='ad_hoc_command_cancel'),
    re_path(r'^(?P<pk>[0-9]+)/relaunch/$', AdHocCommandRelaunch.as_view(), name='ad_hoc_command_relaunch'),
    re_path(r'^(?P<pk>[0-9]+)/events/$', AdHocCommandAdHocCommandEventsList.as_view(), name='ad_hoc_command_ad_hoc_command_events_list'),
    re_path(r'^(?P<pk>[0-9]+)/activity_stream/$', AdHocCommandActivityStreamList.as_view(), name='ad_hoc_command_activity_stream_list'),
    re_path(r'^(?P<pk>[0-9]+)/notifications/$', AdHocCommandNotificationsList.as_view(), name='ad_hoc_command_notifications_list'),
    re_path(r'^(?P<pk>[0-9]+)/stdout/$', AdHocCommandStdout.as_view(), name='ad_hoc_command_stdout'),
]

__all__ = ['urls']
1.929688
2
note5/package_test5.py
icexmoon/python-learning-notes
0
3122
#test.py
from time_tools import *

# print(compareTimestamp(111,222))
time.showNowTime()  # now time is XX:XX:XX
2.5
2
fgarcade/sprites.py
fabiommendes/fgarcade
2
3123
<gh_stars>1-10
import arcade
from arcade import FACE_RIGHT, FACE_DOWN, FACE_UP, FACE_LEFT


class AnimatedWalkingSprite(arcade.Sprite):
    def __init__(self, scale: float = 1,
                 image_x: float = 0, image_y: float = 0,
                 center_x: float = 0, center_y: float = 0, *,
                 stand_left, stand_right, left, right, up, down, step=20):
        super().__init__(scale=scale, image_x=image_x, image_y=image_y,
                         center_x=center_x, center_y=center_y)
        self.state = FACE_RIGHT
        self.stand_right_texture = stand_right
        self.stand_left_texture = stand_left
        self.walk_left_textures = left
        self.walk_right_textures = right
        self.walk_up_textures = up
        self.walk_down_textures = down
        self.cur_texture_index = 0
        self.texture_change_distance = step
        self.last_texture_change_center_x = 0
        self.last_texture_change_center_y = 0
        self._update_direction(FACE_RIGHT, self.stand_right_texture)
        self.textures = [self._texture]

    def _update_direction(self, state, texture):
        self.last_texture_change_center_x = self.center_x
        self.last_texture_change_center_y = self.center_y
        self.state = state
        self.cur_texture_index = 0
        self._texture = texture

    def _rotate(self, delta, list):
        if abs(delta) >= self.texture_change_distance:
            self.cur_texture_index += 1
            self.last_texture_change_center_x = self.center_x
            self.last_texture_change_center_y = self.center_y
            self._texture = list[self.cur_texture_index % len(list)]

    def update_animation(self):
        tol = 1.

        # Falling
        if self.change_y <= -tol:
            if self.state != FACE_DOWN:
                self._update_direction(FACE_DOWN, self.walk_down_textures[0])
            else:
                self._rotate(self.center_y - self.last_texture_change_center_y,
                             self.walk_down_textures)

        # Jumping
        elif self.change_y >= tol:
            if self.state != FACE_UP:
                self._update_direction(FACE_UP, self.walk_up_textures[0])
            else:
                self._rotate(self.center_y - self.last_texture_change_center_y,
                             self.walk_up_textures)

        # Going left
        elif self.change_x <= -tol:
            if self.state != FACE_LEFT:
                self._update_direction(FACE_LEFT, self.stand_left_texture)
            else:
                self._rotate(self.center_x - self.last_texture_change_center_x,
                             self.walk_left_textures)

        # Going right
        elif self.change_x >= tol:
            if self.state != FACE_RIGHT:
                self._update_direction(FACE_RIGHT, self.stand_right_texture)
            else:
                self._rotate(self.center_x - self.last_texture_change_center_x,
                             self.walk_right_textures)

        elif abs(self.change_x) < tol and self.state == FACE_DOWN:
            self._update_direction(FACE_RIGHT, self.stand_right_texture)

        self.textures[0] = self._texture
        self.width = self._texture.width * self.scale
        self.height = self._texture.height * self.scale
2.578125
3
src/mafUtility.py
gh-schen/SiriusEpiClassifier
1
3124
<filename>src/mafUtility.py<gh_stars>1-10
from numpy.core.fromnumeric import transpose
from sklearn import linear_model
from scipy.special import logit
from scipy import stats
from copy import deepcopy
from numpy import random, concatenate, quantile, matmul, transpose
import logging


class singleRegModel():
    """
    data struct for running a single regression test
    """
    def __init__(self, regressor):
        self.regressor = regressor
        self.mmodel = None
        # params
        self.quantile_limit_ = 0.95

    def train_binary(self, x_train, y_train):
        self.mmodel = deepcopy(self.regressor)
        self.mmodel.fit(x_train, y_train)

    def train_quant(self, init_x, follow_x, init_y, follow_iter):
        self.train_binary(init_x, init_y)
        if follow_x is None:
            logging.warning("No samples have missing MAF - no follow up training")
            return
        for i in range(follow_iter):
            init_preds = self.mmodel.predict(init_x)
            upper_limit = quantile(init_preds, self.quantile_limit_)
            follow_y = self.mmodel.predict(follow_x)
            follow_y[follow_y > upper_limit] = upper_limit
            x_merge = concatenate((init_x, follow_x))
            y_merge = concatenate((init_y, follow_y))
            self.mmodel = deepcopy(self.regressor)
            self.mmodel.fit(x_merge, y_merge)

    def predict_prob(self, input_x):
        preds = matmul(input_x, transpose(self.mmodel.coef_)) + self.mmodel.intercept_
        probs = preds[:, 0]
        return probs

    def predict_quant(self, input_x):
        #preds = matmul(input_x, transpose(self.mmodel.coef_)) + self.mmodel.intercept_
        #print(preds, self.mmodel.predict(input_x))
        #probs = preds[:,0]
        #return probs
        return self.mmodel.predict(input_x)


class predOutcome():
    """
    store output for prediction
    """
    def __init__(self):
        self.true_y = None
        self.test_y = None
        self.train_ys = []  # with CV training can have multiple results
        self.cancer_status = None  # binary: 0 for normal and 1 for cancer
2.515625
3
examples/linreg.py
hanyas/sds
12
3125
import numpy as np
import matplotlib.pyplot as plt

from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression

# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100

# Create Gaussian data
X = np.random.randn(n_samples, n_features)

# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)

# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
    w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))

# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)

# Create the target
y = np.dot(X, w) + noise

clf = ARDRegression(fit_intercept=False, n_iter=1000)
clf.fit(X, y)

ols = LinearRegression(fit_intercept=False)
ols.fit(X, y)

from copy import deepcopy

from sds.distributions.lingauss import SingleOutputLinearGaussianWithKnownPrecision
from sds.distributions.lingauss import SingleOutputLinearGaussianWithKnownMean
from sds.distributions.gaussian import GaussianWithPrecision
from sds.distributions.gaussian import GaussianWithKnownMeanAndDiagonalPrecision
from sds.distributions.gamma import Gamma

likelihood_precision_prior = Gamma(dim=1, alphas=np.ones((1, )),
                                   betas=1e-6 * np.ones((1, )))

parameter_precision_prior = Gamma(dim=n_features, alphas=np.ones((n_features, )),
                                  betas=1e-6 * np.ones((n_features, )))

likelihood_precision_posterior = deepcopy(likelihood_precision_prior)
parameter_precision_posterior = deepcopy(parameter_precision_prior)

parameter_posterior = None

for i in range(100):
    # parameter posterior
    alphas = parameter_precision_posterior.mean()
    parameter_prior = GaussianWithPrecision(dim=n_features,
                                            mu=np.zeros((n_features, )),
                                            lmbda=np.diag(alphas))
    parameter_posterior = deepcopy(parameter_prior)

    beta = likelihood_precision_posterior.mean()
    likelihood_known_precision = SingleOutputLinearGaussianWithKnownPrecision(column_dim=n_features,
                                                                              lmbda=beta, affine=False)
    stats = likelihood_known_precision.statistics(X, y)
    parameter_posterior.nat_param = parameter_prior.nat_param + stats

    # likelihood precision posterior
    param = parameter_posterior.mean()
    likelihood_known_mean = SingleOutputLinearGaussianWithKnownMean(column_dim=n_features,
                                                                    W=param, affine=False)
    stats = likelihood_known_mean.statistics(X, y)
    likelihood_precision_posterior.nat_param = likelihood_precision_prior.nat_param + stats

    # parameter precision posterior
    parameter_likelihood = GaussianWithKnownMeanAndDiagonalPrecision(dim=n_features)
    param = parameter_posterior.mean()
    stats = parameter_likelihood.statistics(param)
    parameter_precision_posterior.nat_param = parameter_precision_prior.nat_param + stats

our_ard = parameter_posterior.mode()

from sds.distributions.composite import MatrixNormalGamma
from sds.distributions.lingauss import LinearGaussianWithDiagonalPrecision

M = np.zeros((1, n_features))
K = 1e-16 * np.eye(n_features)
alphas = 1e-16 * np.ones((1, ))
betas = 1e-16 * np.ones((1, ))

prior = MatrixNormalGamma(column_dim=n_features, row_dim=1,
                          M=M, K=K, alphas=alphas, betas=betas)
posterior = deepcopy(prior)

likelihood = LinearGaussianWithDiagonalPrecision(column_dim=n_features,
                                                 row_dim=1, affine=False)
stats = likelihood.statistics(X, np.atleast_2d(y).T)
posterior.nat_param = prior.nat_param + stats

our_ols = posterior.mode()[0]

plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(w, color='orange', linestyle='-', linewidth=2, label="Ground truth")
plt.plot(clf.coef_, color='darkblue', linestyle='-', linewidth=2, label="Sklearn ARD")
plt.plot(our_ard, color='red', linestyle='-', linewidth=2, label="Our ARD")
# plt.plot(ols.coef_, color='yellowgreen', linestyle=':', linewidth=2, label="Sklearn OLS")
# plt.plot(our_ols.flatten(), color='cyan', linestyle='-', linewidth=2, label="Our OLS")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.show()
2.875
3
optimal/tompkins/examples/dask_scheduling_problem_nonetcontention.py
KarizCache/serverless
0
3126
#!/usr/bin/python3 import os import json import re import ast import json from graphviz import Digraph import pandas as pd # color the graph import graph_tool.all as gt import copy import matplotlib.colors as mcolors import sys import utils from tompkins.ilp import schedule, jobs_when_where from collections import defaultdict from pulp import value import re import ast import json from graphviz import Digraph import pandas as pd # color the graph import graph_tool.all as gt import copy import matplotlib.colors as mcolors import sys import seaborn as sns def get_benchmarks(): benchmarks = {} for _file in os.listdir(stats_dir): try: bnch = _file.rsplit('.', 1)[0] assert os.path.isfile(os.path.join(stats_dir, f'{bnch}.iopt')) app = bnch #, scheduler = bnch.rsplit(':', 1) scheduler = 'vanilla' benchmarks[bnch] = {'app': app, 'scheduler': scheduler, 'benchmark': bnch} except AssertionError: pass return benchmarks def build_graph(benchmark): css_colors = list(mcolors.CSS4_COLORS.keys()) gfile = os.path.join(stats_dir, f'{benchmark}.iopt') with open(gfile, 'r') as fd: raw = fd.read().split('\n') g = gt.Graph(directed=True) vid_to_vx = {} name_to_vid = {} g.vertex_properties['name'] = g.new_vertex_property("string") g.vertex_properties['worker'] = g.new_vertex_property("string") g.vertex_properties['color'] = g.new_vertex_property("string", '#e0e0e0') g.vertex_properties['icolor'] = g.new_vertex_property("int") g.vertex_properties['output_size'] = g.new_vertex_property("int") g.vertex_properties['runtime'] = g.new_vertex_property("float") for ln in raw: if ln.startswith('v'): _, vid, name, runtime, output_size = ln.split(',', 4) v = g.add_vertex() vid_to_vx[vid] = v name_to_vid[name] = vid g.vp.name[v] = name g.vp.runtime[v] = float(runtime) # 1 second g.vp.output_size[v] = float(output_size) # 1GB g.vp.color[v] = '#e0e0e0' for ln in raw: if ln.startswith('e'): _, vsrc, vdst = ln.split(',') g.add_edge(vid_to_vx[vsrc], vid_to_vx[vdst]) return g def get_runtime_statistics(benchmark): tasks = [] statistics = {} jfile = os.path.join(stats_dir, f'{benchmark}.json') with open(jfile, 'r') as fd: stats = ast.literal_eval(fd.read()) for ts in stats: ops = 'ts'; #ts.replace("(", '').replace(')', '').split("'")[1].split('-')[0] statistics[ts] = {'key': ts, 'op': ops, 'output_size': stats[ts]['msg']['nbytes'], 'worker': stats[ts]['worker'].split(':')[1].replace('/', '')} startsstops = stats[ts]['msg']['startstops'] for ss in startsstops: if ss['action'] == 'compute': statistics[ts]['compute_end'] = ss['stop'] statistics[ts]['compute_start'] = ss['start'] statistics[ts]['runtime'] = ss['stop'] - ss['start'] cfile = os.path.join(stats_dir, f'{benchmark}.colors') with open(cfile, 'r') as cfd: raw = cfd.read().split('\n') for ln in raw: if not ln: continue ts, color = ln.split(',') #ts += ')' statistics[ts]['color'] = int(color) return statistics def plot_graph(g, benchmark, optimal=False): print(benchmark["benchmark"]) post = ".optimal" if optimal else "" dg = Digraph('G', filename=f'{benchmark["benchmark"]}{post}.gv', format='png') for v in g.vertices(): dg.attr('node', shape='ellipse', style="filled,solid", penwidth="3", fillcolor=g.vp.color[v], color=worker_color[g.vp.statistics[v]['worker']]) #if benchmark['scheduler'] == "vanilla": # dg.node(f'{v}') #else: dg.node(f'{v}, color({g.vp.icolor[v]})') for e in g.edges(): #if benchmark['scheduler'] == "vanilla": # dg.edge(f'{e.source()}', f'{e.target()}') #else: dg.edge(f'{e.source()}, color({g.vp.icolor[e.source()]})', f'{e.target()}, 
color({g.vp.icolor[e.target()]})') dg.view(os.path.join(f'{results_dir}',f'{benchmark["benchmark"]}{post}'), quiet=False) import pulp as pl import time def find_optimal(g, bw): n_workers = 4 workers = [f'w{i}' for i in range(n_workers)] # Job Release Times - Additional constraints on availablility of Jobs # R = np.zeros(n) R = defaultdict(lambda:0) # Maximum makespan M = 100 B = defaultdict(lambda:1) agents = workers jobs = [] for v in g.vertices(): jobs.append(f't{v}') n = len(jobs) m = len(agents) P = defaultdict(lambda:0) for e in g.edges(): P[f't{e.source()}',f't{e.target()}'] = 1 # computation D = defaultdict(lambda:0) for v in g.vertices(): for a in agents: D[f't{v}', a] = g.vp.runtime[v] # statistics[g.vp.name[v]]['runtime'] # Communication Delay matrix - Cost of sending results of job from # agent to agent #bw = 10*(1<<30)/(1<<3) bw = bw*(1<<20)/(1<<3) C = defaultdict(lambda:0) for v in g.vertices(): for a in agents: for b in agents: C[f't{v}', a, b] = 0 if a == b else g.vp.output_size[v]/bw # 0 --> cost_serialization start = time.time() # Set up the Mixed Integer Linear Program prob, X, S, Cmax = schedule(jobs, agents, D, C, R, B, P, M) solver = pl.GUROBI_CMD() prob.solve(solver) latency = time.time() - start print('-----------------------------------------------> constraints', len(prob.constraints.keys())) print('----------------------------------------------> # of variables', prob.numVariables()) print('---------------------------------------------->', latency) print("Makespan: ", value(Cmax)) sched = jobs_when_where(prob, X, S, Cmax) print("Schedule: ", sched) sched2 = [] for j in sched: new = j + (j[1] + D[j[0], j[2]], g.vp.name[int(j[0].replace('t', ''))]) sched2.append(new) print("Schedule: ", sched2) return sched2, {'makespan': value(Cmax), 'constraints': len(prob.constraints.keys()), 'variables': prob.numVariables(), 'time': float(latency)} results_dir = './benchmarks' stats_dir='./benchmarks' benchmarks = get_benchmarks() #benchmarks = ['dom4x61GB1B', 'dom2x41GB1B', 'tree4x61GB1B'] for bnch in benchmarks: for bw in [1*1024, 16*1024, 512, 32*1024, 8*1024, 4*1024, 2*1024, 256, 128, 64, 32]: print(f'process {bnch}') g = build_graph(bnch) sched2, stats = find_optimal(g, bw) with open(f'{results_dir}/optimal_compuation_stats.csv', 'a') as fd: fd.write(f'{bnch},{stats["makespan"]},{stats["constraints"]},{stats["variables"]},{stats["time"]},no,{bw}\n') with open(f'{results_dir}/{bnch}.nonetworkcontention.{bw}mbps.optimal', 'w') as fd: for s in sched2: fd.write(f'v,{s[0]},{s[1]},{s[2]}\n') #fd.write(f'{s[4]},{s[3]},{s[0]},{s[1]},{s[2]}\n') #v = int(s[0].replace('t', '')) #g.vp.worker[v] = s[2] break #break
2.109375
2
tests/apitests/python/test_robot_account.py
gerhardgossen/harbor
1
3127
from __future__ import absolute_import import unittest from testutils import ADMIN_CLIENT from testutils import TEARDOWN from library.user import User from library.project import Project from library.repository import Repository from library.repository import pull_harbor_image from library.repository import push_image_to_project from testutils import harbor_server from library.base import _assert_status_code class TestProjects(unittest.TestCase): @classmethod def setUp(self): self.project = Project() self.user = User() self.repo = Repository() @classmethod def tearDown(self): print "Case completed" @unittest.skipIf(TEARDOWN == False, "Test data won't be erased.") def test_ClearData(self): #1. Delete repository(RA) by user(UA); self.repo.delete_repoitory(TestProjects.project_ra_name_a, TestProjects.repo_name_in_project_a.split('/')[1], **TestProjects.USER_RA_CLIENT) self.repo.delete_repoitory(TestProjects.project_ra_name_b, TestProjects.repo_name_in_project_b.split('/')[1], **TestProjects.USER_RA_CLIENT) self.repo.delete_repoitory(TestProjects.project_ra_name_c, TestProjects.repo_name_in_project_c.split('/')[1], **TestProjects.USER_RA_CLIENT) self.repo.delete_repoitory(TestProjects.project_ra_name_a, TestProjects.repo_name_pa.split('/')[1], **TestProjects.USER_RA_CLIENT) #2. Delete project(PA); self.project.delete_project(TestProjects.project_ra_id_a, **TestProjects.USER_RA_CLIENT) self.project.delete_project(TestProjects.project_ra_id_b, **TestProjects.USER_RA_CLIENT) self.project.delete_project(TestProjects.project_ra_id_c, **TestProjects.USER_RA_CLIENT) #3. Delete user(UA). self.user.delete_user(TestProjects.user_ra_id, **ADMIN_CLIENT) def testRobotAccount(self): """ Test case: Robot Account Test step and expected result: 1. Create user(UA); 2. Create private project(PA), private project(PB) and public project(PC) by user(UA); 3. Push image(ImagePA) to project(PA), image(ImagePB) to project(PB) and image(ImagePC) to project(PC) by user(UA); 4. Create a new robot account(RA) with pull and push privilige in project(PA) by user(UA); 5. Check robot account info, it should has both pull and push priviliges; 6. Pull image(ImagePA) from project(PA) by robot account(RA), it must be successful; 7. Push image(ImageRA) to project(PA) by robot account(RA), it must be successful; 8. Push image(ImageRA) to project(PB) by robot account(RA), it must be not successful; 9. Pull image(ImagePB) from project(PB) by robot account(RA), it must be not successful; 10. Pull image from project(PC), it must be successful; 11. Push image(ImageRA) to project(PC) by robot account(RA), it must be not successful; 12. Update action property of robot account(RA); 13. Pull image(ImagePA) from project(PA) by robot account(RA), it must be not successful; 14. Push image(ImageRA) to project(PA) by robot account(RA), it must be not successful; 15. Delete robot account(RA), it must be not successful. Tear down: 1. Delete repository(RA) by user(UA); 2. Delete project(PA); 3. Delete user(UA). """ url = ADMIN_CLIENT["endpoint"] admin_name = ADMIN_CLIENT["username"] admin_password = ADMIN_CLIENT["password"] user_ra_password = "<PASSWORD>" image_project_a = "haproxy" image_project_b = "hello-world" image_project_c = "httpd" image_robot_account = "alpine" tag = "latest" print "#1. Create user(UA);" TestProjects.user_ra_id, user_ra_name = self.user.create_user(user_password = <PASSWORD>, **ADMIN_CLIENT) TestProjects.USER_RA_CLIENT=dict(endpoint = url, username = user_ra_name, password = <PASSWORD>) print "#2. 
Create private project(PA), private project(PB) and public project(PC) by user(UA);" TestProjects.project_ra_id_a, TestProjects.project_ra_name_a = self.project.create_project(metadata = {"public": "false"}, **TestProjects.USER_RA_CLIENT) TestProjects.project_ra_id_b, TestProjects.project_ra_name_b = self.project.create_project(metadata = {"public": "false"}, **TestProjects.USER_RA_CLIENT) TestProjects.project_ra_id_c, TestProjects.project_ra_name_c = self.project.create_project(metadata = {"public": "true"}, **TestProjects.USER_RA_CLIENT) print "#3. Push image(ImagePA) to project(PA), image(ImagePB) to project(PB) and image(ImagePC) to project(PC) by user(UA);" TestProjects.repo_name_in_project_a, tag_a = push_image_to_project(TestProjects.project_ra_name_a, harbor_server, user_ra_name, user_ra_password, image_project_a, tag) TestProjects.repo_name_in_project_b, tag_b = push_image_to_project(TestProjects.project_ra_name_b, harbor_server, user_ra_name, user_ra_password, image_project_b, tag) TestProjects.repo_name_in_project_c, tag_c = push_image_to_project(TestProjects.project_ra_name_c, harbor_server, user_ra_name, user_ra_password, image_project_c, tag) print "#4. Create a new robot account(RA) with pull and push privilige in project(PA) by user(UA);" robot_id, robot_account = self.project.add_project_robot_account(TestProjects.project_ra_id_a, TestProjects.project_ra_name_a, 2441000531 ,**TestProjects.USER_RA_CLIENT) print robot_account.name print robot_account.token print "#5. Check robot account info, it should has both pull and push priviliges;" data = self.project.get_project_robot_account_by_id(TestProjects.project_ra_id_a, robot_id, **TestProjects.USER_RA_CLIENT) _assert_status_code(robot_account.name, data.name) print "#6. Pull image(ImagePA) from project(PA) by robot account(RA), it must be successful;" pull_harbor_image(harbor_server, robot_account.name, robot_account.token, TestProjects.repo_name_in_project_a, tag_a) print "#7. Push image(ImageRA) to project(PA) by robot account(RA), it must be successful;" TestProjects.repo_name_pa, _ = push_image_to_project(TestProjects.project_ra_name_a, harbor_server, robot_account.name, robot_account.token, image_robot_account, tag) print "#8. Push image(ImageRA) to project(PB) by robot account(RA), it must be not successful;" push_image_to_project(TestProjects.project_ra_name_b, harbor_server, robot_account.name, robot_account.token, image_robot_account, tag, expected_error_message = "unauthorized to access repository") print "#9. Pull image(ImagePB) from project(PB) by robot account(RA), it must be not successful;" pull_harbor_image(harbor_server, robot_account.name, robot_account.token, TestProjects.repo_name_in_project_b, tag_b, expected_error_message = "unauthorized to access repository") print "#10. Pull image from project(PC), it must be successful;" pull_harbor_image(harbor_server, robot_account.name, robot_account.token, TestProjects.repo_name_in_project_c, tag_c) print "#11. Push image(ImageRA) to project(PC) by robot account(RA), it must be not successful;" push_image_to_project(TestProjects.project_ra_name_c, harbor_server, robot_account.name, robot_account.token, image_robot_account, tag, expected_error_message = "unauthorized to access repository") print "#12. Update action property of robot account(RA);" self.project.disable_project_robot_account(TestProjects.project_ra_id_a, robot_id, True, **TestProjects.USER_RA_CLIENT) print "#13. 
Pull image(ImagePA) from project(PA) by robot account(RA), it must be not successful;" pull_harbor_image(harbor_server, robot_account.name, robot_account.token, TestProjects.repo_name_in_project_a, tag_a, expected_login_error_message = "unauthorized: authentication required") print "#14. Push image(ImageRA) to project(PA) by robot account(RA), it must be not successful;" push_image_to_project(TestProjects.project_ra_name_a, harbor_server, robot_account.name, robot_account.token, image_robot_account, tag, expected_login_error_message = "unauthorized: authentication required") print "#15. Delete robot account(RA), it must be not successful." self.project.delete_project_robot_account(TestProjects.project_ra_id_a, robot_id, **TestProjects.USER_RA_CLIENT) if __name__ == '__main__': unittest.main()
2.25
2
slackchannel2pdf/locales.py
ErikKalkoken/slackchannel2pdf
52
3128
import datetime as dt
import logging

from babel import Locale, UnknownLocaleError
from babel.dates import format_datetime, format_time, format_date
import pytz
from tzlocal import get_localzone

from . import settings

logger = logging.getLogger(__name__)


class LocaleHelper:
    """Helpers for converting date & time according to current locale and timezone"""

    def __init__(
        self,
        my_locale: Locale = None,
        my_tz: pytz.BaseTzInfo = None,
        author_info: dict = None,
    ) -> None:
        """
        Args:
        - my_locale: Primary locale to use
        - my_tz: Primary timezone to use
        - author_info: locale and timezone to use from this Slack response
          if my_locale and/or my_tz are not given
        """
        self._locale = self._determine_locale(my_locale, author_info)
        self._timezone = self._determine_timezone(my_tz, author_info)

    @staticmethod
    def _determine_locale(my_locale: Locale = None, author_info: dict = None) -> Locale:
        if my_locale:
            if not isinstance(my_locale, Locale):
                raise TypeError("my_locale must be a babel Locale object")
        else:
            if author_info:
                try:
                    my_locale = Locale.parse(author_info["locale"], sep="-")
                except UnknownLocaleError:
                    logger.warning("Could not use locale info from Slack")
                    my_locale = Locale.default()
            else:
                my_locale = Locale.default()
        if not my_locale:
            my_locale = Locale.parse(settings.FALLBACK_LOCALE)
        return my_locale

    @staticmethod
    def _determine_timezone(
        my_tz: pytz.BaseTzInfo = None, author_info: dict = None
    ) -> pytz.BaseTzInfo:
        if my_tz:
            if not isinstance(my_tz, pytz.BaseTzInfo):
                raise TypeError("my_tz must be of type pytz")
        else:
            if author_info:
                try:
                    my_tz = pytz.timezone(author_info["tz"])
                except pytz.exceptions.UnknownTimeZoneError:
                    logger.warning("Could not use timezone info from Slack")
                    my_tz = get_localzone()
            else:
                my_tz = get_localzone()
        if not my_tz:
            my_tz = pytz.UTC
        return my_tz

    @property
    def locale(self) -> Locale:
        return self._locale

    @property
    def timezone(self) -> pytz.BaseTzInfo:
        return self._timezone

    def format_date_full_str(self, my_datetime: dt.datetime) -> str:
        return format_date(my_datetime, format="full", locale=self.locale)

    def format_datetime_str(self, my_datetime: dt.datetime) -> str:
        """returns formatted datetime string for given dt using locale"""
        return format_datetime(my_datetime, format="short", locale=self.locale)

    def get_datetime_formatted_str(self, ts: int) -> str:
        """return given timestamp as formatted datetime string using locale"""
        my_datetime = self.get_datetime_from_ts(ts)
        return format_datetime(my_datetime, format="short", locale=self.locale)

    def get_time_formatted_str(self, ts: int) -> str:
        """return given timestamp as formatted time string using locale"""
        my_datetime = self.get_datetime_from_ts(ts)
        return format_time(my_datetime, format="short", locale=self.locale)

    def get_datetime_from_ts(self, ts: int) -> dt.datetime:
        """returns datetime object of a unix timestamp with local timezone"""
        my_datetime = dt.datetime.fromtimestamp(float(ts), pytz.UTC)
        return my_datetime.astimezone(self.timezone)
2.71875
3
databoard/databoard/default_config.py
glemaitre/ramp-board-1
0
3129
<reponame>glemaitre/ramp-board-1<gh_stars>0
import os


class Config(object):
    # FLASK GENERAL CONFIG PARAMETERS
    SECRET_KEY = os.getenv('DATABOARD_SECRET_KEY', '<KEY>')
    # abs max upload file size, to throw 413, before saving it
    WTF_CSRF_ENABLED = True
    LOG_FILENAME = None  # if None, output to screen
    MAX_CONTENT_LENGTH = 1024 * 1024 * 1024
    DEBUG = False
    TESTING = False

    # FLASK MAIL CONFIG PARAMETERS
    MAIL_SERVER = os.getenv('DATABOARD_MAIL_SERVER', 'smtp.gmail.com')
    MAIL_PORT = os.getenv('DATABOARD_MAIL_PORT', 587)
    MAIL_USERNAME = os.getenv('DATABOARD_MAIL_USERNAME', 'user')
    MAIL_PASSWORD = os.getenv('DATABOARD_MAIL_PASSWORD', 'password')
    MAIL_DEFAULT_SENDER = (
        os.getenv('DATABOARD_MAIL_SENDER_ALIAS', 'RAMP admin'),
        os.getenv('DATABOARD_MAIL_SENDER', '<EMAIL>')
    )
    MAIL_RECIPIENTS = []
    MAIL_USE_TLS = False
    MAIL_USE_SSL = True
    MAIL_DEBUG = False

    SQLALCHEMY_TRACK_MODIFICATIONS = True
    SQLALCHEMY_DATABASE_URI = os.getenv('DATABOARD_DB_URL')
    SQLALCHEMY_MIGRATE_REPO = os.getenv('DATABOARD_DB_MIGRATE_REPO')
    SQLALCHEMY_RECORD_QUERIES = (
        True if os.getenv('DATABOARD_DB_PERF', 0) else False
    )


class RampConfig(object):
    RAMP_ADMIN_MAILS = os.getenv('DATABOARD_ADMIN_MAILS', [])

    RAMP_KITS_DIR = 'ramp-kits'
    RAMP_DATA_DIR = 'ramp-data'
    RAMP_SUBMISSIONS_DIR = 'submissions'
    RAMP_SANDBOX_DIR = 'starting_kit'

    RAMP_SERVER_PORT = 8080

    # make it False if parallel training is not working
    # is_parallelize
    RAMP_PARALLELIZE = bool(os.getenv('DATABOARD_PARALLELIZE', 1))


######################################################################

class ProductionConfig(Config):
    DEPLOYMENT_PATH = os.getenv(
        'DATABOARD_DEPLOYMENT_PATH', '/tmp/databoard')


class DevelopmentConfig(Config):
    DEBUG = True
    MAIL_DEBUG = True
    SQLALCHEMY_DATABASE_URI = os.getenv(
        'DATABOARD_DB_URL_TEST',
        'postgresql://mrramp:mrramp@localhost/databoard_test'
    )
    DEPLOYMENT_PATH = os.getenv(
        'DATABOARD_DEPLOYMENT_PATH_TEST', '/tmp/databoard_test')


class TestingConfig(Config):
    TESTING = True
    SQLALCHEMY_DATABASE_URI = os.getenv(
        'DATABOARD_DB_URL_TEST',
        'postgresql://mrramp:mrramp@localhost/databoard_test'
    )
    DEPLOYMENT_PATH = os.getenv(
        'DATABOARD_DEPLOYMENT_PATH_TEST', '/tmp/databoard_test',
    )
1.90625
2
python_developer_tools/cv/bases/pool/AvgPool2d.py
carlsummer/python_developer_tools
32
3130
<filename>python_developer_tools/cv/bases/pool/AvgPool2d.py
# !/usr/bin/env python
# -- coding: utf-8 --
# @Author zengxiaohui
# Datatime:8/31/2021 1:37 PM
# @File:GlobalAvgPool2d
import torch.nn as nn

from python_developer_tools.cv.bases.activates.swish import h_swish


class GlobalAvgPool2d(nn.Module):
    """
    Fast implementation of global average pooling from TResNet: High Performance GPU-Dedicated Architecture
    https://arxiv.org/pdf/2003.13630.pdf
    Args:
        flatten (bool, optional): whether spatial dimensions should be squeezed
    """
    def __init__(self, flatten: bool = False) -> None:
        super().__init__()
        self.flatten = flatten

    def forward(self, x):
        if self.flatten:
            in_size = x.size()
            return x.view((in_size[0], in_size[1], -1)).mean(dim=2)
        else:
            return x.view(x.size(0), x.size(1), -1).mean(-1).view(x.size(0), x.size(1), 1, 1)


class SwishAdaptiveAvgPool2d(nn.Module):
    def __init__(self, inplace=True):
        super().__init__()
        self.avgpool = nn.Sequential(
            nn.ReLU6(inplace=inplace),
            nn.AdaptiveAvgPool2d((1, 1)),
            h_swish()
        )

    def forward(self, x):
        return self.avgpool(x)
2.453125
2
expyfun/_utils.py
nordme/expyfun
2
3131
<filename>expyfun/_utils.py """Some utility functions""" # Authors: <NAME> <<EMAIL>> # # License: BSD (3-clause) import warnings import operator from copy import deepcopy import subprocess import importlib import os import os.path as op import inspect import sys import tempfile import ssl from shutil import rmtree import atexit import json from functools import partial from distutils.version import LooseVersion from numpy import sqrt, convolve, ones import logging import datetime from timeit import default_timer as clock from threading import Timer import numpy as np import scipy as sp from ._externals import decorator # set this first thing to make sure it "takes" try: import pyglet pyglet.options['debug_gl'] = False del pyglet except Exception: pass # for py3k (eventually) if sys.version.startswith('2'): string_types = basestring # noqa input = raw_input # noqa, input is raw_input in py3k text_type = unicode # noqa from __builtin__ import reload from urllib2 import urlopen # noqa from cStringIO import StringIO # noqa else: string_types = str text_type = str from urllib.request import urlopen input = input from io import StringIO # noqa, analysis:ignore from importlib import reload # noqa, analysis:ignore ############################################################################### # LOGGING EXP = 25 logging.addLevelName(EXP, 'EXP') def exp(self, message, *args, **kwargs): """Experiment-level logging.""" self.log(EXP, message, *args, **kwargs) logging.Logger.exp = exp logger = logging.getLogger('expyfun') def flush_logger(): """Flush expyfun logger""" for handler in logger.handlers: handler.flush() def set_log_level(verbose=None, return_old_level=False): """Convenience function for setting the logging level Parameters ---------- verbose : bool, str, int, or None The verbosity of messages to print. If a str, it can be either DEBUG, INFO, WARNING, ERROR, or CRITICAL. Note that these are for convenience and are equivalent to passing in logging.DEBUG, etc. For bool, True is the same as 'INFO', False is the same as 'WARNING'. If None, the environment variable EXPYFUN_LOGGING_LEVEL is read, and if it doesn't exist, defaults to INFO. return_old_level : bool If True, return the old verbosity level. """ if verbose is None: verbose = get_config('EXPYFUN_LOGGING_LEVEL', 'INFO') elif isinstance(verbose, bool): verbose = 'INFO' if verbose is True else 'WARNING' if isinstance(verbose, string_types): verbose = verbose.upper() logging_types = dict(DEBUG=logging.DEBUG, INFO=logging.INFO, WARNING=logging.WARNING, ERROR=logging.ERROR, CRITICAL=logging.CRITICAL) if verbose not in logging_types: raise ValueError('verbose must be of a valid type') verbose = logging_types[verbose] old_verbose = logger.level logger.setLevel(verbose) return (old_verbose if return_old_level else None) def set_log_file(fname=None, output_format='%(asctime)s - %(levelname)-7s - %(message)s', overwrite=None): """Convenience function for setting the log to print to a file Parameters ---------- fname : str, or None Filename of the log to print to. If None, stdout is used. To suppress log outputs, use set_log_level('WARN'). output_format : str Format of the output messages. See the following for examples: http://docs.python.org/dev/howto/logging.html e.g., "%(asctime)s - %(levelname)s - %(message)s". overwrite : bool, or None Overwrite the log file (if it exists). Otherwise, statements will be appended to the log (default). 
None is the same as False, but additionally raises a warning to notify the user that log entries will be appended. """ handlers = logger.handlers for h in handlers: if isinstance(h, logging.FileHandler): h.close() logger.removeHandler(h) if fname is not None: if op.isfile(fname) and overwrite is None: warnings.warn('Log entries will be appended to the file. Use ' 'overwrite=False to avoid this message in the ' 'future.') mode = 'w' if overwrite is True else 'a' lh = logging.FileHandler(fname, mode=mode) else: """ we should just be able to do: lh = logging.StreamHandler(sys.stdout) but because doctests uses some magic on stdout, we have to do this: """ lh = logging.StreamHandler(WrapStdOut()) lh.setFormatter(logging.Formatter(output_format)) # actually add the stream handler logger.addHandler(lh) ############################################################################### # RANDOM UTILITIES building_doc = any('sphinx-build' in ((''.join(i[4]).lower() + i[1]) if i[4] is not None else '') for i in inspect.stack()) def run_subprocess(command, **kwargs): """Run command using subprocess.Popen Run command and wait for command to complete. If the return code was zero then return, otherwise raise CalledProcessError. By default, this will also add stdout= and stderr=subproces.PIPE to the call to Popen to suppress printing to the terminal. Parameters ---------- command : list of str Command to run as subprocess (see subprocess.Popen documentation). **kwargs : objects Keywoard arguments to pass to ``subprocess.Popen``. Returns ------- stdout : str Stdout returned by the process. stderr : str Stderr returned by the process. """ # code adapted with permission from mne-python kw = dict(stderr=subprocess.PIPE, stdout=subprocess.PIPE) kw.update(kwargs) p = subprocess.Popen(command, **kw) stdout_, stderr = p.communicate() output = (stdout_.decode(), stderr.decode()) if p.returncode: err_fun = subprocess.CalledProcessError.__init__ if 'output' in _get_args(err_fun): raise subprocess.CalledProcessError(p.returncode, command, output) else: raise subprocess.CalledProcessError(p.returncode, command) return output class ZeroClock(object): """Clock that uses "clock" function but starts at zero on init.""" def __init__(self): self._start_time = clock() def get_time(self): """Get time.""" return clock() - self._start_time def date_str(): """Produce a date string for the current date and time Returns ------- datestr : str The date string. """ return str(datetime.datetime.today()).replace(':', '_') class WrapStdOut(object): """Ridiculous class to work around how doctest captures stdout.""" def __getattr__(self, name): # Even more ridiculous than this class, this must be sys.stdout (not # just stdout) in order for this to work (tested on OSX and Linux) return getattr(sys.stdout, name) class _TempDir(str): """Class for creating and auto-destroying temp dir This is designed to be used with testing modules. We cannot simply use __del__() method for cleanup here because the rmtree function may be cleaned up before this object, so we use the atexit module instead. Passing del_after and print_del kwargs to the constructor are helpful primarily for debugging purposes. 
""" def __new__(self, del_after=True, print_del=False): new = str.__new__(self, tempfile.mkdtemp()) self._del_after = del_after self._print_del = print_del return new def __init__(self): self._path = self.__str__() atexit.register(self.cleanup) def cleanup(self): if self._del_after is True: if self._print_del is True: print('Deleting {} ...'.format(self._path)) rmtree(self._path, ignore_errors=True) def check_units(units): """Ensure user passed valid units type Parameters ---------- units : str Must be ``'norm'``, ``'deg'``, or ``'pix'``. """ good_units = ['norm', 'pix', 'deg'] if units not in good_units: raise ValueError('"units" must be one of {}, not {}' ''.format(good_units, units)) ############################################################################### # DECORATORS # Following deprecated class copied from scikit-learn class deprecated(object): """Decorator to mark a function or class as deprecated. Issue a warning when the function is called/the class is instantiated and adds a warning to the docstring. The optional extra argument will be appended to the deprecation message and the docstring. Note: to use this with the default value for extra, put in an empty of parentheses: >>> from expyfun._utils import deprecated >>> deprecated() # doctest: +ELLIPSIS <expyfun._utils.deprecated object at ...> >>> @deprecated() ... def some_function(): pass """ # Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary, # but with many changes. # scikit-learn will not import on all platforms b/c it can be # sklearn or scikits.learn, so a self-contained example is used above def __init__(self, extra=''): """ Parameters ---------- extra: string to be added to the deprecation messages """ self.extra = extra def __call__(self, obj): """Call.""" if isinstance(obj, type): return self._decorate_class(obj) else: return self._decorate_fun(obj) def _decorate_class(self, cls): msg = "Class %s is deprecated" % cls.__name__ if self.extra: msg += "; %s" % self.extra # FIXME: we should probably reset __new__ for full generality init = cls.__init__ def wrapped(*args, **kwargs): warnings.warn(msg, category=DeprecationWarning) return init(*args, **kwargs) cls.__init__ = wrapped wrapped.__name__ = '__init__' wrapped.__doc__ = self._update_doc(init.__doc__) wrapped.deprecated_original = init return cls def _decorate_fun(self, fun): """Decorate function fun""" msg = "Function %s is deprecated" % fun.__name__ if self.extra: msg += "; %s" % self.extra def wrapped(*args, **kwargs): warnings.warn(msg, category=DeprecationWarning) return fun(*args, **kwargs) wrapped.__name__ = fun.__name__ wrapped.__dict__ = fun.__dict__ wrapped.__doc__ = self._update_doc(fun.__doc__) return wrapped def _update_doc(self, olddoc): newdoc = "DEPRECATED" if self.extra: newdoc = "%s: %s" % (newdoc, self.extra) if olddoc: newdoc = "%s\n\n%s" % (newdoc, olddoc) return newdoc if hasattr(inspect, 'signature'): # py35 def _get_args(function, varargs=False): params = inspect.signature(function).parameters args = [key for key, param in params.items() if param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)] if varargs: varargs = [param.name for param in params.values() if param.kind == param.VAR_POSITIONAL] if len(varargs) == 0: varargs = None return args, varargs else: return args else: def _get_args(function, varargs=False): out = inspect.getargspec(function) # args, varargs, keywords, defaults if varargs: return out[:2] else: return out[0] @decorator def verbose_dec(function, *args, **kwargs): """Improved verbose decorator to 
allow functions to override log-level Do not call this directly to set global verbosrity level, instead use set_log_level(). Parameters ---------- function : callable Function to be decorated by setting the verbosity level. Returns ------- dec - function The decorated function """ arg_names = _get_args(function) if len(arg_names) > 0 and arg_names[0] == 'self': default_level = getattr(args[0], 'verbose', None) else: default_level = None if('verbose' in arg_names): verbose_level = args[arg_names.index('verbose')] else: verbose_level = default_level if verbose_level is not None: old_level = set_log_level(verbose_level, True) # set it back if we get an exception try: ret = function(*args, **kwargs) except Exception: set_log_level(old_level) raise set_log_level(old_level) return ret else: ret = function(*args, **kwargs) return ret def _new_pyglet(): import pyglet return LooseVersion(pyglet.version) >= LooseVersion('1.4') def _has_video(): if _new_pyglet(): try: from pyglet.media.codecs.ffmpeg import FFmpegSource # noqa except ImportError: return False else: try: from pyglet.media.avbin import AVbinSource # noqa except ImportError: try: from pyglet.media.sources.avbin import AVbinSource # noqa except ImportError: return False return True def requires_video(): """Requires FFmpeg/AVbin decorator.""" import pytest return pytest.mark.skipif(not _has_video(), reason='Requires FFmpeg/AVbin') def requires_opengl21(func): """Requires OpenGL decorator.""" import pytest import pyglet.gl vendor = pyglet.gl.gl_info.get_vendor() version = pyglet.gl.gl_info.get_version() sufficient = pyglet.gl.gl_info.have_version(2, 0) return pytest.mark.skipif(not sufficient, reason='OpenGL too old: %s %s' % (vendor, version,))(func) def requires_lib(lib): """Requires lib decorator.""" import pytest try: importlib.import_module(lib) except Exception as exp: val = True reason = 'Needs %s (%s)' % (lib, exp) else: val = False reason = '' return pytest.mark.skipif(val, reason=reason) def _has_scipy_version(version): return (LooseVersion(sp.__version__) >= LooseVersion(version)) def _get_user_home_path(): """Return standard preferences path""" # this has been checked on OSX64, Linux64, and Win32 val = os.getenv('APPDATA' if 'nt' == os.name.lower() else 'HOME', None) if val is None: raise ValueError('expyfun config file path could ' 'not be determined, please report this ' 'error to expyfun developers') return val def fetch_data_file(fname): """Fetch example remote file Parameters ---------- fname : str The remote filename to get. If the filename already exists on the local system, the file will not be fetched again. Returns ------- fname : str The filename on the local system where the file was downloaded. """ path = get_config('EXPYFUN_DATA_PATH', op.join(_get_user_home_path(), '.expyfun', 'data')) fname_out = op.join(path, fname) if not op.isdir(op.dirname(fname_out)): os.makedirs(op.dirname(fname_out)) fname_url = ('https://github.com/LABSN/expyfun-data/raw/master/{0}' ''.format(fname)) try: # until we get proper certificates context = ssl._create_unverified_context() this_urlopen = partial(urlopen, context=context) except AttributeError: context = None this_urlopen = urlopen if not op.isfile(fname_out): try: with open(fname_out, 'wb') as fid: www = this_urlopen(fname_url, timeout=30.0) try: fid.write(www.read()) finally: www.close() except Exception: os.remove(fname_out) raise return fname_out def get_config_path(): r"""Get path to standard expyfun config file. 
Returns ------- config_path : str The path to the expyfun configuration file. On windows, this will be '%APPDATA%\.expyfun\expyfun.json'. On every other system, this will be $HOME/.expyfun/expyfun.json. """ val = op.join(_get_user_home_path(), '.expyfun', 'expyfun.json') return val # List the known configuration values known_config_types = ('RESPONSE_DEVICE', 'AUDIO_CONTROLLER', 'DB_OF_SINE_AT_1KHZ_1RMS', 'EXPYFUN_EYELINK', 'SOUND_CARD_API', 'SOUND_CARD_BACKEND', 'SOUND_CARD_FS', 'SOUND_CARD_NAME', 'SOUND_CARD_FIXED_DELAY', 'TDT_CIRCUIT_PATH', 'TDT_DELAY', 'TDT_INTERFACE', 'TDT_MODEL', 'TDT_TRIG_DELAY', 'TRIGGER_CONTROLLER', 'TRIGGER_ADDRESS', 'WINDOW_SIZE', 'SCREEN_NUM', 'SCREEN_WIDTH', 'SCREEN_DISTANCE', 'SCREEN_SIZE_PIX', 'EXPYFUN_LOGGING_LEVEL', ) # These allow for partial matches: 'NAME_1' is okay key if 'NAME' is listed known_config_wildcards = () def get_config(key=None, default=None, raise_error=False): """Read expyfun preference from env, then expyfun config Parameters ---------- key : str The preference key to look for. The os environment is searched first, then the expyfun config file is parsed. default : str | None Value to return if the key is not found. raise_error : bool If True, raise an error if the key is not found (instead of returning default). Returns ------- value : str | None The preference key value. """ if key is not None and not isinstance(key, string_types): raise ValueError('key must be a string') # first, check to see if key is in env if key is not None and key in os.environ: return os.environ[key] # second, look for it in expyfun config file config_path = get_config_path() if not op.isfile(config_path): key_found = False val = default else: with open(config_path, 'r') as fid: config = json.load(fid) if key is None: return config key_found = True if key in config else False val = config.get(key, default) if not key_found and raise_error is True: meth_1 = 'os.environ["%s"] = VALUE' % key meth_2 = 'expyfun.utils.set_config("%s", VALUE)' % key raise KeyError('Key "%s" not found in environment or in the ' 'expyfun config file:\n%s\nTry either:\n' ' %s\nfor a temporary solution, or:\n' ' %s\nfor a permanent one. You can also ' 'set the environment variable before ' 'running python.' % (key, config_path, meth_1, meth_2)) return val def set_config(key, value): """Set expyfun preference in config Parameters ---------- key : str | None The preference key to set. If None, a tuple of the valid keys is returned, and ``value`` is ignored. value : str | None The value to assign to the preference key. If None, the key is deleted. 
""" if key is None: return sorted(known_config_types) if not isinstance(key, string_types): raise ValueError('key must be a string') # While JSON allow non-string types, we allow users to override config # settings using env, which are strings, so we enforce that here if not isinstance(value, string_types) and value is not None: raise ValueError('value must be a string or None') if key not in known_config_types and not \ any(k in key for k in known_config_wildcards): warnings.warn('Setting non-standard config type: "%s"' % key) # Read all previous values config_path = get_config_path() if op.isfile(config_path): with open(config_path, 'r') as fid: config = json.load(fid) else: config = dict() logger.info('Attempting to create new expyfun configuration ' 'file:\n%s' % config_path) if value is None: config.pop(key, None) else: config[key] = value # Write all values directory = op.split(config_path)[0] if not op.isdir(directory): os.mkdir(directory) with open(config_path, 'w') as fid: json.dump(config, fid, sort_keys=True, indent=0) ############################################################################### # MISC def fake_button_press(ec, button='1', delay=0.): """Fake a button press after a delay Notes ----- This function only works with the keyboard controller (not TDT)! It uses threads to ensure that control is passed back, so other commands can be called (like wait_for_presses). """ def send(): ec._response_handler._on_pyglet_keypress(button, [], True) Timer(delay, send).start() if delay > 0. else send() def fake_mouse_click(ec, pos, button='left', delay=0.): """Fake a mouse click after a delay""" button = dict(left=1, middle=2, right=4)[button] # trans to pyglet def send(): ec._mouse_handler._on_pyglet_mouse_click(pos[0], pos[1], button, []) Timer(delay, send).start() if delay > 0. else send() def _check_pyglet_version(raise_error=False): """Check pyglet version, return True if usable. """ import pyglet is_usable = LooseVersion(pyglet.version) >= LooseVersion('1.2') if raise_error is True and is_usable is False: raise ImportError('On Linux, you must run at least Pyglet ' 'version 1.2, and you are running ' '{0}'.format(pyglet.version)) return is_usable def _wait_secs(secs, ec=None): """Wait a specified number of seconds. Parameters ---------- secs : float Number of seconds to wait. ec : None | expyfun.ExperimentController instance The ExperimentController. Notes ----- This function uses a while loop. Although this slams the CPU, it will guarantee that events (keypresses, etc.) are processed. """ # hog the cpu, checking time t0 = clock() if ec is not None: while (clock() - t0) < secs: ec._dispatch_events() ec.check_force_quit() else: wins = _get_display().get_windows() for win in wins: win.dispatch_events() def running_rms(signal, win_length): """RMS of ``signal`` with rectangular window ``win_length`` samples long. Parameters ---------- signal : array_like The (1-dimesional) signal of interest. win_length : int Length (in samples) of the rectangular window """ return sqrt(convolve(signal ** 2, ones(win_length) / win_length, 'valid')) def _fix_audio_dims(signal, n_channels): """Make it so a valid audio buffer is in the standard dimensions Parameters ---------- signal : array_like The signal whose dimensions should be checked and fixed. n_channels : int The number of channels that the output should have. If the input is mono and n_channels=2, it will be tiled to be shape (2, n_samples). Otherwise, the number of channels in signal must match n_channels. 
Returns ------- signal_fixed : array The signal with standard dimensions (n_channels, N). """ # Check requested channel output n_channels = int(operator.index(n_channels)) signal = np.asarray(np.atleast_2d(signal), dtype=np.float32) # Check dimensionality if signal.ndim != 2: raise ValueError('Sound data must have one or two dimensions, got %s.' % (signal.ndim,)) # Return data with correct dimensions if n_channels == 2 and signal.shape[0] == 1: signal = np.tile(signal, (n_channels, 1)) if signal.shape[0] != n_channels: raise ValueError('signal channel count %d did not match required ' 'channel count %d' % (signal.shape[0], n_channels)) return signal def _sanitize(text_like): """Cast as string, encode as UTF-8 and sanitize any escape characters. """ return text_type(text_like).encode('unicode_escape').decode('utf-8') def _sort_keys(x): """Sort and return keys of dict""" keys = list(x.keys()) # note: not thread-safe idx = np.argsort([str(k) for k in keys]) keys = [keys[ii] for ii in idx] return keys def object_diff(a, b, pre=''): """Compute all differences between two python variables Parameters ---------- a : object Currently supported: dict, list, tuple, ndarray, int, str, bytes, float, StringIO, BytesIO. b : object Must be same type as ``a``. pre : str String to prepend to each line. Returns ------- diffs : str A string representation of the differences. Notes ----- Taken from mne-python with permission. """ out = '' if type(a) != type(b): out += pre + ' type mismatch (%s, %s)\n' % (type(a), type(b)) elif isinstance(a, dict): k1s = _sort_keys(a) k2s = _sort_keys(b) m1 = set(k2s) - set(k1s) if len(m1): out += pre + ' x1 missing keys %s\n' % (m1) for key in k1s: if key not in k2s: out += pre + ' x2 missing key %s\n' % key else: out += object_diff(a[key], b[key], pre + 'd1[%s]' % repr(key)) elif isinstance(a, (list, tuple)): if len(a) != len(b): out += pre + ' length mismatch (%s, %s)\n' % (len(a), len(b)) else: for xx1, xx2 in zip(a, b): out += object_diff(xx1, xx2, pre='') elif isinstance(a, (string_types, int, float, bytes)): if a != b: out += pre + ' value mismatch (%s, %s)\n' % (a, b) elif a is None: if b is not None: out += pre + ' a is None, b is not (%s)\n' % (b) elif isinstance(a, np.ndarray): if not np.array_equal(a, b): out += pre + ' array mismatch\n' else: raise RuntimeError(pre + ': unsupported type %s (%s)' % (type(a), a)) return out def _check_skip_backend(backend): from expyfun._sound_controllers import _import_backend import pytest if isinstance(backend, dict): # actually an AC backend = backend['SOUND_CARD_BACKEND'] try: _import_backend(backend) except Exception as exc: pytest.skip('Skipping test for backend %s: %s' % (backend, exc)) def _check_params(params, keys, defaults, name): if not isinstance(params, dict): raise TypeError('{0} must be a dict, got type {1}' .format(name, type(params))) params = deepcopy(params) if not isinstance(params, dict): raise TypeError('{0} must be a dict, got {1}' .format(name, type(params))) # Set sensible defaults for values that are not passed for k in keys: params[k] = params.get(k, get_config(k, defaults.get(k, None))) # Check keys for k in params.keys(): if k not in keys: raise KeyError('Unrecognized key in {0}["{1}"], must be ' 'one of {2}'.format(name, k, ', '.join(keys))) return params def _get_display(): import pyglet try: display = pyglet.canvas.get_display() except AttributeError: # < 1.4 display = pyglet.window.get_platform().get_default_display() return display
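# A minimal usage sketch, not part of the original module: how get_config and
# set_config above are typically combined. 'SCREEN_NUM' is one of the keys in
# known_config_types; values are stored as strings. Guarded so that importing
# the module stays side-effect free.
if __name__ == '__main__':
    set_config('SCREEN_NUM', '0')                   # persist a preference (strings only)
    print(get_config('SCREEN_NUM', default='0'))    # an env var of the same name wins if set
    set_config('SCREEN_NUM', None)                  # passing None deletes the key again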
2.03125
2
mixin.py
delimatorres/foodbasket
0
3132
import signal class KillableProcess(object): def __init__(self): self.interrupt = False signal.signal(signal.SIGTERM, self._signal_handler) signal.signal(signal.SIGINT, self._signal_handler) def _signal_handler(self, sign, frame): self.interrupt = True
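# A minimal usage sketch, not part of the original mixin.py: subclass the mixin
# and poll `interrupt` in the work loop so SIGTERM/SIGINT let the process wind
# down cleanly. The Worker class and its sleep loop are hypothetical.
import time


class Worker(KillableProcess):
    def run(self):
        while not self.interrupt:   # becomes True once SIGTERM/SIGINT is received
            time.sleep(0.1)         # stand-in for one unit of real work


if __name__ == '__main__':
    Worker().run()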
2.34375
2
test5.py
liubaishuo-github/peening-post-processor
0
3133
def HAHA(): return 1,2,3 a = HAHA() print(a) print(a[0])
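# Not part of the original test file: the comma-separated return value above is
# packed into a tuple, so it can also be unpacked directly.
x, y, z = HAHA()
print(x, y, z)  # 1 2 3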
2.734375
3
torch/_fx/graph_module.py
jsun94/nimble
206
3134
<gh_stars>100-1000 import torch import torch.overrides import linecache from typing import Type, Dict, List, Any, Union from .graph import Graph import copy # normal exec loses the source code, however we can patch # the linecache module to still recover it. # using exec_with_source will add it to our local cache # and then tools like TorchScript will be able to get source info. _next_id = 0 def exec_with_source(src: str, globals: Dict[str, Any]): global _next_id key = f'<eval_with_key_{_next_id}>' _next_id += 1 _eval_cache[key] = [line + '\n' for line in src.splitlines()] exec(compile(src, key, 'exec'), globals) # patch linecache so that any code we exec using exec_with_source # works with inspect _eval_cache : Dict[str, List[str]] = {} _orig_getlines = linecache.getlines def patched_getline(*args, **kwargs): if args[0] in _eval_cache: return _eval_cache[args[0]] return _orig_getlines(*args, **kwargs) linecache.getlines = patched_getline def _forward_from_src(src : str): gbls: Dict[str, Any] = { 'torch': torch } exec_with_source(src, gbls) return gbls['forward'] def deserialize_graphmodule(body : dict) -> torch.nn.Module: """ Deserialize a GraphModule given the dictionary of the original module, using the code to reconstruct the graph. We delete the actual graph before saving the dictionary so that changes to the in-memory graph format do not get serialized. """ # We create a dummy class here because symbolic_trace pulls the forward() # function off of the class, rather than the instance class CodeOnlyModule(torch.nn.Module): def __init__(self, body): super().__init__() self.__dict__ = body CodeOnlyModule.forward = _forward_from_src(body['code']) from .symbolic_trace import Tracer # we shouldn't trace into any of the submodules, they were not # because they were not traced in the original GraphModule class KeepModules(Tracer): def is_leaf_module(self, _: torch.nn.Module, __: str) -> bool: return True return KeepModules().trace(CodeOnlyModule(body)) # copy an attribute value with qualified name 'target' from 'from_module' to 'to_module' # This installs empty Modules where none exist yet if they are subpaths of target def _copy_attr(from_module: torch.nn.Module, to_module: torch.nn.Module, target: str): *prefix, field = target.split('.') for item in prefix: f = getattr(from_module, item) t = getattr(to_module, item, None) if f is t: # we have already installed one of its parents # (e.g. target = root.linear.weight, but we have already installed root.linear) # once we install a parent, we no longer need to copy the children # since all the needed properties will already be present return if t is None: t = torch.nn.Module() setattr(to_module, item, t) from_module, to_module = f, t setattr(to_module, field, getattr(from_module, field)) # Assign attribute 'from_obj' to the qualified name 'target' on 'to_module # This installs empty Modules where none exist yet if they are subpaths of target def _assign_attr(from_obj: Any, to_module: torch.nn.Module, target: str): *prefix, field = target.split('.') for item in prefix: t = getattr(to_module, item, None) if t is None: t = torch.nn.Module() setattr(to_module, item, t) to_module = t setattr(to_module, field, from_obj) class GraphModule(torch.nn.Module): """ GraphModule is an nn.Module generated from an fx.Graph. 
GraphModule has important attributes: graph : The graph from which this GraphModule was generated code : The Python source code for the function generated from `graph` forward : The Python method generated from `graph` Note that when `graph` is reassigned, `code` and `forward` will be automatically regenerated. """ def __new__(cls: 'Type[GraphModule]', *args, **kwargs): # each instance of a graph module needs its own forward method # so create a new singleton class for each instance. # it is a subclass of the user-defined class, the only difference # is an extra layer to install the forward method class GraphModuleImpl(cls): # type: ignore pass return super().__new__(GraphModuleImpl) def __init__(self, root: Union[torch.nn.Module, Dict[str, Any]], graph: Graph): """ Construct a GraphModule. root - `root` can either be an nn.Module instance or a Dict mapping strings to any attribute type. - In the case that `root` is a Module, any references to Module-based objects (via qualified name) in the Graph's Nodes' `target` field will be copied over from the respective place within `root`'s Module hierarchy into the GraphModule's module hierarchy. - In the case that `root` is a dict, the qualified name found in a Node's `target` will be looked up directly in the dict's keys. The object mapped to by the Dict will be copied over into the appropriate place within the GraphModule's module hierarchy. graph - `graph` contains the nodes this GraphModule should use for code generation """ super().__init__() if isinstance(root, torch.nn.Module): if hasattr(root, 'training'): self.training = root.training for node in graph.nodes: if node.op in ['get_attr', 'call_module']: assert isinstance(node.target, str) _copy_attr(root, self, node.target) elif isinstance(root, dict): targets_to_copy = [] for node in graph.nodes: if node.op in ['get_attr', 'call_module']: assert isinstance(node.target, str) if node.target not in root: raise RuntimeError('Node ' + str(node) + ' referenced target ' + node.target + ' but that target was not provided in `root`!') targets_to_copy.append(node.target) # Sort targets in ascending order of the # of atoms. # This will ensure that less deeply nested attributes are assigned # before more deeply nested attributes. For example, foo.bar # will be assigned before foo.bar.baz. Otherwise, we might assign # the user-provided `foo.bar` and wipe out the previously-assigned # `foo.bar.baz` targets_to_copy.sort(key=lambda t: t.count('.')) for target_to_copy in targets_to_copy: _assign_attr(root[target_to_copy], self, target_to_copy) else: raise RuntimeError('Unsupported type ' + str(root) + ' passed for root!') self.graph = graph # TorchScript breaks trying to compile the graph setter because of the # continued string literal. 
Issue here: https://github.com/pytorch/pytorch/issues/44842 # # Shouldn't be an issue since these methods shouldn't be used in TorchScript anyway __jit_unused_properties__ = ['graph'] @property def graph(self): return self._graph @graph.setter def graph(self, val) -> None: self._graph = val body, result, free_variables = self._graph.python_code(root_module='self') body = '\n'.join(' ' + line for line in body.split('\n')) + '\n' self.code = f"""\ def forward(self, {', '.join(free_variables)}): {body} return {result} """ cls = type(self) cls.forward = _forward_from_src(self.code) def __reduce__(self): dict_without_graph = self.__dict__.copy() del dict_without_graph['_graph'] return (deserialize_graphmodule, (dict_without_graph,)) # because __reduce__ is defined for serialization, # we need to define deepcopy otherwise it will call __reduce__ # and cause symbolic tracing to occur every time we try to copy the object def __deepcopy__(self, memo): fake_mod = torch.nn.Module() fake_mod.__dict__ = copy.deepcopy(self.__dict__) return GraphModule(fake_mod, self.graph) def __copy__(self): return GraphModule(self, self.graph) def __str__(self) -> str: orig_str = super().__str__() return '\n'.join([orig_str, self.code]) # workarounds for issues in __torch_function__ # WAR for __torch_function__ not handling tensor lists, # fix is in https://github.com/pytorch/pytorch/pull/34725 # orig_cat = torch.cat # def patched_cat(*args, **kwargs): # tensors = args[0] # for t in tensors: # if isinstance(t, Proxy): # return t.__torch_function__(patched_cat, (), args, kwargs) # return orig_cat(*args, **kwargs) # patched_cat.__module__ = 'torch' # patched_cat.__name__ = 'cat' # torch.cat = patched_cat
2.34375
2
RequestHandler.py
robot0nfire/behem0th
2
3135
# # Copyright (c) 2016 <NAME> <<EMAIL>> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # import os import json import struct import threading import socket import queue import tempfile import base64 import select from behem0th import utils, log BLOCK_SIZE = 4096 class Route: def handle(self, data, request): raise NotImplementedError def send(self, data): self.handler.send(self.route_name, data) class FilelistRoute(Route): def handle(self, data, request): if request.is_client: request.client._filelist = data request.client._rlock.release() else: files, events = request.client._merge_filelist(data) with request.client._rlock: self.send(request.client._filelist) for e in events: request.queue_event(e) for f in files: request.queue_file(f[0], f[1]) """ { "action": "<action>", "path": "<relpath-to-file>" } <action> can be either 'receive' or 'send' Payload are base64 encoded chunks (BLOCK_SIZE bytes) """ class FileRoute(Route): def handle(self, data, request): action = data['action'] path = data['path'] if action == 'receive': tmpf = tempfile.NamedTemporaryFile(delete=False) buffer = b'' for chunk in request.recv(): buffer += chunk if len(buffer) >= BLOCK_SIZE: tmpf.write(base64.b64decode(buffer[:BLOCK_SIZE])) buffer = buffer[:BLOCK_SIZE] tmpf.write(base64.b64decode(buffer)) tmpf.close() # watchdog reports a file-deleted and a file-created event, so ignore both. request.client._ignore_next_fsevent(path) request.client._ignore_next_fsevent(path) os.rename(tmpf.name, request.client._abspath(path)) request.client._update_metadata(path) request.client._event_handler._dispatch( 'received', request.client, path, 'file' ) elif action == 'send': request.queue_file('send', path) else: log.warn('FileRoute: Unknown action \'{0}\', igoring.', action) # If we are the 'server', we also need to distribute all file request # to all other clients. 
if not request.is_client: action = 'send' if action == 'receive' else 'request' request.client._run_on_peers('queue_file', request, action, path) """ { "type": "<type>", "path": "<relpath-to-file>" } <type> can be one of 'file-created', 'file-deleted', 'file-moved' """ class EventRoute(Route): def handle(self, data, request): f_type, event = data['type'].split('-') path = data['path'] abspath = request.client._abspath(path) request.client._ignore_next_fsevent(path) # TODO: factor out common code with Client._handle_fsevent() and Client._merge_filelist() if event == 'created': # create the file/directory if f_type == 'file': open(abspath, 'a').close() else: os.mkdir(abspath, 0o755) request.client._add_to_filelist(path, f_type) elif event == 'deleted': request.client._remove_from_filelist(path) os.remove(abspath) elif event == 'moved': request.client._remove_from_filelist(path) os.rename(abspath, data['dest']) request.client._add_to_filelist(data['dest'], f_type) else: log.warn('EventRoute: Unknown event {0}', data) # For rationale, see FileRoute.handle() if not request.is_client: request.client._run_on_peers('queue_event', request, data) ROUTES = { 'filelist': FilelistRoute(), 'file': FileRoute(), 'event': EventRoute() } """ behem0th's protocol is completely text-based, using utf-8 encoding and encoded in JSON for easy parsing. A request usually looks like this: { "route": "<route-name>", "data": "<data>" } 'data' holds additional data which is then passed to the route. There is no special format designed for 'data' and is specific to each route. After each request there is a newline to separate them. (think of HTTP) If a route needs to transfer additional data (a 'payload'), it has to send them in a text-based format, e.g. base-64 encoding for binary data. After the payload, if any, there has to be another newline to separate it from the next request. """ class RequestHandler(threading.Thread): req_handler_num = 0 def __init__(self, **kwargs): super().__init__() self.daemon = True self.sync_queue = queue.Queue() self.routes = {} self.recvbuf = b'' RequestHandler.req_handler_num += 1 self.name = "request-handler-{0}".format(RequestHandler.req_handler_num) for key, value in kwargs.items(): setattr(self, key, value) with self.client._rlock: self.client._peers.append(self) self.sock.setblocking(0) self.is_client = bool(self.client._sock) for name, route in ROUTES.items(): route.route_name = name route.handler = self self.routes[name] = route def setup(self): log.info('Connected to {0}:{1}', self.address[0], self.address[1]) # If self.client has a (active) socket, it is a client and # thus needs to starts syncing up with the server. if self.is_client: # Lock the client until the filelist has been sent back by the server. self.client._rlock.acquire() self.send('filelist', self.client._filelist) def close(self): self.sync_queue.put({'action': 'exit'}) try: self.sock.shutdown(socket.SHUT_RDWR) except OSError: pass def handle(self, data): try: data = json.loads(data) except ValueError: log.error('Received invalid data: {0}', data) return route = data['route'] data = data['data'] log.info_v('Handling {0}, data:\n{1}', route, data) if route in self.routes: self.routes[route].handle(data, self) else: log.error("Data received on unknown route '{0}'!", route) def send(self, route, data): request = json.dumps({'route': route, 'data': data}) + '\n' self.sock.sendall(request.encode()) def recv(self): if self.recvbuf: # This needs special handling because there could be multiple # request in recvbuf. 
If this is the case, we can only yield the first # one and have to leave to others in recvbuf. index = self.recvbuf.find(b'\n') if index == -1: yield self.recvbuf self.recvbuf = None else: yield self.recvbuf[:index] self.recvbuf = self.recvbuf[index+1:] return while 1: select.select([self.sock], [], []) chunk = self.sock.recv(1024) if not len(chunk): # If select has signaled the socket is readable, yet .recv() # returns zero bytes, the other end probably performed # a close() or shutdown() on the socket. break index = chunk.find(b'\n') if index == -1: yield chunk else: yield chunk[:index] self.recvbuf = chunk[index+1:] break def queue_file(self, action, path): self.sync_queue.put({ 'action': action + '-file', 'path': path }) def queue_event(self, event): self.sync_queue.put({ 'action': 'send-event', 'event': event }) def sync_worker(self): while 1: entry = self.sync_queue.get() log.info_v('Processing {0}', entry) if entry['action'] == 'exit': break elif entry['action'] == 'send-file': path = entry['path'] abspath = self.client._abspath(path) self.send('file', { 'path': path, 'action': 'receive' }) for buf in utils.read_file_seq(abspath, BLOCK_SIZE): self.sock.sendall(base64.b64encode(buf)) self.sock.sendall(b'\n') self.client._event_handler._dispatch( 'sent', self.client, path, 'file' ) elif entry['action'] == 'request-file': self.send('file', { 'path': entry['path'], 'action': 'send' }) elif entry['action'] == 'send-event': self.send('event', entry['event']) self.sync_queue.task_done() def run(self): self.setup() utils.create_thread(self.sync_worker, name=self.name.replace('request-handler', 'sync-worker')) while 1: buffer = b'' for chunk in self.recv(): buffer += chunk if not len(buffer): break self.handle(buffer.decode()) log.info('Disconnected from {0}:{1}', self.address[0], self.address[1]) self.close()
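# A minimal sketch, not part of the original RequestHandler.py: what one
# wire-level request produced by send() looks like, per the protocol notes
# above -- a single JSON object followed by a newline, utf-8 encoded. The
# route and path values here are hypothetical.
if __name__ == '__main__':
    example = json.dumps({'route': 'event',
                          'data': {'type': 'file-created', 'path': 'docs/todo.txt'}}) + '\n'
    print(example.encode())
    # b'{"route": "event", "data": {"type": "file-created", "path": "docs/todo.txt"}}\n'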
1.976563
2
tests/utils/test_metrics.py
haochuanwei/hover
251
3136
from hover.utils.metrics import classification_accuracy
import numpy as np


def test_classification_accuracy():
    true = np.array([1, 2, 3, 4, 5, 6, 7, 7])
    pred = np.array([1, 2, 3, 4, 5, 6, 7, 8])
    accl = classification_accuracy(true, pred)
    accr = classification_accuracy(pred, true)
    assert np.allclose(accl, 7/8)
    assert np.allclose(accr, 7/8)
1.976563
2
scripts/blenderseed.package.py
rgirish28/blenderseed
0
3137
<filename>scripts/blenderseed.package.py #!/usr/bin/python # # This source file is part of appleseed. # Visit https://appleseedhq.net/ for additional information and resources. # # This software is released under the MIT license. # # Copyright (c) 2017-2018 <NAME>, The appleseedhq Organization # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the 'Software'), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # from __future__ import print_function from distutils import archive_util, dir_util from xml.etree.ElementTree import ElementTree import argparse import colorama import datetime import glob import os import platform import re import shutil import stat import subprocess import sys import time import traceback import urllib #-------------------------------------------------------------------------------------------------- # Constants. #-------------------------------------------------------------------------------------------------- VERSION = "1.1.0" SETTINGS_FILENAME = "blenderseed.package.configuration.xml" #-------------------------------------------------------------------------------------------------- # Utility functions. #-------------------------------------------------------------------------------------------------- GREEN_CHECKMARK = u"{0}\u2713{1}".format(colorama.Style.BRIGHT + colorama.Fore.GREEN, colorama.Style.RESET_ALL) RED_CROSSMARK = u"{0}\u2717{1}".format(colorama.Style.BRIGHT + colorama.Fore.RED, colorama.Style.RESET_ALL) def trace(message): # encode('utf-8') is required to support output redirection to files or pipes. print(u" {0}{1}{2}".format(colorama.Style.DIM + colorama.Fore.WHITE, message, colorama.Style.RESET_ALL).encode('utf-8')) def info(message): print(u" {0}".format(message).encode('utf-8')) def progress(message): print(u" {0}...".format(message).encode('utf-8')) def warning(message): print(u" {0}Warning: {1}.{2}".format(colorama.Style.BRIGHT + colorama.Fore.MAGENTA, message, colorama.Style.RESET_ALL).encode('utf-8')) def fatal(message): print(u"{0}Fatal: {1}. Aborting.{2}".format(colorama.Style.BRIGHT + colorama.Fore.RED, message, colorama.Style.RESET_ALL).encode('utf-8')) if sys.exc_info()[0]: print(traceback.format_exc()) sys.exit(1) def exe(filepath): return filepath + ".exe" if os.name == "nt" else filepath def safe_delete_file(path): try: if os.path.exists(path): os.remove(path) except OSError: fatal("Failed to delete file '" + path + "'") def on_rmtree_error(func, path, exc_info): # path contains the path of the file that couldn't be removed. # Let's just assume that it's read-only and unlink it. 
os.chmod(path, stat.S_IWRITE) os.unlink(path) def safe_delete_directory(path): Attempts = 10 for attempt in range(Attempts): try: if os.path.exists(path): shutil.rmtree(path, onerror=on_rmtree_error) return except OSError: if attempt < Attempts - 1: time.sleep(0.5) else: fatal("Failed to delete directory '" + path + "'") def safe_delete_directory_recursively(root_path, directory_name): safe_delete_directory(os.path.join(root_path, directory_name)) for entry in os.listdir(root_path): subdirectory = os.path.join(root_path, entry) if os.path.isdir(subdirectory): safe_delete_directory_recursively(subdirectory, directory_name) def safe_make_directory(path): if not os.path.isdir(path): os.makedirs(path) def pushd(path): old_path = os.getcwd() os.chdir(path) return old_path def copy_glob(input_pattern, output_path): for input_file in glob.glob(input_pattern): shutil.copy(input_file, output_path) #-------------------------------------------------------------------------------------------------- # Settings. #-------------------------------------------------------------------------------------------------- class Settings: def load(self): self.this_dir = os.path.dirname(os.path.realpath(__file__)) self.root_dir = os.path.join(self.this_dir, "..") print("Loading settings from " + SETTINGS_FILENAME + "...") tree = ElementTree() try: tree.parse(SETTINGS_FILENAME) except IOError: fatal("Failed to load configuration file '" + SETTINGS_FILENAME + "'") self.__load_values(tree) def print_summary(self): print("") print(" Platform: " + self.platform) print(" Path to appleseed release: " + self.appleseed_release_path) print(" Path to appleseed binaries: " + self.appleseed_bin_path) print(" Path to appleseed libraries: " + self.appleseed_lib_path) print(" Path to appleseed shaders: " + self.appleseed_shaders_path) print(" Path to appleseed schemas: " + self.appleseed_schemas_path) print(" Path to appleseed settings: " + self.appleseed_settings_path) print(" Path to appleseed.python: " + self.appleseed_python_path) print(" Path to maketx: " + self.maketx_path) print(" Output directory: " + self.output_dir) print("") def __load_values(self, tree): self.platform = self.__get_required(tree, "platform") self.appleseed_release_path = self.__get_required(tree, "appleseed_release_path") os.environ['APPLESEED'] = self.appleseed_release_path self.appleseed_bin_path = os.path.expandvars(self.__get_required(tree, "appleseed_bin_path")) self.appleseed_lib_path = os.path.expandvars(self.__get_required(tree, "appleseed_lib_path")) self.appleseed_shaders_path = os.path.expandvars(self.__get_required(tree, "appleseed_shaders_path")) self.appleseed_schemas_path = os.path.expandvars(self.__get_required(tree, "appleseed_schemas_path")) self.appleseed_settings_path = os.path.expandvars(self.__get_required(tree, "appleseed_settings_path")) self.appleseed_python_path = os.path.expandvars(self.__get_required(tree, "appleseed_python_path")) self.maketx_path = os.path.expandvars(self.__get_required(tree, "maketx_path")) self.output_dir = os.path.expandvars(self.__get_required(tree, "output_dir")) def __get_required(self, tree, key): value = tree.findtext(key) if value is None: fatal("Missing value \"{0}\" in configuration file".format(key)) return value #-------------------------------------------------------------------------------------------------- # Base package builder. 
#-------------------------------------------------------------------------------------------------- class PackageBuilder(object): def __init__(self, settings, package_version, build_date, no_release=False): self.settings = settings self.package_version = package_version self.build_date = build_date self.no_release = no_release def build_package(self): print("Building package:") print("") self.orchestrate() print("") print("The package was successfully built.") def orchestrate(self): self.remove_leftovers() self.copy_appleseed_python() self.copy_binaries() self.copy_dependencies() self.copy_schemas() self.copy_shaders() self.download_settings_files() self.remove_pyc_files() self.post_process_package() if not self.no_release: self.deploy_blenderseed_to_stage() self.clean_stage() self.build_final_zip_file() self.remove_stage() def remove_leftovers(self): progress("Removing leftovers from previous invocations") safe_delete_directory(os.path.join(self.settings.root_dir, "appleseed")) safe_delete_directory("blenderseed") def copy_appleseed_python(self): progress("Copying appleseed.python to root directory") # Create destination directory. lib_dir = os.path.join(self.settings.root_dir, "appleseed", "lib") safe_make_directory(lib_dir) # Copy appleseed.python. dir_util.copy_tree(self.settings.appleseed_python_path, lib_dir) # Remove _appleseedpython.so (Python 2) since blenderseed only needs _appleseedpython3.so (Python 3). # TODO: implement properly. safe_delete_file(os.path.join(lib_dir, "appleseed", "_appleseedpython.so")) safe_delete_file(os.path.join(lib_dir, "appleseed", "_appleseedpython.pyd")) def copy_binaries(self): progress("Copying binaries to root directory") # Create destination directory. bin_dir = os.path.join(self.settings.root_dir, "appleseed", "bin") safe_make_directory(bin_dir) # Copy appleseed binaries. for bin in [exe("appleseed.cli")]: shutil.copy(os.path.join(self.settings.appleseed_bin_path, bin), bin_dir) # Copy maketx. shutil.copy(exe(self.settings.maketx_path), bin_dir) def copy_schemas(self): progress("Copying schemas to root directory") dir_util.copy_tree(self.settings.appleseed_schemas_path, os.path.join(self.settings.root_dir, "appleseed", "schemas")) safe_delete_file(os.path.join(self.settings.root_dir, "appleseed", "schemas", ".gitignore")) def copy_shaders(self): progress("Copying shaders to root directory") # Create destination directory. shaders_dir = os.path.join(self.settings.root_dir, "appleseed", "shaders") safe_make_directory(shaders_dir) self.__do_copy_shaders(os.path.join(self.settings.appleseed_shaders_path, "appleseed"), shaders_dir) self.__do_copy_shaders(os.path.join(self.settings.appleseed_shaders_path, "blenderseed"), shaders_dir) def __do_copy_shaders(self, source_dir, target_dir): for root, dirs, files in os.walk(source_dir): for f in files: if f.endswith(".oso"): shutil.copy(os.path.join(root, f), target_dir) def download_settings_files(self): progress("Downloading settings files to root directory") # Create destination directory. 
settings_dir = os.path.join(self.settings.root_dir, "appleseed", "settings") safe_make_directory(settings_dir) for file in ["appleseed.cli.xml"]: urllib.urlretrieve( "https://raw.githubusercontent.com/appleseedhq/appleseed/master/sandbox/settings/{0}".format(file), os.path.join(settings_dir, file)) def remove_pyc_files(self): progress("Removing pyc files from root directory") for root, dirs, files in os.walk(os.path.join(self.settings.root_dir, "appleseed", "lib")): for f in files: if f.endswith(".pyc"): safe_delete_file(os.path.join(root, f)) def deploy_blenderseed_to_stage(self): progress("Deploying blenderseed to staging directory") shutil.copytree(self.settings.root_dir, "blenderseed", ignore=shutil.ignore_patterns("scripts")) def clean_stage(self): progress("Cleaning staging directory") safe_delete_directory_recursively("blenderseed", "__pycache__") for subdirectory in [".git", ".idea", "archives", "docs", "scripts", "tests"]: safe_delete_directory(os.path.join("blenderseed", subdirectory)) for file in [".gitignore", "README.md"]: safe_delete_file(os.path.join("blenderseed", file)) def build_final_zip_file(self): progress("Building final zip file from staging directory") package_name = "blenderseed-{0}-{1}-{2}".format(self.package_version, self.settings.platform, self.build_date) package_path = os.path.join(self.settings.output_dir, package_name) archive_util.make_zipfile(package_path, "blenderseed") info("Package path: {0}".format(package_path + ".zip")) def remove_stage(self): progress("Deleting staging directory") safe_delete_directory("blenderseed") def run(self, cmdline): trace("Running command line: {0}".format(cmdline)) os.system(cmdline) def run_subprocess(self, cmdline): p = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate() return p.returncode, out, err #-------------------------------------------------------------------------------------------------- # Windows package builder. #-------------------------------------------------------------------------------------------------- class WindowsPackageBuilder(PackageBuilder): def copy_dependencies(self): progress("Windows-specific: Copying dependencies") bin_dir = self.settings.appleseed_bin_path for dll in ["appleseed.dll", "appleseed.shared.dll"]: shutil.copy(os.path.join(bin_dir, dll), os.path.join(self.settings.root_dir, "appleseed", "bin")) def post_process_package(self): pass #-------------------------------------------------------------------------------------------------- # Mac package builder. #-------------------------------------------------------------------------------------------------- class MacPackageBuilder(PackageBuilder): SYSTEM_LIBS_PREFIXES = [ "/System/Library/", "/usr/lib/libcurl", "/usr/lib/libc++", "/usr/lib/libbz2", "/usr/lib/libSystem", #"/usr/lib/libz", "/usr/lib/libncurses", "/usr/lib/libobjc.A.dylib" ] def copy_dependencies(self): progress("Mac-specific: Copying dependencies") # Create destination directory. lib_dir = os.path.join(self.settings.root_dir, "appleseed", "lib") safe_make_directory(lib_dir) # Copy appleseed libraries. for lib in ["libappleseed.dylib", "libappleseed.shared.dylib"]: shutil.copy(os.path.join(self.settings.appleseed_lib_path, lib), lib_dir) # Get shared libs needed by binaries. all_libs = set() for bin in glob.glob(os.path.join(self.settings.root_dir, "appleseed", "bin", "*")): libs = self.__get_dependencies_for_file(bin) all_libs = all_libs.union(libs) # Get shared libs needed by appleseed.python. 
appleseedpython_libs = self.__get_dependencies_for_file( os.path.join(self.settings.root_dir, "appleseed", "lib", "appleseed", "_appleseedpython3.so")) all_libs = all_libs.union(appleseedpython_libs) # Get shared libs needed by libraries. # TODO: we're not computing the full transitive closure here! lib_libs = set() for lib in all_libs: libs = self.__get_dependencies_for_file(lib) lib_libs = lib_libs.union(libs) all_libs = all_libs.union(lib_libs) if True: # Print dependencies. trace(" Dependencies:") for lib in all_libs: trace(" {0}".format(lib)) # Copy needed libs to lib directory. for lib in all_libs: if True: trace(" Copying {0} to {1}...".format(lib, lib_dir)) shutil.copy(lib, lib_dir) def post_process_package(self): progress("Mac-specific: Post-processing package") self.__fixup_binaries() def __fixup_binaries(self): progress("Mac-specific: Fixing up binaries") self.set_libraries_ids() self.__change_library_paths_in_libraries() self.__change_library_paths_in_executables() def set_libraries_ids(self): lib_dir = os.path.join(self.settings.root_dir, "appleseed", "lib") for dirpath, dirnames, filenames in os.walk(lib_dir): for filename in filenames: ext = os.path.splitext(filename)[1] if ext == ".dylib" or ext == ".so": lib_path = os.path.join(dirpath, filename) self.__set_library_id(lib_path, filename) def __change_library_paths_in_libraries(self): lib_dir = os.path.join(self.settings.root_dir, "appleseed", "lib") for dirpath, dirnames, filenames in os.walk(lib_dir): for filename in filenames: ext = os.path.splitext(filename)[1] if ext == ".dylib" or ext == ".so": lib_path = os.path.join(dirpath, filename) self.__change_library_paths_in_binary(lib_path) def __change_library_paths_in_executables(self): bin_dir = os.path.join(self.settings.root_dir, "appleseed", "bin") for dirpath, dirnames, filenames in os.walk(bin_dir): for filename in filenames: ext = os.path.splitext(filename)[1] if ext != ".py" and ext != ".conf": exe_path = os.path.join(dirpath, filename) self.__change_library_paths_in_binary(exe_path) # Can be used on executables and dynamic libraries. def __change_library_paths_in_binary(self, bin_path): progress("Patching {0}".format(bin_path)) bin_dir = os.path.dirname(bin_path) lib_dir = os.path.join(self.settings.root_dir, "appleseed", "lib") path_to_appleseed_lib = os.path.relpath(lib_dir, bin_dir) # fix_paths set to False because we must retrieve the unmodified dependency in order to replace it by the correct one. 
for lib_path in self.__get_dependencies_for_file(bin_path, fix_paths=False): lib_name = os.path.basename(lib_path) if path_to_appleseed_lib == ".": self.__change_library_path(bin_path, lib_path, "@loader_path/{0}".format(lib_name)) else: self.__change_library_path(bin_path, lib_path, "@loader_path/{0}/{1}".format(path_to_appleseed_lib, lib_name)) def __set_library_id(self, target, name): self.run('install_name_tool -id "{0}" {1}'.format(name, target)) def __change_library_path(self, target, old, new): self.run('install_name_tool -change "{0}" "{1}" {2}'.format(old, new, target)) def __get_dependencies_for_file(self, filepath, fix_paths=True): filename = os.path.basename(filepath) loader_path = os.path.dirname(filepath) rpath = "/usr/local/lib/" # TODO: a great simplification if True: trace("Gathering dependencies for file") trace(" {0}".format(filepath)) trace("with @loader_path set to") trace(" {0}".format(loader_path)) trace("and @rpath hardcoded to") trace(" {0}".format(rpath)) returncode, out, err = self.run_subprocess(["otool", "-L", filepath]) if returncode != 0: fatal("Failed to invoke otool(1) to get dependencies for {0}: {1}".format(filepath, err)) libs = set() for line in out.split("\n")[1:]: # skip the first line line = line.strip() # Ignore empty lines. if len(line) == 0: continue # Parse the line. m = re.match(r"(.*) \(compatibility version .*, current version .*\)", line) if not m: fatal("Failed to parse line from otool(1) output: " + line) lib = m.group(1) # Ignore self-references (why do these happen?). if lib == filename: continue # Ignore system libs. if self.__is_system_lib(lib): continue # Ignore Qt frameworks. if re.search(r"Qt.*\.framework", lib): continue if fix_paths: # Handle libs relative to @loader_path. lib = lib.replace("@loader_path", loader_path) # Handle libs relative to @rpath. lib = lib.replace("@rpath", rpath) # Try to handle other relative libs. if not os.path.isabs(lib): # TODO: generalize to a collection of user-specified search paths. candidate = os.path.join(loader_path, lib) if not os.path.exists(candidate): candidate = os.path.join("/usr/local/lib/", lib) if os.path.exists(candidate): info("Resolved relative dependency {0} as {1}".format(lib, candidate)) lib = candidate libs.add(lib) if True: trace("Dependencies for file {0}:".format(filepath)) for lib in libs: if os.path.isfile(lib): trace(u" {0} {1}".format(GREEN_CHECKMARK, lib)) else: trace(u" {0} {1}".format(RED_CROSSMARK, lib)) # Don't check for missing dependencies if we didn't attempt to fix them. if fix_paths: for lib in libs: if not os.path.isfile(lib): fatal("Dependency {0} could not be found on disk".format(lib)) return libs def __is_system_lib(self, lib): for prefix in self.SYSTEM_LIBS_PREFIXES: if lib.startswith(prefix): return True return False #-------------------------------------------------------------------------------------------------- # Linux package builder. 
#-------------------------------------------------------------------------------------------------- class LinuxPackageBuilder(PackageBuilder): SYSTEM_LIBS_PREFIXES = [ "linux", "librt", "libpthread", "libGL", "libX", "libselinux", "libICE", "libSM", "libdl", "libm.so", "libgcc", "libc.so", "/lib64/ld-linux-", "libstdc++", "libxcb", "libdrm", "libnsl", "libuuid", "libgthread", "libglib", "libgobject", "libglapi", "libffi", "libfontconfig", "libutil", "libpython", "libxshmfence.so" ] def plugin_extension(self): return ".so" def copy_dependencies(self): progress("Linux-specific: Copying dependencies") # Create destination directory. lib_dir = os.path.join(self.settings.root_dir, "appleseed", "lib") safe_make_directory(lib_dir) # Copy appleseed libraries. for lib in ["libappleseed.so", "libappleseed.shared.so"]: shutil.copy(os.path.join(self.settings.appleseed_lib_path, lib), lib_dir) # Get shared libs needed by binaries. all_libs = set() for bin in glob.glob(os.path.join(self.settings.root_dir, "appleseed", "bin", "*")): libs = self.__get_dependencies_for_file(bin) all_libs = all_libs.union(libs) # Get shared libs needed by appleseed.python. appleseedpython_libs = self.__get_dependencies_for_file( os.path.join(self.settings.root_dir, "appleseed", "lib", "appleseed", "_appleseedpython3.so")) all_libs = all_libs.union(appleseedpython_libs) # Get shared libs needed by libraries. lib_libs = set() for lib in all_libs: libs = self.__get_dependencies_for_file(lib) lib_libs = lib_libs.union(libs) all_libs = all_libs.union(lib_libs) # Copy all shared libraries. for lib in all_libs: shutil.copy(lib, lib_dir) def post_process_package(self): progress("Linux-specific: Post-processing package") for bin in glob.glob(os.path.join(self.settings.root_dir, "appleseed", "bin", "*")): self.run("chrpath -r \$ORIGIN/../lib " + bin) for lib in glob.glob(os.path.join(self.settings.root_dir, "appleseed", "lib", "*.so")): self.run("chrpath -d " + lib) appleseed_python_dir = os.path.join(self.settings.root_dir, "appleseed", "lib", "appleseed") for py_cpp_module in glob.glob(os.path.join(appleseed_python_dir, "*.so")): self.run("chrpath -r \$ORIGIN/../ " + py_cpp_module) def __is_system_lib(self, lib): for prefix in self.SYSTEM_LIBS_PREFIXES: if lib.startswith(prefix): return True return False def __get_dependencies_for_file(self, filepath): returncode, out, err = self.run_subprocess(["ldd", filepath]) if returncode != 0: fatal("Failed to invoke ldd(1) to get dependencies for {0}: {1}".format(filepath, err)) libs = set() for line in out.split("\n"): line = line.strip() # Ignore empty lines. if len(line) == 0: continue # Ignore system libs. if self.__is_system_lib(line): continue # Ignore appleseed libs. if "libappleseed" in line: continue libs.add(line.split()[2]) return libs #-------------------------------------------------------------------------------------------------- # Entry point. 
#-------------------------------------------------------------------------------------------------- def main(): colorama.init() parser = argparse.ArgumentParser(description="build a blenderseed package from sources") parser.add_argument("--nozip", action="store_true", help="copies appleseed binaries to blenderseed folder but does not build a release package") args = parser.parse_args() no_release = args.nozip package_version = subprocess.Popen("git describe --long", stdout=subprocess.PIPE, shell=True).stdout.read().strip() build_date = datetime.date.today().isoformat() print("blenderseed.package version " + VERSION) print("") settings = Settings() settings.load() settings.print_summary() if os.name == "nt": package_builder = WindowsPackageBuilder(settings, package_version, build_date, no_release) elif os.name == "posix" and platform.mac_ver()[0] != "": package_builder = MacPackageBuilder(settings, package_version, build_date, no_release) elif os.name == "posix" and platform.mac_ver()[0] == "": package_builder = LinuxPackageBuilder(settings, package_version, build_date, no_release) else: fatal("Unsupported platform: " + os.name) package_builder.build_package() if __name__ == "__main__": main()
1.179688
1
uts/uts_17_aut_py/2/A.py
viad00/code_olymp
0
3138
ser = int(input())
mas = list(map(int, input().split()))
mas.sort()
print(*mas)
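# Worked example (not part of the original solution): the first input line is
# the element count (read but otherwise unused), the second line the elements.
#   stdin:   5
#            3 1 4 1 5
#   stdout:  1 1 3 4 5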
2.3125
2
wagtailkatex/wagtail_hooks.py
ongchi/wagtail-katex
0
3139
from django.utils.translation import gettext

from wagtail.admin.rich_text.editors.draftail import features as draftail_features
from wagtail.core import hooks

from .richtext import KaTeXEntityElementHandler, katex_entity_decorator


@hooks.register('register_rich_text_features')
def register_katex_features(features):
    """
    Registering the `katex` feature, which uses the `KATEX` Draft.js entity type,
    and is stored as HTML with a `<div data-katex-embed="c = \\pm\\sqrt{a^2 + b^2}">` tag.
    """
    features.default_features.append('katex')

    feature_name = 'katex-embed'
    type_ = 'KATEX-EMBED'

    features.register_editor_plugin(
        'draftail', feature_name, draftail_features.EntityFeature(
            {
                'type': type_,
                'icon': 'square-root-alt',
                'description': gettext('Equation'),
            },
            js=[
                'wagtailkatex/katex/katex.min.js',
                'wagtailkatex/wagtailkatex.js',
            ],
            css={
                'all': [
                    'wagtailkatex/katex/katex.min.css',
                ]
            }
        )
    )

    features.register_converter_rule('contentstate', feature_name, {
        'from_database_format': {'div[data-katex-embed]': KaTeXEntityElementHandler()},
        'to_database_format': {'entity_decorators': {type_: katex_entity_decorator}},
    })
1.96875
2
esque_wire/protocol/serializers/api/elect_preferred_leaders_request.py
real-digital/esque-wire
0
3140
############################################################### # Autogenerated module. Please don't modify. # # Edit according file in protocol_generator/templates instead # ############################################################### from typing import Dict from ...structs.api.elect_preferred_leaders_request import ElectPreferredLeadersRequestData, TopicPartition from ._main_serializers import ArraySerializer, ClassSerializer, Schema, int32Serializer, stringSerializer topicPartitionSchemas: Dict[int, Schema] = { 0: [("topic", stringSerializer), ("partition_id", ArraySerializer(int32Serializer))] } topicPartitionSerializers: Dict[int, ClassSerializer[TopicPartition]] = { version: ClassSerializer(TopicPartition, schema) for version, schema in topicPartitionSchemas.items() } topicPartitionSerializers[-1] = topicPartitionSerializers[0] electPreferredLeadersRequestDataSchemas: Dict[int, Schema] = { 0: [("topic_partitions", ArraySerializer(topicPartitionSerializers[0])), ("timeout_ms", int32Serializer)] } electPreferredLeadersRequestDataSerializers: Dict[int, ClassSerializer[ElectPreferredLeadersRequestData]] = { version: ClassSerializer(ElectPreferredLeadersRequestData, schema) for version, schema in electPreferredLeadersRequestDataSchemas.items() } electPreferredLeadersRequestDataSerializers[-1] = electPreferredLeadersRequestDataSerializers[0]
1.601563
2
test/tests/bootstrap/test_api20_windows_bootstrap.py
arunrordell/RackHD
451
3141
''' Copyright 2017 Dell Inc. or its subsidiaries. All Rights Reserved. This script tests arbitrary payload of the RackHD API 2.0 OS bootstrap workflows. The default case is running a minimum payload Windows OS install. Other Windows-type OS install cases can be specified by creating a payload file and specifiying it using the '-extra' argument. This test takes 30-45 minutes to run. Example payload file (installed in configuration dir): {"bootstrap-payload": {"name": "Graph.InstallWindowsServer", "options": {"defaults": {"version": "2012", "repo": "http://172.31.128.1:8080/repo/winpe", "smbRepo": "\\\\172.31.128.1\\windowsServer2012", "productkey": "<KEY>", "username": "rackhduser", "password": "<PASSWORD>", "smbUser": "vagrant", "smbPassword": "<PASSWORD>"}}} } Example command line using external payload file: python run_tests.py -stack 4 -test tests/bootstrap/test_api20_windows_bootstrap.py -extra base_windows_2012_install.json RackHD Windows installation workflow requires special configuration of the RackHD server: - A customized WinPE environment installed on RackHD server as documented here: https://github.com/RackHD/on-tools/tree/master/winpe - Samba installed on the RackHD server and configured as documented here: http://rackhd.readthedocs.io/en/latest/rackhd/install_os.html?highlight=os%20install - Windows 2012 installation distro installed on RackHD server or equivalent NFS mount. - Windows 2012 activation key in the installation payload file. ''' import fit_path # NOQA: unused import from nose.plugins.attrib import attr import fit_common import flogging import random import json import time from nosedep import depends from datetime import datetime log = flogging.get_loggers() # sample default base payload PAYLOAD = {"name": "Graph.InstallWindowsServer", "options": {"defaults": {"version": "2012", "repo": "http://172.31.128.1:8080/repo/winpe", "smbRepo": "\\\\172.31.128.1\\windowsServer2012", "productkey": "<KEY>", "username": "rackhduser", "password": "<PASSWORD>", "smbUser": "vagrant", "smbPassword": "<PASSWORD>"}}} # if an external payload file is specified, use that config = fit_common.fitcfg().get('bootstrap-payload', None) if config: PAYLOAD = config # function to return the value of a field from the workflow response def findall(obj, key): if isinstance(obj, dict): for k, v in obj.items(): if k == key: log.error(" workflow error: %s", v) findall(v, key) elif isinstance(obj, list): for item in obj: findall(item, key) else: pass # this routine polls a workflow task ID for completion def wait_for_workflow_complete(instanceid, start_time, waittime=3200, cycle=30): log.info_1(" Workflow started at time: " + str(datetime.fromtimestamp(start_time))) while time.time() - start_time < waittime: # limit test to waittime seconds result = fit_common.rackhdapi("/api/2.0/workflows/" + instanceid) if result['status'] != 200: log.error(" HTTP error: " + result['text']) return False if result['json']['status'] in ['running', 'pending']: log.info_5("{} workflow status: {}".format(result['json']['injectableName'], result['json']['status'])) fit_common.time.sleep(cycle) elif result['json']['status'] == 'succeeded': log.info_1("{} workflow status: {}".format(result['json']['injectableName'], result['json']['status'])) end_time = time.time() log.info_1(" Workflow completed at time: " + str(datetime.fromtimestamp(end_time))) log.info_1(" Workflow duration: " + str(end_time - start_time)) return True else: end_time = time.time() log.info_1(" Workflow failed at time: " + 
str(datetime.fromtimestamp(end_time))) log.info_1(" Workflow duration: " + str(end_time - start_time)) try: res = json.loads(result['text']) findall(res, "error") except: res = result['text'] log.error(" Workflow failed: status: %s", result['json']['status']) log.error(" Data: %s", json.dumps(res, indent=4, separators=(',', ':'))) return False try: res = json.loads(result['text']) except: res = result['text'] log.error(" Workflow Timeout: " + json.dumps(res, indent=4, separators=(',', ':'))) return False # ------------------------ Tests ------------------------------------- @attr(all=False) class api20_bootstrap_windows(fit_common.unittest.TestCase): @classmethod def setUpClass(cls): # Get the list of nodes NODECATALOG = fit_common.node_select() assert (len(NODECATALOG) != 0), "There are no nodes currently discovered" # Select one node at random cls.__NODE = NODECATALOG[random.randint(0, len(NODECATALOG) - 1)] # Print node Id, node BMC mac ,node type nodeinfo = fit_common.rackhdapi('/api/2.0/nodes/' + cls.__NODE)['json'] nodesku = fit_common.rackhdapi(nodeinfo.get('sku'))['json']['name'] monurl = "/api/2.0/nodes/" + cls.__NODE + "/catalogs/bmc" mondata = fit_common.rackhdapi(monurl, action="get") catalog = mondata['json'] bmcresult = mondata['status'] if bmcresult != 200: log.info_1(" Node ID: " + cls.__NODE) log.info_1(" Error on catalog/bmc command") else: log.info_1(" Node ID: " + cls.__NODE) log.info_1(" Node SKU: " + nodesku) log.info_1(" Node BMC Mac: %s", catalog.get('data')['MAC Address']) log.info_1(" Node BMC IP Addr: %s", catalog.get('data')['IP Address']) log.info_1(" Node BMC IP Addr Src: %s", catalog.get('data')['IP Address Source']) # delete active workflows for specified node result = fit_common.cancel_active_workflows(cls.__NODE) assert (result is True), "There are still some active workflows running against the node" def test01_node_check(self): # Log node data nodeinfo = fit_common.rackhdapi('/api/2.0/nodes/' + self.__class__.__NODE)['json'] nodesku = fit_common.rackhdapi(nodeinfo.get('sku'))['json']['name'] log.info_1(" Node ID: %s ", self.__class__.__NODE) log.info_1(" Node SKU: %s ", nodesku) log.info_1(" Graph Name: Graph.PowerOn.Node") # Ensure the compute node is powered on and reachable result = fit_common.rackhdapi('/api/2.0/nodes/' + self.__class__.__NODE + '/workflows', action='post', payload={"name": "Graph.PowerOn.Node"}) self.assertEqual(result['status'], 201, "Node Power on workflow API failed, see logs.") self.assertTrue(wait_for_workflow_complete(result['json']['instanceId'], time.time(), 50, 5), "Node Power on workflow failed, see logs.") @depends(after=test01_node_check) def test02_os_install(self): # Log node data nodeinfo = fit_common.rackhdapi('/api/2.0/nodes/' + self.__class__.__NODE)['json'] nodesku = fit_common.rackhdapi(nodeinfo.get('sku'))['json']['name'] log.info_1(" Node ID: " + self.__class__.__NODE) log.info_1(" Node SKU: " + nodesku) log.info_1(" Graph Name: Graph.InstallWindowsServer") log.info_1(" Payload: " + fit_common.json.dumps(PAYLOAD)) # launch workflow workflowid = None result = fit_common.rackhdapi('/api/2.0/nodes/' + self.__class__.__NODE + '/workflows', action='post', payload=PAYLOAD) if result['status'] == 201: # workflow running log.info_1(" InstanceID: " + result['json']['instanceId']) workflowid = result['json']['instanceId'] else: # workflow failed with response code log.error(" InstanceID: " + result['text']) self.fail("Workflow failed with response code: " + result['status']) 
self.assertTrue(wait_for_workflow_complete(workflowid, time.time()), "OS Install workflow failed, see logs.") if __name__ == '__main__': fit_common.unittest.main()
2.203125
2
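The wait_for_workflow_complete helper above is just a poll-until-terminal loop over the /api/2.0/workflows/<id> endpoint. A minimal stand-alone sketch of the same pattern, assuming the requests package and a hypothetical RackHD host URL (the test itself goes through fit_common.rackhdapi instead):

import time
import requests

def poll_workflow(base_url, instance_id, timeout=3200, interval=30):
    """Poll a RackHD 2.0 workflow until it leaves the running/pending states."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        resp = requests.get("{}/api/2.0/workflows/{}".format(base_url, instance_id))
        resp.raise_for_status()
        status = resp.json().get("status")
        if status in ("running", "pending"):
            time.sleep(interval)            # still in progress; wait and poll again
        else:
            return status == "succeeded"    # any other state is terminal
    return False                            # timed out

# Example call (hypothetical host): poll_workflow("http://172.31.128.1:9090", workflow_id)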
random_number.py
till-h/alexa
0
3142
from flask import Flask, render_template from flask_ask import Ask, statement import random app = Flask(__name__) ask = Ask(app, '/') @ask.intent('RandomNumber', convert={'lowerLimit': int, 'upperLimit': int}) def hello(lowerLimit, upperLimit): if lowerLimit is None: lowerLimit = 0 if upperLimit is None: upperLimit = 100 number = random.randint(lowerLimit, upperLimit) text = render_template('random_number', lowerLimit=lowerLimit, upperLimit=upperLimit, number=number) return statement(text).simple_card('Flask-Ask Random Number', text) if __name__ == '__main__': app.run(debug=True)
2.890625
3
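By default flask_ask's render_template reads the response wording for 'random_number' from a templates.yaml file next to the app, so that entry must exist for the handler above to work. The slot handling itself is plain Python; a minimal sketch of the equivalent logic without flask_ask, with hypothetical reply wording:

import random

def random_number_text(lower_limit=None, upper_limit=None):
    # Same defaulting as the intent handler above.
    lower_limit = 0 if lower_limit is None else lower_limit
    upper_limit = 100 if upper_limit is None else upper_limit
    number = random.randint(lower_limit, upper_limit)
    return "Your random number between {} and {} is {}".format(lower_limit, upper_limit, number)

print(random_number_text(3, 10))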
model/losses.py
askerlee/rift
11
3143
<reponame>askerlee/rift import torch import numpy as np import torch.nn as nn import torch.nn.functional as F import torchvision.models as models from model.laplacian import LapLoss device = torch.device("cuda" if torch.cuda.is_available() else "cpu") class EPE(nn.Module): def __init__(self): super(EPE, self).__init__() def forward(self, flow, gt, loss_mask): loss_map = (flow - gt.detach()) ** 2 loss_map = (loss_map.sum(1, True) + 1e-6) ** 0.5 return (loss_map * loss_mask) class Ternary(nn.Module): def __init__(self): super(Ternary, self).__init__() patch_size = 7 out_channels = patch_size * patch_size self.w = np.eye(out_channels).reshape( (patch_size, patch_size, 1, out_channels)) self.w = np.transpose(self.w, (3, 2, 0, 1)) self.w = torch.tensor(self.w).float().to(device) def transform(self, img): patches = F.conv2d(img, self.w, padding=3, bias=None) transf = patches - img transf_norm = transf / torch.sqrt(0.81 + transf**2) return transf_norm def rgb2gray(self, rgb): r, g, b = rgb[:, 0:1, :, :], rgb[:, 1:2, :, :], rgb[:, 2:3, :, :] gray = 0.2989 * r + 0.5870 * g + 0.1140 * b return gray def hamming(self, t1, t2): dist = (t1 - t2) ** 2 dist_norm = torch.mean(dist / (0.1 + dist), 1, True) return dist_norm def valid_mask(self, t, padding): n, _, h, w = t.size() inner = torch.ones(n, 1, h - 2 * padding, w - 2 * padding).type_as(t) mask = F.pad(inner, [padding] * 4) return mask def forward(self, img0, img1): img0 = self.transform(self.rgb2gray(img0)) img1 = self.transform(self.rgb2gray(img1)) return self.hamming(img0, img1) * self.valid_mask(img0, 1) class SOBEL(nn.Module): def __init__(self): super(SOBEL, self).__init__() self.kernelX = torch.tensor([ [1, 0, -1], [2, 0, -2], [1, 0, -1], ]).float() self.kernelY = self.kernelX.clone().T self.kernelX = self.kernelX.unsqueeze(0).unsqueeze(0).to(device) self.kernelY = self.kernelY.unsqueeze(0).unsqueeze(0).to(device) def forward(self, pred, gt): N, C, H, W = pred.shape[0], pred.shape[1], pred.shape[2], pred.shape[3] img_stack = torch.cat( [pred.reshape(N*C, 1, H, W), gt.reshape(N*C, 1, H, W)], 0) sobel_stack_x = F.conv2d(img_stack, self.kernelX, padding=1) sobel_stack_y = F.conv2d(img_stack, self.kernelY, padding=1) pred_X, gt_X = sobel_stack_x[:N*C], sobel_stack_x[N*C:] pred_Y, gt_Y = sobel_stack_y[:N*C], sobel_stack_y[N*C:] L1X, L1Y = torch.abs(pred_X-gt_X), torch.abs(pred_Y-gt_Y) loss = (L1X+L1Y) return loss class MeanShift(nn.Conv2d): def __init__(self, data_mean, data_std, data_range=1, norm=True): c = len(data_mean) super(MeanShift, self).__init__(c, c, kernel_size=1) std = torch.Tensor(data_std) self.weight.data = torch.eye(c).view(c, c, 1, 1) if norm: self.weight.data.div_(std.view(c, 1, 1, 1)) self.bias.data = -1 * data_range * torch.Tensor(data_mean) self.bias.data.div_(std) else: self.weight.data.mul_(std.view(c, 1, 1, 1)) self.bias.data = data_range * torch.Tensor(data_mean) self.requires_grad = False class VGGPerceptualLoss(torch.nn.Module): def __init__(self, rank=0): super(VGGPerceptualLoss, self).__init__() blocks = [] pretrained = True self.vgg_pretrained_features = models.vgg19(pretrained=pretrained).features self.normalize = MeanShift([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], norm=True).cuda() for param in self.parameters(): param.requires_grad = False def forward(self, X, Y, indices=None): X = self.normalize(X) Y = self.normalize(Y) indices = [2, 7, 12, 21, 30] weights = [1.0/2.6, 1.0/4.8, 1.0/3.7, 1.0/5.6, 10/1.5] k = 0 loss = 0 for i in range(indices[-1]): X = self.vgg_pretrained_features[i](X) Y = 
self.vgg_pretrained_features[i](Y) if (i+1) in indices: loss += weights[k] * (X - Y.detach()).abs().mean() * 0.1 k += 1 return loss # flow could have any channels. # https://github.com/coolbeam/OIFlow/blob/main/utils/tools.py def flow_smooth_delta(flow, if_second_order=False): def gradient(x): D_dx = x[:, :, :, 1:] - x[:, :, :, :-1] D_dy = x[:, :, 1:] - x[:, :, :-1] return D_dx, D_dy dx, dy = gradient(flow) # dx2, dxdy = gradient(dx) # dydx, dy2 = gradient(dy) if if_second_order: dx2, dxdy = gradient(dx) dydx, dy2 = gradient(dy) smooth_loss = dx.abs().mean() + dy.abs().mean() + dx2.abs().mean() + dxdy.abs().mean() + dydx.abs().mean() + dy2.abs().mean() else: smooth_loss = dx.abs().mean() + dy.abs().mean() # smooth_loss = dx.abs().mean() + dy.abs().mean() # + dx2.abs().mean() + dxdy.abs().mean() + dydx.abs().mean() + dy2.abs().mean() # 暂时不上二阶的平滑损失,似乎加上以后就太猛了,无法降低photo loss TODO return smooth_loss # flow should have 4 channels. # https://github.com/coolbeam/OIFlow/blob/main/utils/tools.py # weight_type='exp' seems to perform better than 'gauss'. def edge_aware_smoothness_order1(img0, img1, flow, constant=1.0, weight_type='exp', error_type='L1'): def weight_fn(x): if weight_type == 'gauss': y = x ** 2 elif weight_type == 'exp': y = torch.abs(x) else: raise ValueError('') return y def gradient_xy(img): gx = img[:, :, :, :-1] - img[:, :, :, 1:] gy = img[:, :, :-1, :] - img[:, :, 1:, :] return gx, gy def gradweight_xy(img0, img1): img0_gx, img0_gy = gradient_xy(img0) img1_gx, img1_gy = gradient_xy(img1) img0_wx = torch.exp(-torch.mean(weight_fn(constant * img0_gx), 1, keepdim=True)) img0_wy = torch.exp(-torch.mean(weight_fn(constant * img0_gy), 1, keepdim=True)) img1_wx = torch.exp(-torch.mean(weight_fn(constant * img1_gx), 1, keepdim=True)) img1_wy = torch.exp(-torch.mean(weight_fn(constant * img1_gy), 1, keepdim=True)) # First two flow channels: 1->0 flow. So use img1 weights. # Second two flow channels: 0->1 flow. So use img0 weights. # weights_x and weights_y are for x and y's spatial gradients, respectively. weights_x = torch.cat([img1_wx, img1_wx, img0_wx, img0_wx], dim=1) weights_y = torch.cat([img1_wy, img0_wy, img0_wy, img1_wy], dim=1) return weights_x, weights_y def error_fn(x): if error_type == 'L1': y = torch.abs(x) elif error_type == 'abs_robust': y = (torch.abs(x) + 0.01).pow(0.4) else: raise ValueError('') return y # The flow gradients along x, y axes, respectively. # flow_gx, flow_gy have the same number of channels as flow. # No matter the flow is x- or y-flow, it should be smooth along both x and y axes. # I.e., a y-flow should also be smooth along x-axis, and x-flow should also be smooth along y-axis. flow_gx, flow_gy = gradient_xy(flow) # weights_x, weights_y both have 4 channels, same as flow_gx and flow_gy (if the input flow has 4 channels). weights_x, weights_y = gradweight_xy(img0, img1) smoothness_x = error_fn(flow_gx) * weights_x smoothness_y = error_fn(flow_gy) * weights_y return torch.mean(smoothness_x) + torch.mean(smoothness_y) # Dual teaching helps slightly. def dual_teaching_loss(mid_gt, img_stu, flow_stu, img_tea, flow_tea): loss_distill = 0 # Ws[0]: weight of teacher -> student. # Ws[1]: weight of student -> teacher. # Two directions could take different weights. # Set Ws[1] to 0 to disable student -> teacher. Ws = [1, 0.5] use_lap_loss = False # Laplacian loss performs better in earlier epochs, but worse in later epochs. # Moreover, Laplacian loss is significantly slower. 
if use_lap_loss: loss_fun = LapLoss(max_levels=3, reduction='none') else: loss_fun = nn.L1Loss(reduction='none') for i in range(2): student_error = loss_fun(img_stu, mid_gt).mean(1, True) teacher_error = loss_fun(img_tea, mid_gt).mean(1, True) # distill_mask indicates where the warped images according to student's prediction # is worse than that of the teacher. # If at some points, the warped image of the teacher is better than the student, # then regard the flow at these points are more accurate, and use them to teach the student. distill_mask = (student_error > teacher_error + 0.01).float().detach() # loss_distill is the sum of the distillation losses at 2 directions. loss_distill += Ws[i] * ((flow_tea.detach() - flow_stu).abs() * distill_mask).mean() # Swap student and teacher, and calculate the distillation loss again. img_stu, flow_stu, img_tea, flow_tea = \ img_tea, flow_tea, img_stu, flow_stu # The distillation loss from the student to the teacher is given a smaller weight. # loss_distill = loss_distill / 2 return loss_distill if __name__ == '__main__': img0 = torch.zeros(3, 3, 256, 256).float().to(device) img1 = torch.tensor(np.random.normal( 0, 1, (3, 3, 256, 256))).float().to(device) ternary_loss = Ternary() print(ternary_loss(img0, img1).shape)
2.296875
2
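A quick smoke test for the unsupervised terms defined above, assuming the module is importable as model.losses and using random tensors; per the comments in the file, the flow passed to edge_aware_smoothness_order1 carries four channels (1->0 flow in channels 0-1, 0->1 flow in channels 2-3):

import torch
from model.losses import edge_aware_smoothness_order1, flow_smooth_delta

img0 = torch.rand(2, 3, 64, 64)
img1 = torch.rand(2, 3, 64, 64)
flow = torch.randn(2, 4, 64, 64)   # bidirectional flow, 4 channels

smooth_edge  = edge_aware_smoothness_order1(img0, img1, flow)
smooth_delta = flow_smooth_delta(flow, if_second_order=True)
print(smooth_edge.item(), smooth_delta.item())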
project/python/swarm_simulation.py
righetti/swarmrobotics
8
3144
import numpy as np import pybullet as p import itertools from robot import Robot class World(): def __init__(self): # create the physics simulator self.physicsClient = p.connect(p.GUI) p.setGravity(0,0,-9.81) self.max_communication_distance = 2.0 # We will integrate every 4ms (250Hz update) self.dt = 1./250. p.setPhysicsEngineParameter(self.dt, numSubSteps=1) # Create the plane. self.planeId = p.loadURDF("../models/plane.urdf") p.changeDynamics(self.planeId, -1, lateralFriction=5., rollingFriction=0) self.goalId = p.loadURDF("../models/goal.urdf") self.goalId = p.loadURDF("../models/goal2.urdf") # the balls self.ball1 = p.loadURDF("../models/ball1.urdf") p.resetBasePositionAndOrientation(self.ball1, [2., 4., 0.5], (0., 0., 0.5, 0.5)) self.ball2 = p.loadURDF("../models/ball2.urdf") p.resetBasePositionAndOrientation(self.ball2, [4., 2., 0.5], (0., 0., 0.5, 0.5)) p.resetDebugVisualizerCamera(7.0,90.0, -43.0, (1., 1., 0.0)) # Add objects wallId = p.loadSDF("../models/walls.sdf")[0] p.resetBasePositionAndOrientation(wallId, [0., -1., 0], (0., 0., 0.5, 0.5)) wallId = p.loadSDF("../models/walls.sdf")[0] p.resetBasePositionAndOrientation(wallId, [0., 1., 0], (0., 0., 0.5, 0.5)) wallId = p.loadSDF("../models/walls.sdf")[0] p.resetBasePositionAndOrientation(wallId, [3., -1., 0], (0., 0., 0.5, 0.5)) wallId = p.loadSDF("../models/walls.sdf")[0] p.resetBasePositionAndOrientation(wallId, [3., 1., 0], (0., 0., 0.5, 0.5)) wallId = p.loadSDF("../models/walls.sdf")[0] p.resetBasePositionAndOrientation(wallId, [1., 2., 0], (0., 0., 0., 1.)) wallId = p.loadSDF("../models/walls.sdf")[0] p.resetBasePositionAndOrientation(wallId, [2., -2., 0], (0., 0., 0., 1.)) # tube # wallId = p.loadSDF("../models/walls.sdf")[0] # p.resetBasePositionAndOrientation(wallId, [-1., 5., 0], (0., 0., 0., 1.)) # wallId = p.loadSDF("../models/walls.sdf")[0] # p.resetBasePositionAndOrientation(wallId, [-1., 6., 0], (0., 0., 0., 1.)) # #arena # wallId = p.loadSDF("../models/walls.sdf")[0] # p.resetBasePositionAndOrientation(wallId, [-2, 4., 0], (0., 0., 0.5, 0.5)) # wallId = p.loadSDF("../models/walls.sdf")[0] # p.resetBasePositionAndOrientation(wallId, [-2., 7., 0], (0., 0., 0.5, 0.5)) # wallId = p.loadSDF("../models/walls.sdf")[0] # p.resetBasePositionAndOrientation(wallId, [-2., 9., 0], (0., 0., 0.5, 0.5)) # wallId = p.loadSDF("../models/walls.sdf")[0] # p.resetBasePositionAndOrientation(wallId, [-2., 11., 0], (0., 0., 0.5, 0.5)) # wallId = p.loadSDF("../models/walls.sdf")[0] # p.resetBasePositionAndOrientation(wallId, [-2., 13., 0], (0., 0., 0.5, 0.5)) # wallId = p.loadSDF("../models/walls.sdf")[0] # p.resetBasePositionAndOrientation(wallId, [-3., 3., 0], (0., 0., 0., 1.)) # wallId = p.loadSDF("../models/walls.sdf")[0] # p.resetBasePositionAndOrientation(wallId, [-5., 3., 0], (0., 0., 0., 1.)) # wallId = p.loadSDF("../models/walls.sdf")[0] # p.resetBasePositionAndOrientation(wallId, [-7., 3., 0], (0., 0., 0., 1.)) # wallId = p.loadSDF("../models/walls.sdf")[0] # p.resetBasePositionAndOrientation(wallId, [-8, 4., 0], (0., 0., 0.5, 0.5)) # wallId = p.loadSDF("../models/walls.sdf")[0] # p.resetBasePositionAndOrientation(wallId, [-8., 6., 0], (0., 0., 0.5, 0.5)) # wallId = p.loadSDF("../models/walls.sdf")[0] # p.resetBasePositionAndOrientation(wallId, [-8., 8., 0], (0., 0., 0.5, 0.5)) # wallId = p.loadSDF("../models/walls.sdf")[0] # p.resetBasePositionAndOrientation(wallId, [-8., 10., 0], (0., 0., 0.5, 0.5)) # wallId = p.loadSDF("../models/walls.sdf")[0] # p.resetBasePositionAndOrientation(wallId, [-8., 12., 0], (0., 0., 0.5, 0.5)) # 
create 6 robots self.robots = [] for (i,j) in itertools.product(range(3), range(2)): self.robots.append(Robot([1. * i + 0.5, 1. * j - 0.5, 0.3], 2*i+j, self.dt)) p.stepSimulation() self.time = 0.0 self.stepSimulation() self.stepSimulation() def reset(self): """ Resets the position of all the robots """ for r in self.robots: r.reset() p.stepSimulation() def stepSimulation(self): """ Simulates one step simulation """ # for each robot construct list of neighbors for r in self.robots: r.neighbors = [] #reset neighbors r.messages_received = [] #reset message received pos1, or1 = r.get_pos_and_orientation() for j,r2 in enumerate(self.robots): if(r.id != r2.id): pos2, or2 = r2.get_pos_and_orientation() if(np.linalg.norm(pos1-pos2) < self.max_communication_distance): r.neighbors.append(j) # for each robot send and receive messages for i,r in enumerate(self.robots): for msg in r.messages_to_send: if msg[0] in r.neighbors: #then we can send the message self.robots[msg[0]].messages_received.append([i, msg[1]]) #add the sender id r.messages_to_send = [] # update the controllers if self.time > 1.0: for r in self.robots: r.compute_controller() # do one simulation step p.stepSimulation() self.time += self.dt
2.5
2
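A minimal driver for the World class above; this sketch assumes it is launched from the project's python/ directory so that robot.py and the ../models URDF paths resolve, and it simply advances the physics at the 1/250 s step configured in __init__:

from swarm_simulation import World

world = World()
world.reset()
for _ in range(250 * 60):      # roughly 60 simulated seconds at dt = 1/250 s
    world.stepSimulation()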
boto/ec2/elb/__init__.py
wt/boto
15
3145
<gh_stars>10-100 # Copyright (c) 2006-2012 <NAME> http://garnaat.org/ # Copyright (c) 2012 Amazon.com, Inc. or its affiliates. # All Rights Reserved # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # """ This module provides an interface to the Elastic Compute Cloud (EC2) load balancing service from AWS. """ from boto.connection import AWSQueryConnection from boto.ec2.instanceinfo import InstanceInfo from boto.ec2.elb.loadbalancer import LoadBalancer, LoadBalancerZones from boto.ec2.elb.instancestate import InstanceState from boto.ec2.elb.healthcheck import HealthCheck from boto.ec2.elb.listelement import ListElement from boto.regioninfo import RegionInfo, get_regions, load_regions import boto RegionData = load_regions().get('elasticloadbalancing', {}) def regions(): """ Get all available regions for the ELB service. :rtype: list :return: A list of :class:`boto.RegionInfo` instances """ return get_regions('elasticloadbalancing', connection_cls=ELBConnection) def connect_to_region(region_name, **kw_params): """ Given a valid region name, return a :class:`boto.ec2.elb.ELBConnection`. :param str region_name: The name of the region to connect to. :rtype: :class:`boto.ec2.ELBConnection` or ``None`` :return: A connection to the given region, or None if an invalid region name is given """ for region in regions(): if region.name == region_name: return region.connect(**kw_params) return None class ELBConnection(AWSQueryConnection): APIVersion = boto.config.get('Boto', 'elb_version', '2012-06-01') DefaultRegionName = boto.config.get('Boto', 'elb_region_name', 'us-east-1') DefaultRegionEndpoint = boto.config.get('Boto', 'elb_region_endpoint', 'elasticloadbalancing.us-east-1.amazonaws.com') def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, debug=0, https_connection_factory=None, region=None, path='/', security_token=None, validate_certs=True, profile_name=None): """ Init method to create a new connection to EC2 Load Balancing Service. .. note:: The region argument is overridden by the region specified in the boto configuration file. 
""" if not region: region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint) self.region = region super(ELBConnection, self).__init__(aws_access_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, self.region.endpoint, debug, https_connection_factory, path, security_token, validate_certs=validate_certs, profile_name=profile_name) def _required_auth_capability(self): return ['ec2'] def build_list_params(self, params, items, label): if isinstance(items, basestring): items = [items] for index, item in enumerate(items): params[label % (index + 1)] = item def get_all_load_balancers(self, load_balancer_names=None): """ Retrieve all load balancers associated with your account. :type load_balancer_names: list :keyword load_balancer_names: An optional list of load balancer names. :rtype: :py:class:`boto.resultset.ResultSet` :return: A ResultSet containing instances of :class:`boto.ec2.elb.loadbalancer.LoadBalancer` """ params = {} if load_balancer_names: self.build_list_params(params, load_balancer_names, 'LoadBalancerNames.member.%d') return self.get_list('DescribeLoadBalancers', params, [('member', LoadBalancer)]) def create_load_balancer(self, name, zones, listeners=None, subnets=None, security_groups=None, scheme='internet-facing', complex_listeners=None): """ Create a new load balancer for your account. By default the load balancer will be created in EC2. To create a load balancer inside a VPC, parameter zones must be set to None and subnets must not be None. The load balancer will be automatically created under the VPC that contains the subnet(s) specified. :type name: string :param name: The mnemonic name associated with the new load balancer :type zones: List of strings :param zones: The names of the availability zone(s) to add. :type listeners: List of tuples :param listeners: Each tuple contains three or four values, (LoadBalancerPortNumber, InstancePortNumber, Protocol, [SSLCertificateId]) where LoadBalancerPortNumber and InstancePortNumber are integer values between 1 and 65535, Protocol is a string containing either 'TCP', 'SSL', HTTP', or 'HTTPS'; SSLCertificateID is the ARN of a AWS IAM certificate, and must be specified when doing HTTPS. :type subnets: list of strings :param subnets: A list of subnet IDs in your VPC to attach to your LoadBalancer. :type security_groups: list of strings :param security_groups: The security groups assigned to your LoadBalancer within your VPC. :type scheme: string :param scheme: The type of a LoadBalancer. By default, Elastic Load Balancing creates an internet-facing LoadBalancer with a publicly resolvable DNS name, which resolves to public IP addresses. Specify the value internal for this option to create an internal LoadBalancer with a DNS name that resolves to private IP addresses. This option is only available for LoadBalancers attached to an Amazon VPC. :type complex_listeners: List of tuples :param complex_listeners: Each tuple contains four or five values, (LoadBalancerPortNumber, InstancePortNumber, Protocol, InstanceProtocol, SSLCertificateId). 
Where: - LoadBalancerPortNumber and InstancePortNumber are integer values between 1 and 65535 - Protocol and InstanceProtocol is a string containing either 'TCP', 'SSL', 'HTTP', or 'HTTPS' - SSLCertificateId is the ARN of an SSL certificate loaded into AWS IAM :rtype: :class:`boto.ec2.elb.loadbalancer.LoadBalancer` :return: The newly created :class:`boto.ec2.elb.loadbalancer.LoadBalancer` """ if not listeners and not complex_listeners: # Must specify one of the two options return None params = {'LoadBalancerName': name, 'Scheme': scheme} # Handle legacy listeners if listeners: for index, listener in enumerate(listeners): i = index + 1 protocol = listener[2].upper() params['Listeners.member.%d.LoadBalancerPort' % i] = listener[0] params['Listeners.member.%d.InstancePort' % i] = listener[1] params['Listeners.member.%d.Protocol' % i] = listener[2] if protocol == 'HTTPS' or protocol == 'SSL': params['Listeners.member.%d.SSLCertificateId' % i] = listener[3] # Handle the full listeners if complex_listeners: for index, listener in enumerate(complex_listeners): i = index + 1 protocol = listener[2].upper() InstanceProtocol = listener[3].upper() params['Listeners.member.%d.LoadBalancerPort' % i] = listener[0] params['Listeners.member.%d.InstancePort' % i] = listener[1] params['Listeners.member.%d.Protocol' % i] = listener[2] params['Listeners.member.%d.InstanceProtocol' % i] = listener[3] if protocol == 'HTTPS' or protocol == 'SSL': params['Listeners.member.%d.SSLCertificateId' % i] = listener[4] if zones: self.build_list_params(params, zones, 'AvailabilityZones.member.%d') if subnets: self.build_list_params(params, subnets, 'Subnets.member.%d') if security_groups: self.build_list_params(params, security_groups, 'SecurityGroups.member.%d') load_balancer = self.get_object('CreateLoadBalancer', params, LoadBalancer) load_balancer.name = name load_balancer.listeners = listeners load_balancer.availability_zones = zones load_balancer.subnets = subnets load_balancer.security_groups = security_groups return load_balancer def create_load_balancer_listeners(self, name, listeners=None, complex_listeners=None): """ Creates a Listener (or group of listeners) for an existing Load Balancer :type name: string :param name: The name of the load balancer to create the listeners for :type listeners: List of tuples :param listeners: Each tuple contains three or four values, (LoadBalancerPortNumber, InstancePortNumber, Protocol, [SSLCertificateId]) where LoadBalancerPortNumber and InstancePortNumber are integer values between 1 and 65535, Protocol is a string containing either 'TCP', 'SSL', HTTP', or 'HTTPS'; SSLCertificateID is the ARN of a AWS IAM certificate, and must be specified when doing HTTPS. :type complex_listeners: List of tuples :param complex_listeners: Each tuple contains four or five values, (LoadBalancerPortNumber, InstancePortNumber, Protocol, InstanceProtocol, SSLCertificateId). 
Where: - LoadBalancerPortNumber and InstancePortNumber are integer values between 1 and 65535 - Protocol and InstanceProtocol is a string containing either 'TCP', 'SSL', 'HTTP', or 'HTTPS' - SSLCertificateId is the ARN of an SSL certificate loaded into AWS IAM :return: The status of the request """ if not listeners and not complex_listeners: # Must specify one of the two options return None params = {'LoadBalancerName': name} # Handle the simple listeners if listeners: for index, listener in enumerate(listeners): i = index + 1 protocol = listener[2].upper() params['Listeners.member.%d.LoadBalancerPort' % i] = listener[0] params['Listeners.member.%d.InstancePort' % i] = listener[1] params['Listeners.member.%d.Protocol' % i] = listener[2] if protocol == 'HTTPS' or protocol == 'SSL': params['Listeners.member.%d.SSLCertificateId' % i] = listener[3] # Handle the full listeners if complex_listeners: for index, listener in enumerate(complex_listeners): i = index + 1 protocol = listener[2].upper() InstanceProtocol = listener[3].upper() params['Listeners.member.%d.LoadBalancerPort' % i] = listener[0] params['Listeners.member.%d.InstancePort' % i] = listener[1] params['Listeners.member.%d.Protocol' % i] = listener[2] params['Listeners.member.%d.InstanceProtocol' % i] = listener[3] if protocol == 'HTTPS' or protocol == 'SSL': params['Listeners.member.%d.SSLCertificateId' % i] = listener[4] return self.get_status('CreateLoadBalancerListeners', params) def delete_load_balancer(self, name): """ Delete a Load Balancer from your account. :type name: string :param name: The name of the Load Balancer to delete """ params = {'LoadBalancerName': name} return self.get_status('DeleteLoadBalancer', params) def delete_load_balancer_listeners(self, name, ports): """ Deletes a load balancer listener (or group of listeners) :type name: string :param name: The name of the load balancer to create the listeners for :type ports: List int :param ports: Each int represents the port on the ELB to be removed :return: The status of the request """ params = {'LoadBalancerName': name} for index, port in enumerate(ports): params['LoadBalancerPorts.member.%d' % (index + 1)] = port return self.get_status('DeleteLoadBalancerListeners', params) def enable_availability_zones(self, load_balancer_name, zones_to_add): """ Add availability zones to an existing Load Balancer All zones must be in the same region as the Load Balancer Adding zones that are already registered with the Load Balancer has no effect. :type load_balancer_name: string :param load_balancer_name: The name of the Load Balancer :type zones: List of strings :param zones: The name of the zone(s) to add. :rtype: List of strings :return: An updated list of zones for this Load Balancer. """ params = {'LoadBalancerName': load_balancer_name} self.build_list_params(params, zones_to_add, 'AvailabilityZones.member.%d') obj = self.get_object('EnableAvailabilityZonesForLoadBalancer', params, LoadBalancerZones) return obj.zones def disable_availability_zones(self, load_balancer_name, zones_to_remove): """ Remove availability zones from an existing Load Balancer. All zones must be in the same region as the Load Balancer. Removing zones that are not registered with the Load Balancer has no effect. You cannot remove all zones from an Load Balancer. :type load_balancer_name: string :param load_balancer_name: The name of the Load Balancer :type zones: List of strings :param zones: The name of the zone(s) to remove. 
:rtype: List of strings :return: An updated list of zones for this Load Balancer. """ params = {'LoadBalancerName': load_balancer_name} self.build_list_params(params, zones_to_remove, 'AvailabilityZones.member.%d') obj = self.get_object('DisableAvailabilityZonesForLoadBalancer', params, LoadBalancerZones) return obj.zones def modify_lb_attribute(self, load_balancer_name, attribute, value): """Changes an attribute of a Load Balancer :type load_balancer_name: string :param load_balancer_name: The name of the Load Balancer :type attribute: string :param attribute: The attribute you wish to change. * crossZoneLoadBalancing - Boolean (true) * accessLog - :py:class:`AccessLogAttribute` instance * connectionDraining - :py:class:`ConnectionDrainingAttribute` instance :type value: string :param value: The new value for the attribute :rtype: bool :return: Whether the operation succeeded or not """ bool_reqs = ('crosszoneloadbalancing',) if attribute.lower() in bool_reqs: if isinstance(value, bool): if value: value = 'true' else: value = 'false' params = {'LoadBalancerName': load_balancer_name} if attribute.lower() == 'crosszoneloadbalancing': params['LoadBalancerAttributes.CrossZoneLoadBalancing.Enabled' ] = value elif attribute.lower() == 'accesslog': params['LoadBalancerAttributes.AccessLog.Enabled'] = \ value.enabled and 'true' or 'false' params['LoadBalancerAttributes.AccessLog.S3BucketName'] = \ value.s3_bucket_name params['LoadBalancerAttributes.AccessLog.S3BucketPrefix'] = \ value.s3_bucket_prefix params['LoadBalancerAttributes.AccessLog.EmitInterval'] = \ value.emit_interval elif attribute.lower() == 'connectiondraining': params['LoadBalancerAttributes.ConnectionDraining.Enabled'] = \ value.enabled and 'true' or 'false' params['LoadBalancerAttributes.ConnectionDraining.Timeout'] = \ value.timeout else: raise ValueError('InvalidAttribute', attribute) return self.get_status('ModifyLoadBalancerAttributes', params, verb='GET') def get_all_lb_attributes(self, load_balancer_name): """Gets all Attributes of a Load Balancer :type load_balancer_name: string :param load_balancer_name: The name of the Load Balancer :rtype: boto.ec2.elb.attribute.LbAttributes :return: The attribute object of the ELB. """ from boto.ec2.elb.attributes import LbAttributes params = {'LoadBalancerName': load_balancer_name} return self.get_object('DescribeLoadBalancerAttributes', params, LbAttributes) def get_lb_attribute(self, load_balancer_name, attribute): """Gets an attribute of a Load Balancer This will make an EC2 call for each method call. :type load_balancer_name: string :param load_balancer_name: The name of the Load Balancer :type attribute: string :param attribute: The attribute you wish to see. * accessLog - :py:class:`AccessLogAttribute` instance * crossZoneLoadBalancing - Boolean * connectionDraining - :py:class:`ConnectionDrainingAttribute` instance :rtype: Attribute dependent :return: The new value for the attribute """ attributes = self.get_all_lb_attributes(load_balancer_name) if attribute.lower() == 'accesslog': return attributes.access_log if attribute.lower() == 'crosszoneloadbalancing': return attributes.cross_zone_load_balancing.enabled if attribute.lower() == 'connectiondraining': return attributes.connection_draining return None def register_instances(self, load_balancer_name, instances): """ Add new Instances to an existing Load Balancer. 
:type load_balancer_name: string :param load_balancer_name: The name of the Load Balancer :type instances: List of strings :param instances: The instance ID's of the EC2 instances to add. :rtype: List of strings :return: An updated list of instances for this Load Balancer. """ params = {'LoadBalancerName': load_balancer_name} self.build_list_params(params, instances, 'Instances.member.%d.InstanceId') return self.get_list('RegisterInstancesWithLoadBalancer', params, [('member', InstanceInfo)]) def deregister_instances(self, load_balancer_name, instances): """ Remove Instances from an existing Load Balancer. :type load_balancer_name: string :param load_balancer_name: The name of the Load Balancer :type instances: List of strings :param instances: The instance ID's of the EC2 instances to remove. :rtype: List of strings :return: An updated list of instances for this Load Balancer. """ params = {'LoadBalancerName': load_balancer_name} self.build_list_params(params, instances, 'Instances.member.%d.InstanceId') return self.get_list('DeregisterInstancesFromLoadBalancer', params, [('member', InstanceInfo)]) def describe_instance_health(self, load_balancer_name, instances=None): """ Get current state of all Instances registered to an Load Balancer. :type load_balancer_name: string :param load_balancer_name: The name of the Load Balancer :type instances: List of strings :param instances: The instance ID's of the EC2 instances to return status for. If not provided, the state of all instances will be returned. :rtype: List of :class:`boto.ec2.elb.instancestate.InstanceState` :return: list of state info for instances in this Load Balancer. """ params = {'LoadBalancerName': load_balancer_name} if instances: self.build_list_params(params, instances, 'Instances.member.%d.InstanceId') return self.get_list('DescribeInstanceHealth', params, [('member', InstanceState)]) def configure_health_check(self, name, health_check): """ Define a health check for the EndPoints. :type name: string :param name: The mnemonic name associated with the load balancer :type health_check: :class:`boto.ec2.elb.healthcheck.HealthCheck` :param health_check: A HealthCheck object populated with the desired values. :rtype: :class:`boto.ec2.elb.healthcheck.HealthCheck` :return: The updated :class:`boto.ec2.elb.healthcheck.HealthCheck` """ params = {'LoadBalancerName': name, 'HealthCheck.Timeout': health_check.timeout, 'HealthCheck.Target': health_check.target, 'HealthCheck.Interval': health_check.interval, 'HealthCheck.UnhealthyThreshold': health_check.unhealthy_threshold, 'HealthCheck.HealthyThreshold': health_check.healthy_threshold} return self.get_object('ConfigureHealthCheck', params, HealthCheck) def set_lb_listener_SSL_certificate(self, lb_name, lb_port, ssl_certificate_id): """ Sets the certificate that terminates the specified listener's SSL connections. The specified certificate replaces any prior certificate that was used on the same LoadBalancer and port. """ params = {'LoadBalancerName': lb_name, 'LoadBalancerPort': lb_port, 'SSLCertificateId': ssl_certificate_id} return self.get_status('SetLoadBalancerListenerSSLCertificate', params) def create_app_cookie_stickiness_policy(self, name, lb_name, policy_name): """ Generates a stickiness policy with sticky session lifetimes that follow that of an application-generated cookie. This policy can only be associated with HTTP listeners. 
This policy is similar to the policy created by CreateLBCookieStickinessPolicy, except that the lifetime of the special Elastic Load Balancing cookie follows the lifetime of the application-generated cookie specified in the policy configuration. The load balancer only inserts a new stickiness cookie when the application response includes a new application cookie. If the application cookie is explicitly removed or expires, the session stops being sticky until a new application cookie is issued. """ params = {'CookieName': name, 'LoadBalancerName': lb_name, 'PolicyName': policy_name} return self.get_status('CreateAppCookieStickinessPolicy', params) def create_lb_cookie_stickiness_policy(self, cookie_expiration_period, lb_name, policy_name): """ Generates a stickiness policy with sticky session lifetimes controlled by the lifetime of the browser (user-agent) or a specified expiration period. This policy can only be associated only with HTTP listeners. When a load balancer implements this policy, the load balancer uses a special cookie to track the backend server instance for each request. When the load balancer receives a request, it first checks to see if this cookie is present in the request. If so, the load balancer sends the request to the application server specified in the cookie. If not, the load balancer sends the request to a server that is chosen based on the existing load balancing algorithm. A cookie is inserted into the response for binding subsequent requests from the same user to that server. The validity of the cookie is based on the cookie expiration time, which is specified in the policy configuration. None may be passed for cookie_expiration_period. """ params = {'LoadBalancerName': lb_name, 'PolicyName': policy_name} if cookie_expiration_period is not None: params['CookieExpirationPeriod'] = cookie_expiration_period return self.get_status('CreateLBCookieStickinessPolicy', params) def create_lb_policy(self, lb_name, policy_name, policy_type, policy_attributes): """ Creates a new policy that contais the necessary attributes depending on the policy type. Policies are settings that are saved for your load balancer and that can be applied to the front-end listener, or the back-end application server. """ params = {'LoadBalancerName': lb_name, 'PolicyName': policy_name, 'PolicyTypeName': policy_type} for index, (name, value) in enumerate(policy_attributes.iteritems(), 1): params['PolicyAttributes.member.%d.AttributeName' % index] = name params['PolicyAttributes.member.%d.AttributeValue' % index] = value else: params['PolicyAttributes'] = '' return self.get_status('CreateLoadBalancerPolicy', params) def delete_lb_policy(self, lb_name, policy_name): """ Deletes a policy from the LoadBalancer. The specified policy must not be enabled for any listeners. """ params = {'LoadBalancerName': lb_name, 'PolicyName': policy_name} return self.get_status('DeleteLoadBalancerPolicy', params) def set_lb_policies_of_listener(self, lb_name, lb_port, policies): """ Associates, updates, or disables a policy with a listener on the load balancer. Currently only zero (0) or one (1) policy can be associated with a listener. 
""" params = {'LoadBalancerName': lb_name, 'LoadBalancerPort': lb_port} if len(policies): self.build_list_params(params, policies, 'PolicyNames.member.%d') else: params['PolicyNames'] = '' return self.get_status('SetLoadBalancerPoliciesOfListener', params) def set_lb_policies_of_backend_server(self, lb_name, instance_port, policies): """ Replaces the current set of policies associated with a port on which the back-end server is listening with a new set of policies. """ params = {'LoadBalancerName': lb_name, 'InstancePort': instance_port} if policies: self.build_list_params(params, policies, 'PolicyNames.member.%d') else: params['PolicyNames'] = '' return self.get_status('SetLoadBalancerPoliciesForBackendServer', params) def apply_security_groups_to_lb(self, name, security_groups): """ Applies security groups to the load balancer. Applying security groups that are already registered with the Load Balancer has no effect. :type name: string :param name: The name of the Load Balancer :type security_groups: List of strings :param security_groups: The name of the security group(s) to add. :rtype: List of strings :return: An updated list of security groups for this Load Balancer. """ params = {'LoadBalancerName': name} self.build_list_params(params, security_groups, 'SecurityGroups.member.%d') return self.get_list('ApplySecurityGroupsToLoadBalancer', params, None) def attach_lb_to_subnets(self, name, subnets): """ Attaches load balancer to one or more subnets. Attaching subnets that are already registered with the Load Balancer has no effect. :type name: string :param name: The name of the Load Balancer :type subnets: List of strings :param subnets: The name of the subnet(s) to add. :rtype: List of strings :return: An updated list of subnets for this Load Balancer. """ params = {'LoadBalancerName': name} self.build_list_params(params, subnets, 'Subnets.member.%d') return self.get_list('AttachLoadBalancerToSubnets', params, None) def detach_lb_from_subnets(self, name, subnets): """ Detaches load balancer from one or more subnets. :type name: string :param name: The name of the Load Balancer :type subnets: List of strings :param subnets: The name of the subnet(s) to detach. :rtype: List of strings :return: An updated list of subnets for this Load Balancer. """ params = {'LoadBalancerName': name} self.build_list_params(params, subnets, 'Subnets.member.%d') return self.get_list('DetachLoadBalancerFromSubnets', params, None)
1.875
2
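The listener tuples documented in create_load_balancer are easiest to read in a short usage sketch. The region, zone, instance id, and load balancer name below are placeholders, and credentials are assumed to come from the usual boto configuration:

import boto.ec2.elb
from boto.ec2.elb import HealthCheck

conn = boto.ec2.elb.connect_to_region('us-east-1')
hc = HealthCheck(target='HTTP:80/ping', interval=20, timeout=5,
                 healthy_threshold=3, unhealthy_threshold=5)
# (LoadBalancerPort, InstancePort, Protocol) triples, as described in the docstring.
lb = conn.create_load_balancer('example-lb', ['us-east-1a'],
                               listeners=[(80, 8080, 'HTTP')])
lb.configure_health_check(hc)
lb.register_instances(['i-0123456789abcdef0'])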
basis_set_exchange/cli/bse_cli.py
atomse/basis_set_exchange
0
3146
<reponame>atomse/basis_set_exchange ''' Command line interface for the basis set exchange ''' import argparse import argcomplete from .. import version from .bse_handlers import bse_cli_handle_subcmd from .check import cli_check_normalize_args from .complete import (cli_case_insensitive_validator, cli_family_completer, cli_role_completer, cli_bsname_completer, cli_write_fmt_completer, cli_read_fmt_completer, cli_reffmt_completer) def run_bse_cli(): ################################################################################################ # NOTE: I am deliberately not using the 'choices' argument in add_argument. I could use it # for formats, etc, however I wouldn't want to use it for basis set names. Therefore, I handle # all of that manually so that error output is consistent and clean ################################################################################################ ######################################## # Main global options ######################################## parser = argparse.ArgumentParser(description='Description of your program') parser.add_argument('-V', action='version', version='basis_set_exchange ' + version()) parser.add_argument('-d', '--data-dir', metavar='PATH', help='Override which data directory to use') parser.add_argument('-o', '--output', metavar='PATH', help='Output to given file rather than stdout') subparsers = parser.add_subparsers(metavar='subcommand', dest='subcmd') subparsers.required = True # https://bugs.python.org/issue9253#msg186387 ######################################## # Listing of data-independent info ######################################## # list-formats subcommand subp = subparsers.add_parser('list-formats', help='Output a list of basis set formats that can be used with obtaining a basis set') subp.add_argument('-n', '--no-description', action='store_true', help='Print only the format names') # list-writer-formats subcommand subp = subparsers.add_parser('list-writer-formats', help='Output a list available basis set formats that can be written') subp.add_argument('-n', '--no-description', action='store_true', help='Print only the format names') # list-reader-formats subp = subparsers.add_parser('list-reader-formats', help='Output a list of basis set formats that can be read') subp.add_argument('-n', '--no-description', action='store_true', help='Print only the format names') # list-ref-formats subcommand subp = subparsers.add_parser('list-ref-formats', help='Output a list all available reference formats and descriptions') subp.add_argument('-n', '--no-description', action='store_true', help='Print only the reference format names') # list-roles subcommand subp = subparsers.add_parser('list-roles', help='Output a list all available roles and descriptions') subp.add_argument('-n', '--no-description', action='store_true', help='Print only the role names') ######################################## # Listing of general info and metadata ######################################## # get-data-dir subparsers.add_parser('get-data-dir', help='Output the default data directory of this package') # list-basis-sets subcommand subp = subparsers.add_parser('list-basis-sets', help='Output a list all available basis sets and descriptions') subp.add_argument('-n', '--no-description', action='store_true', help='Print only the basis set names') subp.add_argument('-f', '--family', help='Limit the basis set list to only the specified family').completer = cli_family_completer subp.add_argument('-r', '--role', help='Limit the basis set list to only the 
specified role').completer = cli_role_completer subp.add_argument('-s', '--substr', help='Limit the basis set list to only basis sets whose name contains the specified substring') subp.add_argument('-e', '--elements', help='Limit the basis set list to only basis sets that contain all the given elements') # list-families subcommand subparsers.add_parser('list-families', help='Output a list all available basis set families') # lookup-by-role subp = subparsers.add_parser('lookup-by-role', help='Lookup a companion/auxiliary basis by primary basis and role') subp.add_argument('basis', help='Name of the primary basis we want the auxiliary basis for').completer = cli_bsname_completer subp.add_argument('role', help='Role of the auxiliary basis to look for').completer = cli_role_completer ################################# # Output of info ################################# # get-basis subcommand subp = subparsers.add_parser('get-basis', help='Output a formatted basis set') subp.add_argument('basis', help='Name of the basis set to output').completer = cli_bsname_completer subp.add_argument('fmt', help='Which format to output the basis set as').completer = cli_write_fmt_completer subp.add_argument('--elements', help='Which elements of the basis set to output. Default is all defined in the given basis') subp.add_argument('--version', help='Which version of the basis set to output. Default is the latest version') subp.add_argument('--noheader', action='store_true', help='Do not output the header at the top') subp.add_argument('--unc-gen', action='store_true', help='Remove general contractions') subp.add_argument('--unc-spdf', action='store_true', help='Remove combined sp, spd, ... contractions') subp.add_argument('--unc-seg', action='store_true', help='Remove general contractions') subp.add_argument('--opt-gen', action='store_true', help='Optimize general contractions') subp.add_argument('--make-gen', action='store_true', help='Make the basis set as generally-contracted as possible') # get-refs subcommand subp = subparsers.add_parser('get-refs', help='Output references for a basis set') subp.add_argument('basis', help='Name of the basis set to output the references for').completer = cli_bsname_completer subp.add_argument('reffmt', help='Which format to output the references as').completer = cli_reffmt_completer subp.add_argument('--elements', help='Which elements to output the references for. 
Default is all defined in the given basis.') subp.add_argument('--version', help='Which version of the basis set to get the references for') # get-info subcommand subp = subparsers.add_parser('get-info', help='Output general info and metadata for a basis set') subp.add_argument('basis', help='Name of the basis set to output the info for').completer = cli_bsname_completer # get-notes subcommand subp = subparsers.add_parser('get-notes', help='Output the notes for a basis set') subp.add_argument('basis', help='Name of the basis set to output the notes for').completer = cli_bsname_completer # get-family subcommand subp = subparsers.add_parser('get-family', help='Output the family of a basis set') subp.add_argument('basis', help='Name of the basis set to output the family for').completer = cli_bsname_completer # get-versions subcommand subp = subparsers.add_parser('get-versions', help='Output a list all available versions of a basis set') subp.add_argument('basis', help='Name of the basis set to list the versions of').completer = cli_bsname_completer subp.add_argument('-n', '--no-description', action='store_true', help='Print only the version numbers') # get-family-notes subcommand subp = subparsers.add_parser('get-family-notes', help='Get the notes of a family of basis sets') subp.add_argument('family', type=str.lower, help='The basis set family to the get the notes of').completer = cli_family_completer ################################# # Converting basis sets ################################# subp = subparsers.add_parser('convert-basis', help='Convert basis set files from one format to another') subp.add_argument('input_file', type=str, help='Basis set file to convert') subp.add_argument('output_file', type=str, help='Converted basis set file') subp.add_argument('--in-fmt', type=str, default=None, help='Input format (default: autodetected from input filename').completer = cli_read_fmt_completer subp.add_argument('--out-fmt', type=str, default=None, help='Output format (default: autodetected from output filename').completer = cli_write_fmt_completer ################################# # Creating bundles ################################# subp = subparsers.add_parser('create-bundle', help='Create a bundle of basis sets') subp.add_argument('fmt', help='Which format to output the basis set as').completer = cli_write_fmt_completer subp.add_argument('reffmt', help='Which format to output the references as').completer = cli_reffmt_completer subp.add_argument('bundle_file', help='Bundle/Archive file to create') subp.add_argument('--archive-type', help='Override the type of archive to create (zip or tbz)') ############################# # DONE WITH SUBCOMMANDS ############################# # setup autocomplete argcomplete.autocomplete(parser, validator=cli_case_insensitive_validator) # Now parse and handle the args args = parser.parse_args() # Check and make sure basis sets, roles, etc, are valid args = cli_check_normalize_args(args) # Actually generate the output output = bse_cli_handle_subcmd(args) if args.output: with open(args.output, 'w', encoding='utf-8') as outfile: outfile.write(output) else: print(output) return 0
2.109375
2
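Because run_bse_cli parses sys.argv, the subcommands defined above can also be exercised from Python by patching the argument vector; the basis name, output format, and element list here are illustrative values rather than defaults of the tool:

import sys
from basis_set_exchange.cli.bse_cli import run_bse_cli

# Equivalent to running: bse get-basis cc-pvdz nwchem --elements 1,8
sys.argv = ['bse', 'get-basis', 'cc-pvdz', 'nwchem', '--elements', '1,8']
run_bse_cli()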
Backjoon/1929.py
hanjungwoo1/CodingTest
3
3147
""" 입력 예시 3 16 출력 예시 3 5 7 11 13 """ import math left, right = map(int, input().split()) array = [True for i in range(right+1)] array[1] = 0 for i in range(2, int(math.sqrt(right)) + 1): if array[i] == True: j = 2 while i * j <= right: array[i * j] = False j += 1 for i in range(left, right+1): if array[i]: print(i)
3.390625
3
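The same sieve of Eratosthenes, wrapped in a reusable function instead of reading stdin; starting the inner loop at i*i is a standard small optimization over the multiplier loop above:

import math

def primes_between(left, right):
    is_prime = [True] * (right + 1)
    is_prime[0:2] = [False, False]
    for i in range(2, int(math.sqrt(right)) + 1):
        if is_prime[i]:
            for j in range(i * i, right + 1, i):
                is_prime[j] = False
    return [n for n in range(left, right + 1) if is_prime[n]]

print(primes_between(3, 16))   # [3, 5, 7, 11, 13]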
tensorflow/tools/quantization/quantize_graph_test.py
tianyapiaozi/tensorflow
374
3148
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests the graph quantization script. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys import numpy as np from tensorflow.core.framework import graph_pb2 from tensorflow.python.client import session from tensorflow.python.framework import dtypes from tensorflow.python.framework import graph_util from tensorflow.python.framework import importer from tensorflow.python.framework import ops as ops_lib from tensorflow.python.platform import flags as flags_lib from tensorflow.python.platform import test from tensorflow.python.platform import tf_logging from tensorflow.tools.quantization import quantize_graph flags = flags_lib FLAGS = flags.FLAGS def run_graph_def(graph_def, input_map, outputs): graph = ops_lib.Graph() with graph.as_default(): importer.import_graph_def(graph_def, input_map={}, name="") with session.Session(graph=graph) as sess: results = sess.run(outputs, feed_dict=input_map) return results def test_mat_mul(m, n, k, a, b): """Tests a MatMul replacement.""" a_constant_name = "a_constant" b_constant_name = "b_constant" mat_mul_name = "mat_mul" float_graph_def = graph_pb2.GraphDef() a_constant = quantize_graph.create_constant_node( a_constant_name, value=a, dtype=dtypes.float32, shape=[m, k]) float_graph_def.node.extend([a_constant]) b_constant = quantize_graph.create_constant_node( b_constant_name, value=b, dtype=dtypes.float32, shape=[k, n]) float_graph_def.node.extend([b_constant]) mat_mul_node = quantize_graph.create_node("MatMul", mat_mul_name, [a_constant_name, b_constant_name]) quantize_graph.set_attr_dtype(mat_mul_node, "T", dtypes.float32) quantize_graph.set_attr_bool(mat_mul_node, "transpose_a", False) quantize_graph.set_attr_bool(mat_mul_node, "transpose_b", False) float_graph_def.node.extend([mat_mul_node]) test_graph(float_graph_def, {}, [mat_mul_name]) def test_conv(depth, image_width, image_height, image_batch_count, filter_size, filter_count, stride, padding, input_values, filter_values): """Tests a Conv replacement.""" input_constant_name = "input_constant" filter_constant_name = "filter_constant" conv_name = "conv" float_graph_def = graph_pb2.GraphDef() input_constant = quantize_graph.create_constant_node( input_constant_name, value=input_values, dtype=dtypes.float32, shape=[image_batch_count, image_height, image_width, depth]) float_graph_def.node.extend([input_constant]) filter_constant = quantize_graph.create_constant_node( filter_constant_name, value=filter_values, dtype=dtypes.float32, shape=[filter_size, filter_size, depth, filter_count]) float_graph_def.node.extend([filter_constant]) conv_node = quantize_graph.create_node( "Conv2D", conv_name, [input_constant_name, filter_constant_name]) quantize_graph.set_attr_dtype(conv_node, "T", dtypes.float32) quantize_graph.set_attr_int_list(conv_node, "strides", [1, 
stride, stride, 1]) quantize_graph.set_attr_string(conv_node, "padding", padding) float_graph_def.node.extend([conv_node]) test_graph(float_graph_def, {}, [conv_name]) def are_tensors_near(a, b, tolerance): """Tests whether two tensors are nearly identical. This is a specialized comparison function designed to help debug problems with quantization. It prints out information about the differences between tensors on failure, paying special attention to possible biases by looking at the mean and absolute average errors. Args: a: First comparison tensor. b: Second comparison tensor. tolerance: Float value indicating how large an error between values is ok. Returns: Boolean indicating whether the two inputs were close enough. """ flat_a = a.flatten() flat_b = b.flatten() if len(flat_a) != len(flat_b): tf_logging.info("Tensors are different sizes: " + str(len(flat_a)) + " vs " + str(len(flat_b))) return False value_count = len(flat_a) how_many_different = 0 total_difference = 0 total_abs_difference = 0 for index in range(value_count): a_value = flat_a[index] b_value = flat_b[index] difference = a_value - b_value total_difference += difference total_abs_difference += abs(difference) if abs(difference) > tolerance: how_many_different += 1 mean_difference = total_difference / value_count mean_abs_difference = total_abs_difference / value_count proportion_different = (how_many_different * 1.0) / value_count if how_many_different == 0: return True else: tf_logging.info("Tensors have {0} different values ({1}%), with mean" " difference {2} and mean absolute difference {3}".format( how_many_different, proportion_different * 100, mean_difference, mean_abs_difference)) return False def get_top_value(input_values): max_value = None max_index = None for index, value in enumerate(input_values.flatten()): if max_value is None or value > max: max_value = value max_index = index return max_index, max_value def test_graph(float_graph_def, input_map, output_names, log_graph=False): """Runs the float graph through the rewriter and tests the results.""" float_results = run_graph_def( float_graph_def, input_map, [output_name + ":0" for output_name in output_names]) # TODO(petewarden): round test is currently failing because there is no # RoundToSteps op available. # round_rewriter = quantize_graph.GraphRewriter(float_graph_def, "round") # round_graph_def = round_rewriter.rewrite(output_name) # round_results = run_graph_def(round_graph_def, input_map, # [output_name + ":0"]) # assert are_tensors_near(expected, round_results[0], 1.0) # # TODO(petewarden): Add test for "quantize" mode. eightbit_rewriter = quantize_graph.GraphRewriter( float_graph_def, "eightbit", quantized_input_range=None) eightbit_graph_def = eightbit_rewriter.rewrite(output_names) eightbit_results = run_graph_def( eightbit_graph_def, input_map, [output_name + ":0" for output_name in output_names]) for expected, result in zip(float_results, eightbit_results): assert are_tensors_near(expected, result, 1.0) if log_graph: tf_logging.info("8bit:\n%s", str(eightbit_graph_def)) # Test the weights_rounded mode. This uses the default bit_depth. 
weights_rounded_rewriter = quantize_graph.GraphRewriter( float_graph_def, "weights_rounded", quantized_input_range=None) weights_rounded_graph_def = weights_rounded_rewriter.rewrite(output_names) weights_rounded_results = run_graph_def( weights_rounded_graph_def, input_map, [output_name + ":0" for output_name in output_names]) for expected, result in zip(float_results, weights_rounded_results): assert are_tensors_near(expected, result, 1.0) class QuantizeGraphTest(test.TestCase): def test_negative_const_problem(self): shape_constant_name = "shape_constant" shape_constant = quantize_graph.create_constant_node( shape_constant_name, value=-0.8, dtype=dtypes.float32, shape=[1]) quantization_result = quantize_graph.quantize_weight_eightbit( shape_constant, b"MIN_COMBINED") self.assertEqual(4, len(quantization_result)) def test_odd_padding_problem(self): """Tests one error case we ran into in a real graph.""" test_conv(1, 4, 4, 1, 3, 1, 2, b"SAME", [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], [1, 2, 3, 4, 5, 6, 7, 8, 9]) def test_mat_mul_tiny(self): # These tests are added to test the generate case where # min(matrix) == max(matrix), which used to cause problems. test_mat_mul(1, 1, 1, [2], [3]) test_mat_mul(1, 2, 1, [1], [2, 3]) test_mat_mul(1, 1, 2, [1, 1], [1, 1]) test_mat_mul(1, 1, 2, [0, 0], [1, 1]) # The general case. test_mat_mul(1, 1, 2, [1, 2], [1, 2]) def test_mat_mul_small(self): test_mat_mul(2, 4, 3, [1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]) def test_conv(self): test_conv(1, 4, 3, 1, 3, 1, 1, b"SAME", [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], [1, 4, 7, 2, 5, 8, 3, 6, 9]) def test_reshape(self): """Tests that MatMul->Reshape->MatMul avoids extra quantize/dequantize.""" def make_matmul(name, a, b): n = quantize_graph.create_node("MatMul", name, [a.name, b.name]) quantize_graph.set_attr_dtype(n, "T", dtypes.float32) quantize_graph.set_attr_bool(n, "transpose_a", False) quantize_graph.set_attr_bool(n, "transpose_b", False) return n # matmul_1 = input*weight_1 input_node = quantize_graph.create_constant_node( "input", value=[0, 1, 2, 3], dtype=dtypes.float32, shape=[4, 1]) weight_1_node = quantize_graph.create_constant_node( "weight_1", value=[.5, .6, .7, .8, .9], dtype=dtypes.float32, shape=[1, 5]) matmul_1_node = make_matmul("matmul_1", input_node, weight_1_node) # Reshape 4x5 to 10x2. new_shape_node = quantize_graph.create_constant_node( "new_shape_node", value=[10, 2], dtype=dtypes.int32, shape=[2]) reshape_node = quantize_graph.create_node( "Reshape", "reshape", [matmul_1_node.name, new_shape_node.name]) quantize_graph.set_attr_dtype(reshape_node, "T", dtypes.float32) # matmul_2_node = reshape*weight_2 weight_2_node = quantize_graph.create_constant_node( "weight_2", value=[1.5, 2.5], dtype=dtypes.float32, shape=[2, 1]) matmul_2_node = make_matmul("matmul_2", reshape_node, weight_2_node) g = graph_pb2.GraphDef() g.node.extend([ input_node, weight_1_node, matmul_1_node, new_shape_node, reshape_node, weight_2_node, matmul_2_node ]) # Test the graph test_graph(g, {}, ["matmul_2"]) # Verify there is only one Quantize and one Requantize op. eightbit_rewriter = quantize_graph.GraphRewriter( g, "eightbit", quantized_input_range=None) eightbit_graph_def = eightbit_rewriter.rewrite(["matmul_2"]) ops = [node.op for node in eightbit_graph_def.node] # No quantize since all inputs are const and can be quantized up-front. 
self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize")) self.assertEqual(1, ops.count("QuantizedReshape")) # One dequantize at the end. self.assertEqual(1, ops.count("Dequantize")) def test_quantize_array(self): # Test invalid parameters (empty array, or 0 buckets. self.assertRaises(ValueError, quantize_graph.quantize_array, np.array([]), 2) self.assertRaises(ValueError, quantize_graph.quantize_array, np.array([1, 2]), 0) # Test input array of length 1. arr = np.array([1]) qarr = quantize_graph.quantize_array(arr, 1) self.assertEqual(arr, qarr) qarr = quantize_graph.quantize_array(arr, 2) self.assertEqual(arr, qarr) # Test input array with all elements equal. arr = np.array([1, 1, 1]) qarr = quantize_graph.quantize_array(arr, 10) self.assertTrue((np.array([1, 1, 1]) == qarr).all()) # Test "normal" input arrays. arr = np.array([0, 0.3, 0.6, 1]) qarr = quantize_graph.quantize_array(arr, 1) self.assertTrue((np.array([0.5, 0.5, 0.5, 0.5]) == qarr).all()) qarr = quantize_graph.quantize_array(arr, 2) self.assertTrue((np.array([0.25, 0.25, 0.75, 0.75]) == qarr).all()) qarr = quantize_graph.quantize_array(arr.reshape((2, 2)), 2) self.assertTrue((np.array([[0.25, 0.25], [0.75, 0.75]]) == qarr).all()) def test_non_float_concat(self): concat_dim = quantize_graph.create_constant_node( "concat_dim", value=0, dtype=dtypes.int32, shape=[]) a = quantize_graph.create_constant_node( "a", value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=dtypes.int32, shape=[2, 2, 3]) b = quantize_graph.create_constant_node( "b", value=[13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24], dtype=dtypes.int32, shape=[2, 2, 3]) concat = quantize_graph.create_node("Concat", "concat", [concat_dim.name, a.name, b.name]) quantize_graph.set_attr_int(concat, "N", 2) quantize_graph.set_attr_dtype(concat, "T", dtypes.int32) g = graph_pb2.GraphDef() g.node.extend([concat_dim, a, b, concat]) test_graph(g, {}, [concat.name]) def test_non_float_reshape(self): a = quantize_graph.create_constant_node( "a", value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=dtypes.int32, shape=[2, 2, 3]) shape = quantize_graph.create_constant_node( "shape", value=[12], dtype=dtypes.int32, shape=[1]) reshape = quantize_graph.create_node("Reshape", "reshape", [a.name, shape.name]) quantize_graph.set_attr_dtype(reshape, "T", dtypes.int32) g = graph_pb2.GraphDef() g.node.extend([a, shape, reshape]) test_graph(g, {}, [reshape.name]) def test_concat(self): shape_constant_name = "shape_constant" a_constant_name = "a_constant" b_constant_name = "b_constant" concat_name = "concat" float_graph_def = graph_pb2.GraphDef() shape_constant = quantize_graph.create_constant_node( shape_constant_name, value=0, dtype=dtypes.int32, shape=[]) float_graph_def.node.extend([shape_constant]) a_constant = quantize_graph.create_constant_node( a_constant_name, value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=dtypes.float32, shape=[2, 2, 3]) float_graph_def.node.extend([a_constant]) b_constant = quantize_graph.create_constant_node( b_constant_name, value=[13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24], dtype=dtypes.float32, shape=[2, 2, 3]) float_graph_def.node.extend([b_constant]) concat_node = quantize_graph.create_node( "Concat", concat_name, [shape_constant_name, a_constant_name, b_constant_name]) quantize_graph.set_attr_int(concat_node, "N", 2) quantize_graph.set_attr_dtype(concat_node, "T", dtypes.float32) float_graph_def.node.extend([concat_node]) test_graph(float_graph_def, {}, [concat_name]) # Verify the concat is quantized. 
eightbit_rewriter = quantize_graph.GraphRewriter( float_graph_def, "eightbit", quantized_input_range=None) eightbit_graph_def = eightbit_rewriter.rewrite([concat_name]) ops = [node.op for node in eightbit_graph_def.node] self.assertEqual(1, ops.count("QuantizedConcat")) def test_multiple_outputs(self): input_constant_name = "input_constant" split_constant_name = "split_constant" split_name = "split" concat_constant_name = "concat_constant" concat_name = "concat" float_graph_def = graph_pb2.GraphDef() input_constant = quantize_graph.create_constant_node( input_constant_name, value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=dtypes.float32, shape=[2, 6]) float_graph_def.node.extend([input_constant]) split_constant = quantize_graph.create_constant_node( split_constant_name, value=1, dtype=dtypes.int32, shape=[]) float_graph_def.node.extend([split_constant]) split_node = quantize_graph.create_node( "Split", split_name, [split_constant_name, input_constant_name]) quantize_graph.set_attr_int(split_node, "num_split", 2) quantize_graph.set_attr_dtype(split_node, "T", dtypes.float32) float_graph_def.node.extend([split_node]) concat_constant = quantize_graph.create_constant_node( concat_constant_name, value=1, dtype=dtypes.int32, shape=[]) float_graph_def.node.extend([concat_constant]) concat_node = quantize_graph.create_node( "Concat", concat_name, [concat_constant_name, split_name + ":0", split_name + ":1"]) quantize_graph.set_attr_int(concat_node, "N", 2) quantize_graph.set_attr_dtype(concat_node, "T", dtypes.float32) float_graph_def.node.extend([concat_node]) test_graph(float_graph_def, {}, [concat_name]) def test_node_name_from_input(self): self.assertEqual("SomeName", quantize_graph.node_name_from_input("^SomeName:2")) def test_unique_node_name_from_input(self): self.assertEqual("__hat__SomeName__port__2", quantize_graph.unique_node_name_from_input("^SomeName:2")) def test_identity(self): input_constant_name = "input_constant" identity_name = "identity" float_graph_def = graph_pb2.GraphDef() input_constant = quantize_graph.create_constant_node( input_constant_name, value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=dtypes.float32, shape=[2, 6]) float_graph_def.node.extend([input_constant]) identity_node = quantize_graph.create_node("Identity", identity_name, [input_constant_name]) quantize_graph.set_attr_dtype(identity_node, "T", dtypes.float32) float_graph_def.node.extend([identity_node]) mul_name = "mul" mul_node = quantize_graph.create_node("Mul", mul_name, [identity_name, identity_name]) quantize_graph.set_attr_dtype(mul_node, "T", dtypes.float32) float_graph_def.node.extend([mul_node]) test_graph(float_graph_def, {}, [mul_name]) def test_keep_control_edges(self): no_op_name = "no_op" a_constant_name = "a_constant" b_constant_name = "b_constant" a_check_name = "a_check" b_check_name = "b_check" a_identity_name = "a_identity" b_identity_name = "b_identity" add_name = "add" graph_def = graph_pb2.GraphDef() no_op = quantize_graph.create_node("NoOp", no_op_name, []) graph_def.node.extend([no_op]) a_constant = quantize_graph.create_constant_node( a_constant_name, value=1, dtype=dtypes.float32, shape=[]) graph_def.node.extend([a_constant]) a_check_node = quantize_graph.create_node("CheckNumerics", a_check_name, [a_constant_name]) graph_def.node.extend([a_check_node]) a_identity_node = quantize_graph.create_node( "Identity", a_identity_name, [a_constant_name, "^" + a_check_name, "^" + no_op_name]) graph_def.node.extend([a_identity_node]) b_constant = quantize_graph.create_constant_node( 
b_constant_name, value=1, dtype=dtypes.float32, shape=[]) graph_def.node.extend([b_constant]) b_check_node = quantize_graph.create_node("CheckNumerics", b_check_name, [b_constant_name]) graph_def.node.extend([b_check_node]) b_identity_node = quantize_graph.create_node( "Identity", b_identity_name, [b_constant_name, "^" + b_check_name]) graph_def.node.extend([b_identity_node]) add_node = quantize_graph.create_node("Add", add_name, [a_identity_name, b_identity_name]) quantize_graph.set_attr_dtype(add_node, "T", dtypes.float32) graph_def.node.extend([add_node]) expected_output = graph_pb2.GraphDef() no_op = quantize_graph.create_node("NoOp", no_op_name, []) expected_output.node.extend([no_op]) a_constant = quantize_graph.create_constant_node( a_constant_name, value=1, dtype=dtypes.float32, shape=[]) expected_output.node.extend([a_constant]) a_identity_node = quantize_graph.create_node( "Identity", a_identity_name, [a_constant_name, "^" + no_op_name]) expected_output.node.extend([a_identity_node]) b_constant = quantize_graph.create_constant_node( b_constant_name, value=1, dtype=dtypes.float32, shape=[]) expected_output.node.extend([b_constant]) add_node = quantize_graph.create_node("Add", add_name, [a_identity_name, b_constant_name]) quantize_graph.set_attr_dtype(add_node, "T", dtypes.float32) expected_output.node.extend([add_node]) expected_output.versions.CopyFrom(graph_def.versions) expected_output.library.CopyFrom(graph_def.library) output = graph_util.remove_training_nodes(graph_def) stripped_output = graph_util.extract_sub_graph(output, [add_name]) self.assertProtoEquals(expected_output, stripped_output) def test_batch_norm(self): input_constant_name = "input_constant" mean_constant_name = "mean_constant" variance_constant_name = "variance_constant" beta_constant_name = "beta_constant" gamma_constant_name = "gamma_constant" batch_norm_name = "batch_norm" float_graph_def = graph_pb2.GraphDef() input_constant = quantize_graph.create_constant_node( input_constant_name, value=[1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6], dtype=dtypes.float32, shape=[1, 1, 6, 2]) float_graph_def.node.extend([input_constant]) mean_constant = quantize_graph.create_constant_node( mean_constant_name, value=[10, 20], dtype=dtypes.float32, shape=[2]) float_graph_def.node.extend([mean_constant]) variance_constant = quantize_graph.create_constant_node( variance_constant_name, value=[0.25, 0.5], dtype=dtypes.float32, shape=[2]) float_graph_def.node.extend([variance_constant]) beta_constant = quantize_graph.create_constant_node( beta_constant_name, value=[0.1, 0.6], dtype=dtypes.float32, shape=[2]) float_graph_def.node.extend([beta_constant]) gamma_constant = quantize_graph.create_constant_node( gamma_constant_name, value=[0, 0], dtype=dtypes.float32, shape=[2]) float_graph_def.node.extend([gamma_constant]) batch_norm_node = quantize_graph.create_node( "BatchNormWithGlobalNormalization", batch_norm_name, [ input_constant_name, mean_constant_name, variance_constant_name, beta_constant_name, gamma_constant_name ]) quantize_graph.set_attr_dtype(batch_norm_node, "T", dtypes.float32) quantize_graph.set_attr_bool(batch_norm_node, "scale_after_normalization", False) quantize_graph.set_attr_float(batch_norm_node, "variance_epsilon", 0.001) float_graph_def.node.extend([batch_norm_node]) test_graph(float_graph_def, {}, [batch_norm_name]) def test_max_pool(self): input_constant_name = "input_constant" max_pool_name = "max_pool" float_graph_def = graph_pb2.GraphDef() input_constant = quantize_graph.create_constant_node( 
input_constant_name, value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=dtypes.float32, shape=[1, 2, 6, 1]) float_graph_def.node.extend([input_constant]) max_pool_node = quantize_graph.create_node("MaxPool", max_pool_name, [input_constant_name]) quantize_graph.set_attr_int_list(max_pool_node, "ksize", [1, 2, 2, 1]) quantize_graph.set_attr_int_list(max_pool_node, "strides", [1, 1, 1, 1]) quantize_graph.set_attr_string(max_pool_node, "padding", b"SAME") float_graph_def.node.extend([max_pool_node]) test_graph(float_graph_def, {}, [max_pool_name]) def test_avg_pool(self): input_constant_name = "input_constant" avg_pool_name = "avg_pool" float_graph_def = graph_pb2.GraphDef() input_constant = quantize_graph.create_constant_node( input_constant_name, value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=dtypes.float32, shape=[1, 2, 6, 1]) float_graph_def.node.extend([input_constant]) avg_pool_node = quantize_graph.create_node("AvgPool", avg_pool_name, [input_constant_name]) quantize_graph.set_attr_dtype(avg_pool_node, "T", dtypes.float32) quantize_graph.set_attr_int_list(avg_pool_node, "ksize", [1, 2, 2, 1]) quantize_graph.set_attr_int_list(avg_pool_node, "strides", [1, 1, 1, 1]) quantize_graph.set_attr_string(avg_pool_node, "padding", b"SAME") float_graph_def.node.extend([avg_pool_node]) test_graph(float_graph_def, {}, [avg_pool_name]) def test_relu(self): input_constant_name = "input_constant" relu_name = "relu" float_graph_def = graph_pb2.GraphDef() input_constant = quantize_graph.create_constant_node( input_constant_name, value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=dtypes.float32, shape=[1, 2, 6, 1]) float_graph_def.node.extend([input_constant]) relu_node = quantize_graph.create_node("Relu", relu_name, [input_constant_name]) quantize_graph.set_attr_dtype(relu_node, "T", dtypes.float32) float_graph_def.node.extend([relu_node]) test_graph(float_graph_def, {}, [relu_name]) def test_relu_w_fake_quant_w_min_max_vars(self): input_node = quantize_graph.create_constant_node( "input", value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=dtypes.float32, shape=[1, 2, 6, 1]) relu_node = quantize_graph.create_node("Relu", "relu", [input_node.name]) quantize_graph.set_attr_dtype(relu_node, "T", dtypes.float32) min_node = quantize_graph.create_constant_node( "min_bias_add", value=0, dtype=dtypes.float32, shape=[]) max_node = quantize_graph.create_constant_node( "max_bias_add", value=12, dtype=dtypes.float32, shape=[]) fake_quant_node = quantize_graph.create_node( "FakeQuantWithMinMaxVars", "fake_quant", [relu_node.name, min_node.name, max_node.name]) float_graph_def = graph_pb2.GraphDef() float_graph_def.node.extend( [input_node, relu_node, min_node, max_node, fake_quant_node]) test_graph(float_graph_def, {}, [fake_quant_node.name], log_graph=True) # Verify there is only one Quantize and one Requantize op. eightbit_rewriter = quantize_graph.GraphRewriter( float_graph_def, "eightbit", quantized_input_range=None) eightbit_graph_def = eightbit_rewriter.rewrite([fake_quant_node.name]) ops = [node.op for node in eightbit_graph_def.node] # No quantize since all inputs are const and can be quantized up-front. self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize")) # One dequantize at the end. 
self.assertEqual(1, ops.count("Dequantize")) def test_relu6(self): input_constant_name = "input_constant" relu6_name = "relu6" float_graph_def = graph_pb2.GraphDef() input_constant = quantize_graph.create_constant_node( input_constant_name, value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=dtypes.float32, shape=[1, 2, 6, 1]) float_graph_def.node.extend([input_constant]) relu6_node = quantize_graph.create_node("Relu6", relu6_name, [input_constant_name]) quantize_graph.set_attr_dtype(relu6_node, "T", dtypes.float32) float_graph_def.node.extend([relu6_node]) test_graph(float_graph_def, {}, [relu6_name]) def test_bias_add(self): input_constant_name = "input_constant" offset_constant_name = "offset_constant" bias_add_name = "bias_add" float_graph_def = graph_pb2.GraphDef() input_constant = quantize_graph.create_constant_node( input_constant_name, value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=dtypes.float32, shape=[1, 1, 2, 6]) float_graph_def.node.extend([input_constant]) offset_constant = quantize_graph.create_constant_node( offset_constant_name, value=[1, 2, 3, 4, 5, 6], dtype=dtypes.float32, shape=[6]) float_graph_def.node.extend([offset_constant]) bias_add_node = quantize_graph.create_node( "BiasAdd", bias_add_name, [input_constant_name, offset_constant_name]) quantize_graph.set_attr_dtype(bias_add_node, "T", dtypes.float32) float_graph_def.node.extend([bias_add_node]) test_graph(float_graph_def, {}, [bias_add_name]) def test_quantized_input_range_errors(self): with self.assertRaises(ValueError): # Invalid mode. quantize_graph.GraphRewriter(graph_pb2.GraphDef(), "weights_rounded", [0, 1]) with self.assertRaises(ValueError): # Invalid range. quantize_graph.GraphRewriter(graph_pb2.GraphDef(), "eightbit", [0, -1]) def test_quantized_input_range_bias_add(self): input_shape = [1, 1, 2, 6] input_n = quantize_graph.create_node("Placeholder", "input", []) quantize_graph.set_attr_dtype(input_n, "dtype", dtypes.float32) quantize_graph.set_attr_shape(input_n, "shape", input_shape) offset_n = quantize_graph.create_constant_node( "offset", value=[1, 2, 3, 4, 5, 6], dtype=dtypes.float32, shape=[6]) bias_add_n = quantize_graph.create_node("BiasAdd", "bias_add", [input_n.name, offset_n.name]) quantize_graph.set_attr_dtype(bias_add_n, "T", dtypes.float32) float_graph_def = graph_pb2.GraphDef() float_graph_def.node.extend([input_n, offset_n, bias_add_n]) input_map = { input_n.name + ":0": np.reshape([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], input_shape) } self._RunTestsForQuantizedInputRange(float_graph_def, input_map, [bias_add_n.name], [-1, 20.]) self._RunTestsForQuantizedInputRange(float_graph_def, input_map, [bias_add_n.name], [0, 12.]) def test_quantized_input_range_mat_mul(self): shapes = [[3, 2], [2, 4]] inputs = [] for i, shape in enumerate(shapes): node = quantize_graph.create_node("Placeholder", "input_%s" % i, []) quantize_graph.set_attr_dtype(node, "dtype", dtypes.float32) quantize_graph.set_attr_shape(node, "shape", shape) inputs.append(node) mat_mul_node = quantize_graph.create_node("MatMul", "mat_mul", [n.name for n in inputs]) quantize_graph.set_attr_dtype(mat_mul_node, "T", dtypes.float32) float_graph_def = graph_pb2.GraphDef() float_graph_def.node.extend(inputs + [mat_mul_node]) input_map = { inputs[0].name + ":0": np.reshape([1, 2, 3, 4, 5, 6], shapes[0]), inputs[1].name + ":0": np.reshape([.8, .7, .6, .5, .4, .3, .2, .1], shapes[1]) } self._RunTestsForQuantizedInputRange(float_graph_def, input_map, [mat_mul_node.name], [-1, 20.]) 
self._RunTestsForQuantizedInputRange(float_graph_def, input_map, [mat_mul_node.name], [0, 6.]) def _RunTestsForQuantizedInputRange(self, float_graph_def, input_map, output_names, input_range): if sys.version_info[0] == 3: # uint8->quint8 conversion for numpy is not working currently. return quantized_input_map = {} for k, v in input_map.items(): arr = [ int( round((n - input_range[0]) * 255 / (input_range[1] - input_range[ 0]))) for n in v.flat ] arr = np.array(arr, np.uint8) arr = arr.reshape(v.shape) arr = arr.astype(dtypes.quint8.as_numpy_dtype) quantized_input_map[k] = arr output_tensors = [output_name + ":0" for output_name in output_names] float_results = run_graph_def(float_graph_def, input_map, output_tensors) # Quantize treating the input as quantized in range <input_range>. rewriter = quantize_graph.GraphRewriter(float_graph_def, "eightbit", input_range) graph_def = rewriter.rewrite(output_names) results = run_graph_def(graph_def, quantized_input_map, output_tensors) for expected, result in zip(float_results, results): assert are_tensors_near(expected, result, .5) ops = [node.op for node in graph_def.node] self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize")) self.assertEqual(len(output_names), ops.count("Dequantize")) # Quantize without treating input as quantized. rewriter = quantize_graph.GraphRewriter( float_graph_def, "eightbit", quantized_input_range=None) graph_def = rewriter.rewrite(output_names) results = run_graph_def(graph_def, input_map, output_tensors) for expected, result in zip(float_results, results): assert are_tensors_near(expected, result, .5) ops = [node.op for node in graph_def.node] self.assertEqual( len(input_map), ops.count("QuantizeV2") + ops.count("Quantize")) self.assertEqual(len(output_names), ops.count("Dequantize")) def test_bias_add_w_fake_quant_w_min_max_vars(self): input_node = quantize_graph.create_constant_node( "input", value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=dtypes.float32, shape=[1, 1, 2, 5]) offset_node = quantize_graph.create_constant_node( "offset", value=[1, 2, 3, 4, 5], dtype=dtypes.float32, shape=[5]) bias_add_node = quantize_graph.create_node( "BiasAdd", "bias_add", [input_node.name, offset_node.name]) quantize_graph.set_attr_dtype(bias_add_node, "T", dtypes.float32) min_node = quantize_graph.create_constant_node( "min_bias_add", value=-.5, dtype=dtypes.float32, shape=[]) max_node = quantize_graph.create_constant_node( "max_bias_add", value=15.5, dtype=dtypes.float32, shape=[]) fake_quant_node = quantize_graph.create_node( "FakeQuantWithMinMaxVars", "fake_quant", [bias_add_node.name, min_node.name, max_node.name]) float_graph_def = graph_pb2.GraphDef() float_graph_def.node.extend([ input_node, offset_node, bias_add_node, min_node, max_node, fake_quant_node ]) test_graph(float_graph_def, {}, [fake_quant_node.name], log_graph=True) # Verify there is only one Quantize and one Requantize op. # Pass in fallback_quantization_range, although it will have no effect # because the FakeQuantWithMinMaxVars are used instead. eightbit_rewriter = quantize_graph.GraphRewriter( float_graph_def, "eightbit", quantized_input_range=None, fallback_quantization_range=[-100, 100]) eightbit_graph_def = eightbit_rewriter.rewrite([fake_quant_node.name]) ops = [node.op for node in eightbit_graph_def.node] node_names = [node.name for node in eightbit_graph_def.node] # No quantize since all inputs are const and can be quantized up-front. self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize")) # One dequantize at the end. 
self.assertEqual(1, ops.count("Dequantize")) # The fallback constants are not in the graph. self.assertEqual(0, node_names.count("fallback_quantization_min_value")) self.assertEqual(0, node_names.count("fallback_quantization_max_value")) def test_bias_add_w_fallback_min_max_vars(self): input_node = quantize_graph.create_constant_node( "input", value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=dtypes.float32, shape=[1, 1, 2, 5]) offset_node = quantize_graph.create_constant_node( "offset", value=[1, 2, 3, 4, 5], dtype=dtypes.float32, shape=[5]) bias_add_node = quantize_graph.create_node( "BiasAdd", "bias_add", [input_node.name, offset_node.name]) quantize_graph.set_attr_dtype(bias_add_node, "T", dtypes.float32) float_graph_def = graph_pb2.GraphDef() float_graph_def.node.extend([input_node, offset_node, bias_add_node]) test_graph(float_graph_def, {}, [bias_add_node.name], log_graph=True) # Verify there is only one Quantize, one Requantize op, and no # RequantizationRange op. eightbit_rewriter = quantize_graph.GraphRewriter( float_graph_def, "eightbit", quantized_input_range=None, fallback_quantization_range=[-.5, 15.5]) eightbit_graph_def = eightbit_rewriter.rewrite([bias_add_node.name]) ops = [node.op for node in eightbit_graph_def.node] node_names = [node.name for node in eightbit_graph_def.node] # No quantize since all inputs are const and can be quantized up-front. self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize")) # One dequantize at the end. self.assertEqual(1, ops.count("Dequantize")) # No RequantizationRange self.assertEqual(0, ops.count("RequantizationRange")) # The fallback constants are in the graph. self.assertEqual(1, node_names.count("fallback_quantization_min_value")) self.assertEqual(1, node_names.count("fallback_quantization_max_value")) def test_remove_redundant_quantization(self): a_constant_name = "a_constant" a_constant_min_name = "a_constant_min" a_constant_max_name = "a_constant_max" a_dequantize_name = "a_dequantize" a_quantize_name = "a_quantize" b_constant_name = "b_constant" b_constant_min_name = "b_constant_min" b_constant_max_name = "b_constant_max" b_dequantize_name = "b_dequantize" b_quantize_name = "b_quantize" mat_mul_name = "mat_mul" graph_def = graph_pb2.GraphDef() a_constant = quantize_graph.create_constant_node( a_constant_name, value=(0,), dtype=dtypes.quint8, shape=[]) graph_def.node.extend([a_constant]) a_constant_min = quantize_graph.create_constant_node( a_constant_min_name, value=2, dtype=dtypes.float32, shape=[]) graph_def.node.extend([a_constant_min]) a_constant_max = quantize_graph.create_constant_node( a_constant_max_name, value=2, dtype=dtypes.float32, shape=[]) graph_def.node.extend([a_constant_max]) a_dequantize_node = quantize_graph.create_node( "Dequantize", a_dequantize_name, [a_constant_name, a_constant_min_name, a_constant_max_name]) quantize_graph.set_attr_dtype(a_dequantize_node, "T", dtypes.uint8) graph_def.node.extend([a_dequantize_node]) a_quantize_node = quantize_graph.create_node( "QuantizeV2", a_quantize_name, [a_dequantize_name, a_dequantize_name + ":1", a_dequantize_name + ":2"]) quantize_graph.set_attr_dtype(a_quantize_node, "T", dtypes.uint8) graph_def.node.extend([a_quantize_node]) b_constant = quantize_graph.create_constant_node( b_constant_name, value=(0,), dtype=dtypes.quint8, shape=[]) graph_def.node.extend([b_constant]) b_constant_min = quantize_graph.create_constant_node( b_constant_min_name, value=3, dtype=dtypes.float32, shape=[]) graph_def.node.extend([b_constant_min]) b_constant_max = 
quantize_graph.create_constant_node( b_constant_max_name, value=3, dtype=dtypes.float32, shape=[]) graph_def.node.extend([b_constant_max]) b_dequantize_node = quantize_graph.create_node( "Dequantize", b_dequantize_name, [b_constant_name, b_constant_min_name, b_constant_max_name]) quantize_graph.set_attr_dtype(b_dequantize_node, "T", dtypes.uint8) graph_def.node.extend([b_dequantize_node]) b_quantize_node = quantize_graph.create_node( "QuantizeV2", b_quantize_name, [b_dequantize_name, b_dequantize_name + ":1", b_dequantize_name + ":2"]) quantize_graph.set_attr_dtype(b_quantize_node, "T", dtypes.uint8) graph_def.node.extend([b_quantize_node]) mat_mul_node = quantize_graph.create_node("QuantizedMatMul", mat_mul_name, [ a_quantize_name, b_quantize_name, a_quantize_name + ":1", a_quantize_name + ":2", b_quantize_name + ":1", b_quantize_name + ":2" ]) quantize_graph.set_attr_dtype(mat_mul_node, "T1", dtypes.uint8) quantize_graph.set_attr_dtype(mat_mul_node, "T2", dtypes.int32) graph_def.node.extend([mat_mul_node]) expected_output = graph_pb2.GraphDef() a_constant = quantize_graph.create_constant_node( a_constant_name, value=(0,), dtype=dtypes.quint8, shape=[]) expected_output.node.extend([a_constant]) a_constant_min = quantize_graph.create_constant_node( a_constant_min_name, value=2, dtype=dtypes.float32, shape=[]) expected_output.node.extend([a_constant_min]) a_constant_max = quantize_graph.create_constant_node( a_constant_max_name, value=2, dtype=dtypes.float32, shape=[]) expected_output.node.extend([a_constant_max]) b_constant = quantize_graph.create_constant_node( b_constant_name, value=(0,), dtype=dtypes.quint8, shape=[]) expected_output.node.extend([b_constant]) b_constant_min = quantize_graph.create_constant_node( b_constant_min_name, value=3, dtype=dtypes.float32, shape=[]) expected_output.node.extend([b_constant_min]) b_constant_max = quantize_graph.create_constant_node( b_constant_max_name, value=3, dtype=dtypes.float32, shape=[]) expected_output.node.extend([b_constant_max]) mat_mul_node = quantize_graph.create_node("QuantizedMatMul", mat_mul_name, [ a_constant_name, b_constant_name, a_constant_min_name, a_constant_max_name, b_constant_min_name, b_constant_max_name ]) quantize_graph.set_attr_dtype(mat_mul_node, "T1", dtypes.uint8) quantize_graph.set_attr_dtype(mat_mul_node, "T2", dtypes.int32) expected_output.node.extend([mat_mul_node]) expected_output.versions.CopyFrom(graph_def.versions) expected_output.library.CopyFrom(graph_def.library) rewriter = quantize_graph.GraphRewriter( graph_def, [mat_mul_name], quantized_input_range=None) output = rewriter.remove_redundant_quantization(graph_def) stripped_output = graph_util.extract_sub_graph(output, [mat_mul_name]) self.assertProtoEquals(expected_output, stripped_output) if __name__ == "__main__": test.main()
2.078125
2
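As a reading aid for the record above (not part of the record itself): the test file compares quantized-graph outputs to float baselines with an element-wise tolerance and, on failure, reports bias and noise statistics. Below is a minimal NumPy-only sketch of that comparison; the helper name, tolerance default, and sample values are assumptions for illustration, not the record's API.

import numpy as np

def tensors_near(a, b, tolerance=1.0):
    # Flatten both inputs so tensors of any shape can be compared element-wise.
    flat_a = np.asarray(a, dtype=np.float64).ravel()
    flat_b = np.asarray(b, dtype=np.float64).ravel()
    if flat_a.size != flat_b.size:
        return False
    diff = flat_a - flat_b
    if (np.abs(diff) <= tolerance).all():
        return True
    # On failure, report bias (mean difference) and noise (mean absolute
    # difference), mirroring the diagnostics logged by the test above.
    print("mean diff:", diff.mean(), "mean abs diff:", np.abs(diff).mean())
    return False

assert tensors_near([1.0, 2.0, 3.0], [1.2, 1.9, 3.4], tolerance=0.5)
assert not tensors_near([1.0, 2.0], [1.0, 4.0], tolerance=0.5)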
layerserver/migrations/0001_initial.py
aroiginfraplan/giscube-admin
5
3149
# -*- coding: utf-8 -*- # Generated by Django 1.11.10 on 2018-04-26 09:14 import colorfield.fields from django.db import migrations, models import django.db.models.deletion import giscube.utils class Migration(migrations.Migration): initial = True dependencies = [ ('giscube', '0002_update'), ] operations = [ migrations.CreateModel( name='GeoJsonLayer', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=50, unique=True)), ('title', models.CharField(blank=True, max_length=100, null=True)), ('description', models.TextField(blank=True, null=True)), ('keywords', models.CharField(blank=True, max_length=200, null=True)), ('active', models.BooleanField(default=True)), ('visibility', models.CharField(choices=[('private', 'Private'), ('public', 'Public')], default='private', max_length=10)), ('visible_on_geoportal', models.BooleanField(default=False)), ('shapetype', models.CharField(blank=True, choices=[('marker', 'Marker'), ('line', 'Line'), ('polygon', 'Polygon'), ('Circle', 'Circle')], max_length=20, null=True)), ('shape_radius', models.IntegerField(blank=True, null=True)), ('stroke_color', colorfield.fields.ColorField(blank=True, default=b'#FF3333', max_length=18, null=True)), ('stroke_width', models.IntegerField(blank=True, default=1, null=True)), ('stroke_dash_array', models.CharField(blank=True, default='', max_length=25, null=True)), ('fill_color', colorfield.fields.ColorField(blank=True, default=b'#FFC300', max_length=18, null=True)), ('fill_opacity', models.DecimalField(blank=True, decimal_places=1, default=1, max_digits=2, null=True)), ('url', models.CharField(blank=True, max_length=100, null=True)), ('data_file', models.FileField(blank=True, null=True, upload_to=giscube.utils.unique_service_directory)), ('service_path', models.CharField(max_length=255)), ('cache_time', models.IntegerField(blank=True, null=True)), ('last_fetch_on', models.DateField(blank=True, null=True)), ('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='giscube.Category')), ], options={ 'verbose_name': 'GeoJSONLayer', 'verbose_name_plural': 'GeoJSONLayers', }, ), ]
1.726563
2
SETTINGS.py
pirica/fortnite-leaks-image-generator
5
3150
<filename>SETTINGS.py backgroundurl = "https://storage.needpix.com/rsynced_images/colored-background.jpg" # <- Need to be a Image URL!!! lang = "en" # <- language code displayset = True # <- Display the Set of the Item raritytext = True # <- Display the Rarity of the Item typeconfig = { "BannerToken": True, "AthenaBackpack": True, "AthenaPetCarrier": True, "AthenaPet": True, "AthenaPickaxe": True, "AthenaCharacter": True, "AthenaSkyDiveContrail": True, "AthenaGlider": True, "AthenaDance": True, "AthenaEmoji": True, "AthenaLoadingScreen": True, "AthenaMusicPack": True, "AthenaSpray": True, "AthenaToy": True, "AthenaBattleBus": True, "AthenaItemWrap": True } interval = 5 # <- Time (in seconds) until the bot checks for leaks again | Recommend: 7 watermark = "" # <- Leave it empty if you dont want one watermarksize = 25 # <- Size of the Watermark
2.203125
2
src/healthvaultlib/tests/testbase.py
rajeevs1992/pyhealthvault
1
3151
<filename>src/healthvaultlib/tests/testbase.py import unittest import settings from healthvaultlib.helpers.connection import Connection class TestBase(unittest.TestCase): def setUp(self): self.connection = self.get_connection() def get_connection(self): conn = Connection(settings.HV_APPID, settings.HV_SERVICE_SERVER) conn.thumbprint = settings.APP_THUMBPRINT conn.publickey = settings.APP_PUBLIC_KEY conn.privatekey = settings.APP_PRIVATE_KEY conn.connect() conn.set_person_and_record(settings.OFFLINE_PERSON_ID, settings.OFFLINE_RECORD_ID) return conn
2.234375
2
apps/extensions/migrations/0012_imports_path_urlfield_to_charfield.py
StepicOrg/stepik-apps
5
3152
# -*- coding: utf-8 -*- # Generated by Django 1.10.6 on 2017-06-09 03:01 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('extensions', '0011_auto_20170502_0908'), ] operations = [ migrations.AlterField( model_name='extension', name='imports_path', field=models.CharField(default='imports/', max_length=255), ), ]
1.40625
1
regtests/bench/thread_collision.py
secureosv/pythia
17
3153
''' multi-threading (python3 version) https://docs.python.org/3/library/threading.html ''' from time import perf_counter as clock import threading THREADS=2 lock = threading.Lock() A = 0 B = 0 C = 0 def test_globals(): global A, B, C for i in range(1024*1024): lock.acquire() A += 1 B += 2 C = A + B lock.release() def main(): print( 'starting threading test') starttime = clock() threads = [] for i in range(THREADS): t = threading.Thread( target=test_globals, args=() ) t.start() threads.append( t ) for t in threads: t.join() print( clock()-starttime) print('A:', A) print('B:', B) print('C:', C) main()
3.765625
4
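Aside on the benchmark record above (not part of the record): it guards shared globals with explicit lock.acquire()/lock.release() calls. A sketch of the same guarded-counter pattern using the lock as a context manager is below; the smaller loop count and names are assumptions chosen so the example runs quickly, and the assertion only holds because the lock serializes the increments.

import threading

THREADS = 2
ITERATIONS = 100_000
lock = threading.Lock()
counter = 0

def worker():
    global counter
    for _ in range(ITERATIONS):
        # "with lock" acquires on entry and releases on exit, even if the
        # body raises, which is why it is usually preferred over paired
        # acquire()/release() calls.
        with lock:
            counter += 1

threads = [threading.Thread(target=worker) for _ in range(THREADS)]
for t in threads:
    t.start()
for t in threads:
    t.join()

assert counter == THREADS * ITERATIONS
print("counter:", counter)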
game/board.py
scooler/checkers
0
3154
import numpy as np class Board: """ 0 - black 1 - white """ def __init__(self): board = [ [0, 1] * 4, [1, 0] * 4 ] * 4 players_board = [ [0, 1] * 4, # player 1 [1, 0] * 4 ] + [[0] * 8] * 4 + [ # 4 rows of nothing [0, 2] * 4, # player 2 [2, 0] * 4 ] self.board = np.array(board) self.players_board = np.array(players_board) self.x_size = 8 self.y_size = 8 # def move(self, x, y, current_player): # self.board[x, y] = current_player # def are_same_and_non_zero(self, array): # return np.unique(array).size == 1 and array[0] != 0 # def is_board_full(self): # return not np.any(np.unique(self.board) == 0) def is_finished(self): """is game finished""" return True # for i in range(0, self.x_size): # rows # if self.are_same_and_non_zero(self.board[i, :]): # self.player_who_won = self.board[i, 0] # self.result = 'Won {} - row {}'.format(self.player(self.player_who_won), i) # return True # for i in range(0, self.y_size): # columns # if self.are_same_and_non_zero(self.board[:, i]): # self.player_who_won = self.board[0, i] # self.result = 'Won {} - col {}'.format(self.player(self.player_who_won), i) # return True # if self.are_same_and_non_zero(np.diag(self.board)): # diagonal # self.player_who_won = self.board[1, 1] # self.result = 'Won {} - diagonal {}'.format(self.player(self.player_who_won), i) # return True # if self.are_same_and_non_zero(np.diag(np.flipud(self.board))): # anty-diagonal # self.player_who_won = self.board[1, 1] # self.result = 'Won {} - anty-diagonal {}'.format(self.player(self.player_who_won), i) # return True # if self.is_board_full(): # self.player_who_won = 0 # nobody # self.result = 'Draw' # return True # draw return False def show(self): # print(self.board) # print(self.players_board) return # def player(self, player_no): # if player_no == 1: return 'Player 1 (X)' # if player_no == 2: return 'Player 2 (O)' # def show_player_info(self, player_no): # print("It's turn of ", self.player(player_no))
3.546875
4
utils/get_season_things_price.py
vogelfenx/storagebot
0
3155
def get_season_things_price(thing, amount, price): if thing == 'wheel': wheel_price = price[thing]['month'] * amount return f'Стоимость составит {wheel_price} р./месяц' else: other_thing_price_week = price[thing]['week'] * amount other_thing_price_month = price[thing]['month'] * amount return f'Стоимость составит {other_thing_price_week} р./неделю' + \ f' или {other_thing_price_month} р./месяц'
3.453125
3
zge/engine.py
zhester/zge
0
3156
<reponame>zhester/zge """ Zoe Game Engine Core Implementation =================================== Requirements ------------ [pygame](http://www.pygame.org/) """ # core packages # third-party packages import pygame # local package import layer __version__ = '0.0.0' #============================================================================= class Engine( object ): """ Simple game engine object. """ #========================================================================= def __init__( self, size ): """ Initializes an Engine object. """ # pygame initialization pygame.init() # initialize the root display surface self.window = pygame.display.set_mode( size, 0, 32 ) # set the title bar text and iconification text pygame.display.set_caption( 'Demonstration', 'Demo' ) # set the application icon icon = pygame.image.load( '../assets/z32.png' ) pygame.display.set_icon( icon ) # create a list of normal display layers self._layers = [] # create a transparent "top" layer for overlayed information self._top = layer.TextLayer() # initialize last tick value self._last_tick = pygame.time.get_ticks() self._last_wait = 0 # set an FPS cap self._fps = 0.0 self._fps_limit = 120.0 self._tick_step = int( round( 1000.0 / self._fps_limit ) ) # engine is currently running self._is_running = False # short debug string for various things self._debug = '' #========================================================================= def run( self ): """ Run the game loop (does not return until the application quits). """ # update tick value before entering the loop self._last_tick = pygame.time.get_ticks() # execute infinite application loop self._is_running = True while self._is_running: # process event queue for event in pygame.event.get(): # check for quit event if event.type == pygame.QUIT: self._is_running = False # check for key event elif ( event.type == pygame.KEYDOWN ) \ or ( event.type == pygame.KEYUP ) : self.trigger_key_event( event ) # exit application loop if done if self._is_running == False: break # update the game display self.update() # ZIH - simulate hard work #pygame.time.delay( 3 ) # compute duration of last event/render loop end_tick = pygame.time.get_ticks() delta = end_tick - self._last_tick self._last_tick = end_tick # update FPS value if delta > 0: self._fps = 1000.0 / float( delta ) else: self._fps = self._fps_limit # compute remaining time available inside this iteration if delta < self._tick_step: self._last_wait = self._tick_step - delta else: self._last_wait = 0 # let the OS do other stuff on this core pygame.time.wait( self._last_wait ) # shut down pygame pygame.quit() # return exit status return 0 #========================================================================= def trigger_key_event( self, event ): """ Initiates key input events. """ # ZIH - temp, just seeing how to poll the keys mods = pygame.key.get_mods() mod_bits = [ ( pygame.KMOD_ALT, 'A' ), ( pygame.KMOD_CTRL, 'C' ), ( pygame.KMOD_SHIFT, 'S' ) ] mod_str = ''.join( b[ 1 ] for b in mod_bits if b[ 0 ] & mods ) if event.type == pygame.KEYUP: self._debug = '({})'.format( mod_str ) elif event.type == pygame.KEYDOWN: self._debug = '({}){}'.format( mod_str, pygame.key.name( event.key ) ) #========================================================================= def update( self ): """ Updates the display. 
""" # update overlayed information self._top.set_text( ' [ fps:{:4.0f} sch:{:3} tck:{:08} dbg:{} ]'.format( self._fps, self._last_wait, self._last_tick, self._debug ) ) # draw the display on the back buffer self._draw_layers() # update the display (swap video buffers) pygame.display.update() #========================================================================= def _draw_layers( self ): """ Blits all the display layers onto the back buffer. """ # fill the background self.window.fill( ( 32, 32, 32 ) ) # blit all user layers for layer in self._layers: layer.blit( self.window ) # blit the top layer self._top.blit( self.window )
2.96875
3
Authentication/migrations/0004_auto_20201115_1105.py
CHESyrian/Estebyan
0
3157
<filename>Authentication/migrations/0004_auto_20201115_1105.py # Generated by Django 3.0.6 on 2020-11-15 09:05 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('Authentication', '0003_auto_20201113_2210'), ] operations = [ migrations.AlterField( model_name='profiles', name='Qu_Shares', field=models.IntegerField(default=0), ), migrations.AlterField( model_name='profiles', name='Questionnais', field=models.IntegerField(default=0), ), ]
1.34375
1
dashboard/urls.py
EdisonBr/MockDados
0
3158
<filename>dashboard/urls.py from django.urls import path, re_path from django.views.generic.base import TemplateView from .views import dashboard_cost, dashboard_energy, MotorDataListView app_name = 'dashboard' urlpatterns = [ path('', MotorDataListView.as_view(), name='dashboard_custom'), #path('', dashboard_custom, name='dashboard_custom'), path('energy', dashboard_energy, name='dashboard_energy'), path('cost', dashboard_cost, name='dashboard_cost'), ]
1.84375
2
Coursera/Python for Everybody Specialization/Python for everybody basics/hourly rate.py
ejgarcia1991/Courses-and-other-non-professional-projects
1
3159
hrs = input("Enter Hours:") rate = input("Enter rate:") pay = float(hrs) * float(rate) print("Pay: " +str(pay))
3.796875
4
litex_boards/platforms/xilinx_kcu105.py
smunaut/litex-boards
177
3160
# # This file is part of LiteX-Boards. # # Copyright (c) 2017-2019 <NAME> <<EMAIL>> # SPDX-License-Identifier: BSD-2-Clause from litex.build.generic_platform import * from litex.build.xilinx import XilinxPlatform, VivadoProgrammer # IOs ---------------------------------------------------------------------------------------------- _io = [ # Clk / Rst ("clk125", 0, Subsignal("p", Pins("G10"), IOStandard("LVDS")), Subsignal("n", Pins("F10"), IOStandard("LVDS")) ), ("clk300", 0, Subsignal("p", Pins("AK17"), IOStandard("DIFF_SSTL12")), Subsignal("n", Pins("AK16"), IOStandard("DIFF_SSTL12")) ), ("cpu_reset", 0, Pins("AN8"), IOStandard("LVCMOS18")), # Leds ("user_led", 0, Pins("AP8"), IOStandard("LVCMOS18")), ("user_led", 1, Pins("H23"), IOStandard("LVCMOS18")), ("user_led", 2, Pins("P20"), IOStandard("LVCMOS18")), ("user_led", 3, Pins("P21"), IOStandard("LVCMOS18")), ("user_led", 4, Pins("N22"), IOStandard("LVCMOS18")), ("user_led", 5, Pins("M22"), IOStandard("LVCMOS18")), ("user_led", 6, Pins("R23"), IOStandard("LVCMOS18")), ("user_led", 7, Pins("P23"), IOStandard("LVCMOS18")), # Buttons ("user_btn_c", 0, Pins("AE10"), IOStandard("LVCMOS18")), ("user_btn_n", 0, Pins("AD10"), IOStandard("LVCMOS18")), ("user_btn_s", 0, Pins("AF8"), IOStandard("LVCMOS18")), ("user_btn_w", 0, Pins("AF9"), IOStandard("LVCMOS18")), ("user_btn_e", 0, Pins("AE8"), IOStandard("LVCMOS18")), # Switches ("user_dip_btn", 0, Pins("AN16"), IOStandard("LVCMOS12")), ("user_dip_btn", 1, Pins("AN19"), IOStandard("LVCMOS12")), ("user_dip_btn", 2, Pins("AP18"), IOStandard("LVCMOS12")), ("user_dip_btn", 3, Pins("AN14"), IOStandard("LVCMOS12")), # SMA ("user_sma_clock", 0, Subsignal("p", Pins("D23"), IOStandard("LVDS")), Subsignal("n", Pins("C23"), IOStandard("LVDS")) ), ("user_sma_clock_p", 0, Pins("D23"), IOStandard("LVCMOS18")), ("user_sma_clock_n", 0, Pins("C23"), IOStandard("LVCMOS18")), ("user_sma_gpio", 0, Subsignal("p", Pins("H27"), IOStandard("LVDS")), Subsignal("n", Pins("G27"), IOStandard("LVDS")) ), ("user_sma_gpio_p", 0, Pins("H27"), IOStandard("LVCMOS18")), ("user_sma_gpio_n", 0, Pins("G27"), IOStandard("LVCMOS18")), # I2C ("i2c", 0, Subsignal("scl", Pins("J24")), Subsignal("sda", Pins("J25")), IOStandard("LVCMOS18") ), # Serial ("serial", 0, Subsignal("cts", Pins("L23")), Subsignal("rts", Pins("K27")), Subsignal("tx", Pins("K26")), Subsignal("rx", Pins("G25")), IOStandard("LVCMOS18") ), # SPIFlash ("spiflash", 0, # clock needs to be accessed through primitive Subsignal("cs_n", Pins("U7")), Subsignal("dq", Pins("AC7 AB7 AA7 Y7")), IOStandard("LVCMOS18") ), ("spiflash", 1, # clock needs to be accessed through primitive Subsignal("cs_n", Pins("G26")), Subsignal("dq", Pins("M20 L20 R21 R22")), IOStandard("LVCMOS18") ), # SDCard ("spisdcard", 0, Subsignal("clk", Pins("AL10")), Subsignal("cs_n", Pins("AH8")), Subsignal("mosi", Pins("AD9"), Misc("PULLUP")), Subsignal("miso", Pins("AP9"), Misc("PULLUP")), Misc("SLEW=FAST"), IOStandard("LVCMOS18") ), ("sdcard", 0, Subsignal("clk", Pins("AL10")), Subsignal("cmd", Pins("AD9"), Misc("PULLUP True")), Subsignal("data", Pins("AP9 AN9 AH9 AH8"), Misc("PULLUP True")), Misc("SLEW=FAST"), IOStandard("LVCMOS18") ), # Rotary Encoder ("rotary", 0, Subsignal("a", Pins("Y21")), Subsignal("b", Pins("AD26")), Subsignal("push", Pins("AF28")), IOStandard("LVCMOS18") ), # HDMI ("hdmi", 0, Subsignal("d", Pins( "AK11 AP11 AP13 AN13 AN11 AM11 AN12 AM12", "AL12 AK12 AL13 AK13 AD11 AH12 AG12 AJ11", "AG10 AK8")), Subsignal("de", Pins("AE11")), Subsignal("clk", Pins("AF13")), Subsignal("vsync", 
Pins("AH13")), Subsignal("hsync", Pins("AE13")), Subsignal("spdif", Pins("AE12")), Subsignal("spdif_out", Pins("AF12")), IOStandard("LVCMOS18") ), # DDR4 SDRAM ("ddram", 0, Subsignal("a", Pins( "AE17 AH17 AE18 AJ15 AG16 AL17 AK18 AG17", "AF18 AH19 AF15 AD19 AJ14 AG19"), IOStandard("SSTL12_DCI")), Subsignal("ba", Pins("AF17 AL15"), IOStandard("SSTL12_DCI")), Subsignal("bg", Pins("AG15"), IOStandard("SSTL12_DCI")), Subsignal("ras_n", Pins("AF14"), IOStandard("SSTL12_DCI")), # A16 Subsignal("cas_n", Pins("AG14"), IOStandard("SSTL12_DCI")), # A15 Subsignal("we_n", Pins("AD16"), IOStandard("SSTL12_DCI")), # A14 Subsignal("cs_n", Pins("AL19"), IOStandard("SSTL12_DCI")), Subsignal("act_n", Pins("AH14"), IOStandard("SSTL12_DCI")), #Subsignal("ten", Pins("AH16"), IOStandard("SSTL12_DCI")), #Subsignal("alert_n", Pins("AJ16"), IOStandard("SSTL12_DCI")), #Subsignal("par", Pins("AD18"), IOStandard("SSTL12_DCI")), Subsignal("dm", Pins("AD21 AE25 AJ21 AM21 AH26 AN26 AJ29 AL32"), IOStandard("POD12_DCI")), Subsignal("dq", Pins( "AE23 AG20 AF22 AF20 AE22 AD20 AG22 AE20", "AJ24 AG24 AJ23 AF23 AH23 AF24 AH22 AG25", "AL22 AL25 AM20 AK23 AK22 AL24 AL20 AL23", "AM24 AN23 AN24 AP23 AP25 AN22 AP24 AM22", "AH28 AK26 AK28 AM27 AJ28 AH27 AK27 AM26", "AL30 AP29 AM30 AN28 AL29 AP28 AM29 AN27", "AH31 AH32 AJ34 AK31 AJ31 AJ30 AH34 AK32", "AN33 AP33 AM34 AP31 AM32 AN31 AL34 AN32"), IOStandard("POD12_DCI"), Misc("PRE_EMPHASIS=RDRV_240"), Misc("EQUALIZATION=EQ_LEVEL2")), Subsignal("dqs_p", Pins("AG21 AH24 AJ20 AP20 AL27 AN29 AH33 AN34"), IOStandard("DIFF_POD12_DCI"), Misc("PRE_EMPHASIS=RDRV_240"), Misc("EQUALIZATION=EQ_LEVEL2")), Subsignal("dqs_n", Pins("AH21 AJ25 AK20 AP21 AL28 AP30 AJ33 AP34"), IOStandard("DIFF_POD12_DCI"), Misc("PRE_EMPHASIS=RDRV_240"), Misc("EQUALIZATION=EQ_LEVEL2")), Subsignal("clk_p", Pins("AE16"), IOStandard("DIFF_SSTL12_DCI")), Subsignal("clk_n", Pins("AE15"), IOStandard("DIFF_SSTL12_DCI")), Subsignal("cke", Pins("AD15"), IOStandard("SSTL12_DCI")), Subsignal("odt", Pins("AJ18"), IOStandard("SSTL12_DCI")), Subsignal("reset_n", Pins("AL18"), IOStandard("LVCMOS12")), Misc("SLEW=FAST"), ), # PCIe ("pcie_x1", 0, Subsignal("rst_n", Pins("K22"), IOStandard("LVCMOS18")), Subsignal("clk_p", Pins("AB6")), Subsignal("clk_n", Pins("AB5")), Subsignal("rx_p", Pins("AB2")), Subsignal("rx_n", Pins("AB1")), Subsignal("tx_p", Pins("AC4")), Subsignal("tx_n", Pins("AC3")) ), ("pcie_x2", 0, Subsignal("rst_n", Pins("K22"), IOStandard("LVCMOS18")), Subsignal("clk_p", Pins("AB6")), Subsignal("clk_n", Pins("AB5")), Subsignal("rx_p", Pins("AB2 AD2")), Subsignal("rx_n", Pins("AB1 AD1")), Subsignal("tx_p", Pins("AC4 AE4")), Subsignal("tx_n", Pins("AC3 AE3")) ), ("pcie_x4", 0, Subsignal("rst_n", Pins("K22"), IOStandard("LVCMOS18")), Subsignal("clk_p", Pins("AB6")), Subsignal("clk_n", Pins("AB5")), Subsignal("rx_p", Pins("AB2 AD2 AF2 AH2")), Subsignal("rx_n", Pins("AB1 AD1 AF1 AH1")), Subsignal("tx_p", Pins("AC4 AE4 AG4 AH6")), Subsignal("tx_n", Pins("AC3 AE3 AG3 AH5")) ), ("pcie_x8", 0, Subsignal("rst_n", Pins("K22"), IOStandard("LVCMOS18")), Subsignal("clk_p", Pins("AB6")), Subsignal("clk_n", Pins("AB5")), Subsignal("rx_p", Pins("AB2 AD2 AF2 AH2 AJ4 AK2 AM2 AP2")), Subsignal("rx_n", Pins("AB1 AD1 AF1 AH1 AJ3 AK1 AM1 AP1")), Subsignal("tx_p", Pins("AC4 AE4 AG4 AH6 AK6 AL4 AM6 AN4")), Subsignal("tx_n", Pins("AC3 AE3 AG3 AH5 AK5 AL3 AM5 AN3")) ), # SGMII Clk ("sgmii_clock", 0, Subsignal("p", Pins("P26"), IOStandard("LVDS_25")), Subsignal("n", Pins("N26"), IOStandard("LVDS_25")) ), # SI570 ("si570_refclk", 0, Subsignal("p", 
Pins("P6")), Subsignal("n", Pins("P5")) ), # SMA ("user_sma_mgt_refclk", 0, Subsignal("p", Pins("V6")), Subsignal("n", Pins("V5")) ), ("user_sma_mgt_tx", 0, Subsignal("p", Pins("R4")), Subsignal("n", Pins("R3")) ), ("user_sma_mgt_rx", 0, Subsignal("p", Pins("P2")), Subsignal("n", Pins("P1")) ), # SFP ("sfp", 0, Subsignal("txp", Pins("U4")), Subsignal("txn", Pins("U3")), Subsignal("rxp", Pins("T2")), Subsignal("rxn", Pins("T1")) ), ("sfp_tx", 0, Subsignal("p", Pins("U4")), Subsignal("n", Pins("U3")), ), ("sfp_rx", 0, Subsignal("p", Pins("T2")), Subsignal("n", Pins("T1")), ), ("sfp_tx_disable_n", 0, Pins("AL8"), IOStandard("LVCMOS18")), ("sfp", 1, Subsignal("txp", Pins("W4")), Subsignal("txn", Pins("W3")), Subsignal("rxp", Pins("V2")), Subsignal("rxn", Pins("V1")) ), ("sfp_tx", 1, Subsignal("p", Pins("W4")), Subsignal("n", Pins("W3")), ), ("sfp_rx", 1, Subsignal("p", Pins("V2")), Subsignal("n", Pins("V1")), ), ("sfp_tx_disable_n", 1, Pins("D28"), IOStandard("LVCMOS18")), ] # Connectors --------------------------------------------------------------------------------------- _connectors = [ ("HPC", { "DP0_C2M_P" : "F6", "DP0_C2M_N" : "F5", "DP0_M2C_P" : "E4", "DP0_M2C_N" : "E3", "DP1_C2M_P" : "D6", "DP1_C2M_N" : "D5", "DP1_M2C_P" : "D2", "DP1_M2C_N" : "D1", "DP2_C2M_P" : "C4", "DP2_C2M_N" : "C3", "DP2_M2C_P" : "B2", "DP2_M2C_N" : "B1", "DP3_C2M_P" : "B6", "DP3_C2M_N" : "B5", "DP3_M2C_P" : "A4", "DP3_M2C_N" : "A3", "DP4_C2M_P" : "N4", "DP4_C2M_N" : "N3", "DP4_M2C_P" : "M2", "DP4_M2C_N" : "M1", "DP5_C2M_P" : "J4", "DP5_C2M_N" : "J3", "DP5_M2C_P" : "H2", "DP5_M2C_N" : "H1", "DP6_C2M_P" : "L4", "DP6_C2M_N" : "L3", "DP6_M2C_P" : "K2", "DP6_M2C_N" : "K1", "DP7_C2M_P" : "G4", "DP7_C2M_N" : "G3", "DP7_M2C_P" : "F2", "DP7_M2C_N" : "F1", "LA06_P" : "D13", "LA06_N" : "C13", "LA10_P" : "L8", "LA10_N" : "K8", "LA14_P" : "B10", "LA14_N" : "A10", "LA18_CC_P" : "E22", "LA18_CC_N" : "E23", "LA27_P" : "H21", "LA27_N" : "G21", "HA01_CC_P" : "E16", "HA01_CC_N" : "D16", "HA05_P" : "J15", "HA05_N" : "J14", "HA09_P" : "F18", "HA09_N" : "F17", "HA13_P" : "B14", "HA13_N" : "A14", "HA16_P" : "A19", "HA16_N" : "A18", "HA20_P" : "C19", "HA20_N" : "B19", "CLK1_M2C_P" : "E25", "CLK1_M2C_N" : "D25", "LA00_CC_P" : "H11", "LA00_CC_N" : "G11", "LA03_P" : "A13", "LA03_N" : "A12", "LA08_P" : "J8", "LA08_N" : "H8", "LA12_P" : "E10", "LA12_N" : "D10", "LA16_P" : "B9", "LA16_N" : "A9", "LA20_P" : "B24", "LA20_N" : "A24", "LA22_P" : "G24", "LA22_N" : "F25", "LA25_P" : "D20", "LA25_N" : "D21", "LA29_P" : "B20", "LA29_N" : "A20", "LA31_P" : "B25", "LA31_N" : "A25", "LA33_P" : "A27", "LA33_N" : "A28", "HA03_P" : "G15", "HA03_N" : "G14", "HA07_P" : "L19", "HA07_N" : "L18", "HA11_P" : "J19", "HA11_N" : "J18", "HA14_P" : "F15", "HA14_N" : "F14", "HA18_P" : "B17", "HA18_N" : "B16", "HA22_P" : "C18", "HA22_N" : "C17", "GBTCLK1_M2C_P" : "H6", "GBTCLK1_M2C_N" : "H5", "GBTCLK0_M2C_P" : "K6", "GBTCLK0_M2C_N" : "K5", "LA01_CC_P" : "G9", "LA01_CC_N" : "F9", "LA05_P" : "L13", "LA05_N" : "K13", "LA09_P" : "J9", "LA09_N" : "H9", "LA13_P" : "D9", "LA13_N" : "C9", "LA17_CC_P" : "D24", "LA17_CC_N" : "C24", "LA23_P" : "G22", "LA23_N" : "F22", "LA26_P" : "G20", "LA26_N" : "F20", "PG_M2C" : "L27", "HA00_CC_P" : "G17", "HA00_CC_N" : "G16", "HA04_P" : "G19", "HA04_N" : "F19", "HA08_P" : "K18", "HA08_N" : "K17", "HA12_P" : "K16", "HA12_N" : "J16", "HA15_P" : "D14", "HA15_N" : "C14", "HA19_P" : "D19", "HA19_N" : "D18", "PRSNT_M2C_B" : "H24", "CLK0_M2C_P" : "H12", "CLK0_M2C_N" : "G12", "LA02_P" : "K10", "LA02_N" : "J10", "LA04_P" : "L12", "LA04_N" : "K12", 
"LA07_P" : "F8", "LA07_N" : "E8", "LA11_P" : "K11", "LA11_N" : "J11", "LA15_P" : "D8", "LA15_N" : "C8", "LA19_P" : "C21", "LA19_N" : "C22", "LA21_P" : "F23", "LA21_N" : "F24", "LA24_P" : "E20", "LA24_N" : "E21", "LA28_P" : "B21", "LA28_N" : "B22", "LA30_P" : "C26", "LA30_N" : "B26", "LA32_P" : "E26", "LA32_N" : "D26", "HA02_P" : "H19", "HA02_N" : "H18", "HA06_P" : "L15", "HA06_N" : "K15", "HA10_P" : "H17", "HA10_N" : "H16", "HA17_CC_P" : "E18", "HA17_CC_N" : "E17", "HA21_P" : "E15", "HA21_N" : "D15", "HA23_P" : "B15", "HA23_N" : "A15", } ), ("LPC", { "GBTCLK0_M2C_P" : "AA24", "GBTCLK0_M2C_N" : "AA25", "LA01_CC_P" : "W25", "LA01_CC_N" : "Y25", "LA05_P" : "V27", "LA05_N" : "V28", "LA09_P" : "V26", "LA09_N" : "W26", "LA13_P" : "AA20", "LA13_N" : "AB20", "LA17_CC_P" : "AA32", "LA17_CC_N" : "AB32", "LA23_P" : "AD30", "LA23_N" : "AD31", "LA26_P" : "AF33", "LA26_N" : "AG34", "CLK0_M2C_P" : "AA24", "CLK0_M2C_N" : "AA25", "LA02_P" : "AA22", "LA02_N" : "AB22", "LA04_P" : "U26", "LA04_N" : "U27", "LA07_P" : "V22", "LA07_N" : "V23", "LA11_P" : "V21", "LA11_N" : "W21", "LA15_P" : "AB25", "LA15_N" : "AB26", "LA19_P" : "AA29", "LA19_N" : "AB29", "LA21_P" : "AC33", "LA21_N" : "AD33", "LA24_P" : "AE32", "LA24_N" : "AF32", "LA28_P" : "V31", "LA28_N" : "W31", "LA30_P" : "Y31", "LA30_N" : "Y32", "LA32_P" : "W30", "LA32_N" : "Y30", "LA06_P" : "V29", "LA06_N" : "W29", "LA10_P" : "T22", "LA10_N" : "T23", "LA14_P" : "U21", "LA14_N" : "U22", "LA18_CC_P" : "AB30", "LA18_CC_N" : "AB31", "LA27_P" : "AG31", "LA27_N" : "AG32", "CLK1_M2C_P" : "AC31", "CLK1_M2C_N" : "AC32", "LA00_CC_P" : "W23", "LA00_CC_N" : "W24", "LA03_P" : "W28", "LA03_N" : "Y28", "LA08_P" : "U24", "LA08_N" : "U25", "LA12_P" : "AC22", "LA12_N" : "AC23", "LA16_P" : "AB21", "LA16_N" : "AC21", "LA20_P" : "AA34", "LA20_N" : "AB34", "LA22_P" : "AC34", "LA22_N" : "AD34", "LA25_P" : "AE33", "LA25_N" : "AF34", "LA29_P" : "U34", "LA29_N" : "V34", "LA31_P" : "V33", "LA31_N" : "W34", "LA33_P" : "W33", "LA33_N" : "Y33", } ), ("pmod0", "AK25 AN21 AH18 AM19 AE26 AF25 AE21 AM17"), ("pmod1", "AL14 AM14 AP16 AP15 AM16 AM15 AN18 AN17"), ] # Platform ----------------------------------------------------------------------------------------- class Platform(XilinxPlatform): default_clk_name = "clk125" default_clk_period = 1e9/125e6 def __init__(self): XilinxPlatform.__init__(self, "xcku040-ffva1156-2-e", _io, _connectors, toolchain="vivado") def create_programmer(self): return VivadoProgrammer() def do_finalize(self, fragment): XilinxPlatform.do_finalize(self, fragment) self.add_period_constraint(self.lookup_request("clk125", loose=True), 1e9/125e6) self.add_period_constraint(self.lookup_request("clk300", loose=True), 1e9/300e6) self.add_platform_command("set_property INTERNAL_VREF 0.84 [get_iobanks 44]") self.add_platform_command("set_property INTERNAL_VREF 0.84 [get_iobanks 45]") self.add_platform_command("set_property INTERNAL_VREF 0.84 [get_iobanks 46]")
1.34375
1
code/advent_of_code_day3.py
erinleeryan/2020adventofcode
0
3161
<gh_stars>0 #!/usr/bin/env python # coding: utf-8 # In[1]: import numpy as np import math # In[2]: fileObj = open('../data/advent_of_code_input_day_three.txt', "r") #opens the file in read mode. items = fileObj. read(). splitlines() #puts the file into an array. # In[3]: #print (items) def split(line): return list(line) holding = [] for i, line in enumerate(items): result = split(line) holding.append(result) holding = np.array(holding) holding[holding == '.'] = 0 holding[holding == '#'] = 1 holding = holding.astype(int) print (holding) # In[7]: def dup_and_count(rightstep, downstep, basedata): needed_slope_elements = math.floor(basedata.shape[0]/downstep) replications_needed = (needed_slope_elements* rightstep)/basedata.shape[1] duplicated = np.tile(basedata, math.ceil(replications_needed)) right = np.arange(0,(needed_slope_elements)*rightstep, rightstep).astype(int) down = np.arange(0,(needed_slope_elements)*downstep,downstep).astype(int) moves = [] for ii in range(len(right)): moves.append(duplicated[down[ii], right[ii]]) hits = np.sum(moves) return hits down1_right3 = dup_and_count(3,1,holding) down1_right1 = dup_and_count(1,1,holding) down1_right5 = dup_and_count(5,1,holding) down1_right7 = dup_and_count(7,1,holding) down2_right1 = dup_and_count(1,2,holding) results = np.array([down1_right3, down1_right1, down1_right5, down1_right7, down2_right1], dtype=np.int64) print(results) product = np.prod(results) print (product) # In[ ]:
3.375
3
input_handler.py
Wyverns010/Body-Keypoints-Detection
1
3162
import os import traceback class InputHandler: IMAGES_PARENT_FOLDER = './images' def __init__(self): self.filesList = [] def listFiles(self, path=''): if path != '': self.IMAGES_PARENT_FOLDER = path try: self.filesList = [os.path.join(self.IMAGES_PARENT_FOLDER, imageFile) for imageFile in os.listdir(self.IMAGES_PARENT_FOLDER) if os.path.isfile(os.path.join(self.IMAGES_PARENT_FOLDER, imageFile))] except OSError: traceback.print_exc() return self.filesList if __name__ == '__main__': obj = InputHandler() print(obj.listFiles())
3.109375
3
docker/autoconfig.py
misc0110/bepasty-server
0
3163
<filename>docker/autoconfig.py #!/usr/bin/python import os import sys SITENAME = os.environ.get("BEPASTY_SITENAME", None) if SITENAME is None: print("\n\nEnvironment variable BEPASTY_SITENAME must be set.") sys.exit(1) SECRET_KEY = os.environ.get("BEPASTY_SECRET_KEY", None) if SECRET_KEY is None: print("\n\nEnvironment variable BEPASTY_SECRET_KEY must be set.") sys.exit(1) APP_BASE_PATH = os.environ.get("BEPASTY_APP_BASE_PATH", None) STORAGE_FILESYSTEM_DIRECTORY = os.environ.get( "BEPASTY_STORAGE_FILESYSTEM_DIRECTORY", "/app/data", ) DEFAULT_PERMISSIONS = os.environ.get("BEPASTY_DEFAULT_PERMISSIONS", "create,read") PERMISSIONS = {} admin_secret = os.environ.get("BEPASTY_ADMIN_SECRET", None) if admin_secret is not None: PERMISSIONS.update({admin_secret: "admin,list,create,modify,read,delete"}) try: max_allowed_file_size = os.environ.get("BEPASTY_MAX_ALLOWED_FILE_SIZE", 5000000000) MAX_ALLOWED_FILE_SIZE = int(max_allowed_file_size) except ValueError as err: print("\n\nInvalid BEPASTY_MAX_ALLOWED_FILE_SIZE: %s", str(err)) sys.exit(1) try: max_body_size = os.environ.get("BEPASTY_MAX_BODY_SIZE", 1040384) MAX_BODY_SIZE = int(max_body_size) except ValueError as err: print("\n\nInvalid BEPASTY_MAX_BODY_SIZE: %s", str(err)) sys.exit(1)
2.265625
2
pysh/transforms/alpha/bangexpr.py
drslump/pysh
3
3164
<reponame>drslump/pysh from io import StringIO import re import tokenize import os from collections import deque, ChainMap from functools import lru_cache from enum import Enum import pysh from pysh.path import PathWrapper, Path from typing import List, Callable, Iterator, Tuple, NamedTuple, Deque, Union, Any TBangTransformer = Callable[ [List[str]], Iterator[str]] # runtime symbols __all__ = ['BangExpr', 'BangOp', 'BangSeq', 'BangGlob', 'BangEnv', 'BangBang'] class BangTokenType(Enum): OPAQUE = 'OPAQUE' GLOB = 'GLOB' LOCAL = 'LOCAL' ENV = 'ENV' EXPR = 'EXPR' OP = 'OP' class BangToken(NamedTuple): type: BangTokenType value: str span: Tuple[int, int] TBangLexerToken = Tuple[str, str, Tuple[int,int]] class BangLexer: def _tokener(self, token, transformer=lambda x: x, **kwargs): def cb(s, v): v = transformer(v, **kwargs) return None if v is None else (token, v, (s.match.start(), s.match.end())) return cb @lru_cache() # it's intended for this to be global def build_scanner(self): t = self._tokener return re.Scanner([ (r'\#.+', t('COMMENT', lambda v: v[1:])), (r'\\.', t('ESCAPE')), (r"'( \\. | [^\\']+ )+'", t('SQS', lambda v: v[1:-1])), (r'"( \\. | [^\\"]+ )+"', t('DQS', lambda v: v[1:-1])), (r'\$[A-Za-z_][A-Za-z0-9_]*', t('VAR', lambda v: v[1:])), (r'\${( \\. | [^\\}]+ )+}', t('EXPR', lambda v: v[2:-1])), (r'[|<>^]+', t('OP')), (r'[A-Za-z0-9_%*+:.,=/@~\[\]{}-]+', t('OPAQUE')), (r'\s+', t('WS')), ], flags=re.X) @lru_cache() def build_dqs_scanner(self): t = self._tokener return re.Scanner([ (r'\\.', t('ESCAPE')), (r'\$[A-Za-z_][A-Za-z0-9_]*', t('VAR', lambda v: v[1:])), (r'\${( \\. | [^\\}]+ )+}', t('EXPR', lambda v: v[2:-1])), (r'[^\\\$]+', t('SQS')) # handle as single quoted ], flags=re.X) def scan_dqs(self, code: str, offset=0) -> Iterator[TBangLexerToken]: tokens, remaining = self.build_scanner().scan(code) if remaining: raise SyntaxError('Unexpected char <{}> at position {}'.format(remaining[0], len(code)-len(remaining))) for tkn, val, pos in tokens: yield tkn, val, (offset+pos[0], offset+pos[1]) def demux_dqs(self, tokens: Iterator[TBangLexerToken]) -> Iterator[TBangLexerToken]: """ Split double quoted strings into parts """ for tkn, val, pos in tokens: if tkn == 'DQS': yield from self.scan_dqs(val, offset=pos[0]+1) else: yield tkn, val, pos def scan(self, code: str) -> Iterator[BangToken]: tokens, remaining = self.build_scanner().scan(code) if remaining: raise SyntaxError('Unexpected char at position {}'.format(len(code)-len(remaining))) # Add a terminating token so we can simplify the parsing tokens.append(('END', '', (len(code),len(code)))) last_token = last_pos = None for token, value, pos in self.demux_dqs(tokens): assert token != 'DQS' # double quoted are demuxed # Inject whitespace operator if needed if token != 'OP' and last_token and last_token == 'WS': yield BangToken(BangTokenType.OP, ' ', last_pos) if token in ('COMMENT', 'END'): continue elif token == 'WS': pass elif token == 'OP': value = value.strip() yield BangToken(BangTokenType.OP, value, pos) else: if token == 'OPAQUE': if re.search(r'(?!<\\)[~*?{]', value): yield BangToken(BangTokenType.GLOB, value, pos) else: yield BangToken(BangTokenType.OPAQUE, value, pos) elif token in ('ESCAPE', 'SQS'): #TODO: handle special escapes \n value = re.sub(r'\\(.)', r'\1', value) yield BangToken(BangTokenType.OPAQUE, value, pos) elif token in ('VAR', 'EXPR'): value = value.strip() if value.isalnum() and not value.isdigit(): if value.isupper(): yield BangToken(BangTokenType.ENV, value, pos) else: yield BangToken(BangTokenType.LOCAL, 
value, pos) else: assert token == 'EXPR' value = re.sub(r'\\(.)', r'\1', value) yield BangToken(BangTokenType.EXPR, value, pos) else: assert False, 'unexpected {}, what happened?'.format(token) last_token, last_pos = token, pos class BangEnv: __slots__ = ('name',) def __init__(self, name): self.name = name def __repr__(self): return 'BangEnv<{}>'.format(self.name) class BangSeq: __slots__ = ('items',) def __init__(self, *items): self.items = items def __repr__(self): return 'BangSeq<{!r}>'.format(self.items) class BangOp: __slots__ = ('op',) def __init__(self, op): self.op = op def __repr__(self): return 'BangOp<{}>'.format(self.op) class BangGlob: __slots__ = ('glob',) def __init__(self, glob): self.glob = glob def __repr__(self): return 'BangGlob<{}>'.format(self.glob) class BangExpr: __slots__ = ('args', 'vars') def __init__(self, *args, locals=None, globals=None): assert locals is not None assert globals is not None self.args = args self.vars = ChainMap(locals, globals) def eval_command(self, mut_args): arg = mut_args.popleft() cmd = self.vars.get(str(arg)) if cmd is None: raise RuntimeError('Unable to find {}'.format(arg)) while mut_args: if isinstance(mut_args[0], BangOp): break arg = mut_args.popleft() cmd = cmd(self.eval_expr(arg)) return cmd def eval_expr(self, expr: Any) -> Union[str, Iterator[Path]]: if isinstance(expr, BangSeq): return self.eval_seq(expr) elif isinstance(expr, BangEnv): return os.environ[expr.name] elif isinstance(expr, BangGlob): return PathWrapper().glob(expr.glob) else: return str(expr) def eval_seq(self, seq: BangSeq) -> Union[str, Iterator[Path]]: exprs: Deque[Any] = deque(seq.items) accum = '' while exprs: expr = exprs.popleft() if isinstance(expr, BangGlob): if exprs: raise RuntimeError('Globbing can only occur at the end of a seq') return PathWrapper(accum).glob(expr.glob) accum += self.eval_expr(expr) return accum def eval(self): mut_args = deque(self.args) cmd = self.eval_command(mut_args) while mut_args: arg = mut_args.popleft() assert isinstance(arg, BangOp), 'Expected OP but found: {}'.format(arg) assert len(mut_args) > 0, 'No operands left!' 
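# Operator dispatch below: '|' and '^' consume another command from the argument queue, while '>' and '>>' take a single evaluated expression as the redirect target.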
if arg.op == '|': cmd |= self.eval_command(mut_args) elif arg.op == '^': cmd ^= self.eval_command(mut_args) elif arg.op == '>': cmd = cmd > self.eval_expr(mut_args.popleft()) elif arg.op == '>>': cmd = cmd >> self.eval_expr(mut_args.popleft()) else: raise RuntimeError('Unsupported operator {}'.format(arg.op)) return cmd def __str__(self): return str(self.eval()) def __repr__(self): return 'BangExpr<{!r}>'.format(self.args) class BangBang: __slots__ = ('code',) def __init__(self, code): self.code = code def eval(self): #TODO: Detect shebang and use it instead of default shell import sys, subprocess result = subprocess.run( ['bash', '-c', self.code], encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.PIPE) if result.stderr: print(result.stderr, file=sys.stderr) if result.returncode > 0: if result.stdout: print(result.stdout) raise pysh.ExitStatusError(result.returncode) return result.stdout def __str__(self): return str(self.eval()) def __repr__(self): return 'BangBang<{}>'.format(self.code) def parse_bangexpr(code: str) -> str: as_str = lambda s: "'{}'".format(s.replace("\\", "\\\\").replace("'", "\\'")) lexer = BangLexer().scan(code) seq = [] exprs = [] while True: tkn = next(lexer, None) if tkn and tkn.type != BangTokenType.OP: if tkn.type in (BangTokenType.LOCAL, BangTokenType.EXPR): seq.append(tkn.value) elif tkn.type == BangTokenType.ENV: seq.append('pysh.BangEnv({})'.format(as_str(tkn.value))) elif tkn.type == BangTokenType.OPAQUE: seq.append('{}'.format(as_str(tkn.value))) elif tkn.type == BangTokenType.GLOB: seq.append('pysh.BangGlob({})'.format(as_str(tkn.value))) else: assert False, 'Unexpected token {}'.format(tkn.type) continue if seq: if len(seq) > 1: exprs.append('pysh.BangSeq({})'.format(', '.join(seq))) else: exprs.append(seq[0]) seq = [] if not tkn: break assert tkn.type == BangTokenType.OP if tkn.value == ' ': continue exprs.append('pysh.BangOp("{}")'.format(tkn.value)) # We need to provide locals/globals so we can resolve commands to variables return 'pysh.BangExpr({}, locals=locals(), globals=globals())'.format(', '.join(exprs)) def transform(code: StringIO, transformer: TBangTransformer) -> Iterator[str]: """ Scans python code to transform bang expressions. Given some python code it will extract bang expressions and process them with a callback that can report back the transformation. Returns a generator that allows to consume the transformed code line by line. 
""" tokens = tokenize.generate_tokens(code.readline) bangexpr = [] # type: List[str] bangcont = False prebang = None ptkn = None indent = 0 bang_indent = -100 last_bang_line = -100 for ctkn in tokens: if ctkn.type == tokenize.INDENT: indent += 1 if last_bang_line + 1 == ctkn.start[0]: bang_indent = indent elif ctkn.type == tokenize.DEDENT: indent -= 1 if bang_indent > indent: bang_indent = -100 # due to continuations we can't rely on NEWLINE tokens, instead we have # use the lexical information to detect when we're on a new line #TODO: Support indent/dedent for multiline if ptkn and ctkn.start[0] > ptkn.start[0]: if bangcont or bang_indent == indent: if ctkn.type is tokenize.ENDMARKER: raise SyntaxError('BangExpr continuation at program end') line = ctkn.line.rstrip('\r\n') bangexpr.append(line) bangcont = line.endswith('\\') last_bang_line = ctkn.start[0] elif bangexpr: lines = list(transformer(bangexpr)) assert len(lines) <= len(bangexpr) if lines and prebang: lines[0] = prebang + lines[0] yield from lines bangexpr = [] last_bang_line = ptkn.start[0] else: yield ptkn.line ptkn = ctkn if bangexpr: continue if ctkn.string == '!': col = ctkn.start[1] prebang = ctkn.line[0:col] line = ctkn.line[col+1:].lstrip(' \t').rstrip('\r\n') bangexpr.append(line.rstrip('\\')) bangcont = line.endswith('\\') last_bang_line = ctkn.start[0] assert not bangexpr, bangexpr def transformer(lines: List[str]) -> Iterator[str]: if lines[0].startswith('!'): #TODO: Detect $ident to expose them on env when evaluated lines[0] = lines[0][1:] code = '\n'.join(lines) code = code.strip().replace("'", "\\'").replace("\\", "\\\\") code = "pysh.BangBang('{}')".format(code) lines = code.split('\n') for line in lines: yield line else: yield from parse_bangexpr(' '.join(lines)).split('\n') from io import StringIO code = r''' foo = ! ls foo${bar}.* \ | grep foo > /dev/null foo = r' ls foo${bar} ' >> expr expr<' ls foo${bar} ' !! #!/bin/fish ls .* '''.strip() #TODO: !! is probably better solved with: # locals are solved with inspect.frame.f_locals sh << r''' # << means with variables interpolated # < is plain text ls .* ''' for line in transform(StringIO(code), transformer): print(line.rstrip('\n')) from pysh.command import command ls = command('ls') grep = command('grep') bar = 10 print('::BangExpr::') be = BangExpr('ls', BangSeq('foo', bar, BangGlob('.*')), BangOp("|"), 'grep', 'foo', 'baz', BangOp(">"), '/dev/null', locals=locals(), globals=globals()) # print(be) print('::BangBang::') bb = BangBang('''#!/bin/bash ls *.py''') print(bb)
2.25
2
example/bayesian-methods/data_loader.py
Vikas-kum/incubator-mxnet
54
3165
<gh_stars>10-100 # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from __future__ import print_function import numpy import os import ssl def load_mnist(training_num=50000): data_path = os.path.join(os.path.dirname(os.path.realpath('__file__')), 'mnist.npz') if not os.path.isfile(data_path): from six.moves import urllib origin = ( 'https://github.com/sxjscience/mxnet/raw/master/example/bayesian-methods/mnist.npz' ) print('Downloading data from %s to %s' % (origin, data_path)) ctx = ssl._create_unverified_context() with urllib.request.urlopen(origin, context=ctx) as u, open(data_path, 'wb') as f: f.write(u.read()) print('Done!') dat = numpy.load(data_path) X = (dat['X'][:training_num] / 126.0).astype('float32') Y = dat['Y'][:training_num] X_test = (dat['X_test'] / 126.0).astype('float32') Y_test = dat['Y_test'] Y = Y.reshape((Y.shape[0],)) Y_test = Y_test.reshape((Y_test.shape[0],)) return X, Y, X_test, Y_test def load_toy(): training_data = numpy.loadtxt('toy_data_train.txt') testing_data = numpy.loadtxt('toy_data_test_whole.txt') X = training_data[:, 0].reshape((training_data.shape[0], 1)) Y = training_data[:, 1].reshape((training_data.shape[0], 1)) X_test = testing_data[:, 0].reshape((testing_data.shape[0], 1)) Y_test = testing_data[:, 1].reshape((testing_data.shape[0], 1)) return X, Y, X_test, Y_test def load_synthetic(theta1, theta2, sigmax, num=20): flag = numpy.random.randint(0, 2, (num,)) X = flag * numpy.random.normal(theta1, sigmax, (num,)) \ + (1.0 - flag) * numpy.random.normal(theta1 + theta2, sigmax, (num,)) return X
1.9375
2
start.py
mickeyckm/nanodegree-freshtomatoes
1
3166
import os import tmdbsimple as tmdb import media import fresh_tomatoes as ft movies = [] if os.environ.get('TMDB_API', False): # Retrieve API KEY tmdb.API_KEY = os.environ['TMDB_API'] # TMDB Movie Ids movie_ids = [271110, 297761, 246655, 278154, 135397, 188927] # Get Configuration configuration = tmdb.Configuration().info() image_base_url = configuration['images']['secure_base_url'] image_width = "w500" for movie_id in movie_ids: m = tmdb.Movies(movie_id) # Retrieve Image URL minfo = m.info() poster_image_url = image_base_url + image_width + minfo['poster_path'] # Retrieve Youtube Video URL videos = m.videos() video = videos['results'][0] youtube_url = 'https://youtube.com/watch?v=' + video['key'] # Append Movie object movie = media.Movie(m.title) movie.storyline = m.overview movie.poster_url = poster_image_url movie.trailer_url = youtube_url movies.append(movie) else: # Avatar avatar = media.Movie("Avatar") avatar.storyline = ("A paraplegic marine dispatched to the moon Pandora " "on a unique mission becomes torn between following " "his orders and protecting the world he feels is " "his home.") avatar.poster_url = ("https://upload.wikimedia.org/wikipedia/" "en/b/b0/Avatar-Teaser-Poster.jpg") avatar.trailer_url = "https://www.youtube.com/watch?v=-9ceBgWV8io" # Deadpool deadpool = media.Movie("Deadpool") deadpool.storyline = ("A fast-talking mercenary with a morbid sense of " "humor is subjected to a rogue experiment that " "leaves him with accelerated healing powers and a " "quest for revenge.") deadpool.poster_url = ("https://upload.wikimedia.org/wikipedia/en/4/46/" "Deadpool_poster.jpg") deadpool.trailer_url = "https://www.youtube.com/watch?v=gtTfd6tISfw" # Ghostbusters ghostbusters = media.Movie("Ghostbusters") ghostbusters.storyline = ("Following a ghost invasion of Manhattan, " "paranormal enthusiasts <NAME> and Abby " "Yates, nuclear engineer <NAME>, " "and subway worker <NAME> band together " "to stop the otherworldly threat.") ghostbusters.poster_url = ("https://upload.wikimedia.org/wikipedia/" "en/3/32/Ghostbusters_2016_film_poster.png") ghostbusters.trailer_url = "https://www.youtube.com/watch?v=w3ugHP-yZXw" # Olympus olympus = media.Movie("Olympus Has Fallen") olympus.storyline = ("Disgraced Secret Service agent (and former " "presidential guard) <NAME> finds himself " "trapped inside the White House in the wake of a " "terrorist attack; using his inside knowledge, " "Banning works with national security to rescue " "the President from his kidnappers.") olympus.poster_url = ("https://upload.wikimedia.org/wikipedia/en/b/bf/" "Olympus_Has_Fallen_poster.jpg") olympus.trailer_url = "https://www.youtube.com/watch?v=vwx1f0kyNwI" # Angry Birds angry_birds = media.Movie("The Angry Birds Movie") angry_birds.storyline = ("Find out why the birds are so angry. 
When an " "island populated by happy, flightless birds " "is visited by mysterious green piggies, it's " "up to three unlikely outcasts - Red, Chuck " "and Bomb - to figure out what the pigs are up " "to.") angry_birds.poster_url = ("https://upload.wikimedia.org/wikipedia/en/f/" "f9/The_Angry_Birds_Movie_poster.png") angry_birds.trailer_url = "https://www.youtube.com/watch?v=1U2DKKqxHgE" # Ironman ironman = media.Movie("Iron Man") ironman.storyline = ("After being held captive in an Afghan cave, " "billionaire engineer <NAME> creates a unique " "weaponized suit of armor to fight evil.") ironman.poster_url = ("https://upload.wikimedia.org/wikipedia/en/7/70/" "Ironmanposter.JPG") ironman.trailer_url = "https://www.youtube.com/watch?v=8hYlB38asDY" movies = [avatar, deadpool, ghostbusters, olympus, angry_birds, ironman] ft.open_movies_page(movies)
2.59375
3
qiskit_metal/_gui/elements_ui.py
sarafs1926/qiskit-metal
1
3167
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file './elements_ui.ui', # licensing of './elements_ui.ui' applies. # # Created: Wed Jun 16 14:29:03 2021 # by: pyside2-uic running on PySide2 5.13.2 # # WARNING! All changes made in this file will be lost! from PySide2 import QtCore, QtGui, QtWidgets class Ui_ElementsWindow(object): def setupUi(self, ElementsWindow): ElementsWindow.setObjectName("ElementsWindow") ElementsWindow.resize(841, 623) self.centralwidget = QtWidgets.QWidget(ElementsWindow) self.centralwidget.setObjectName("centralwidget") self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.centralwidget) self.verticalLayout_2.setSpacing(0) self.verticalLayout_2.setContentsMargins(0, 0, 0, 0) self.verticalLayout_2.setObjectName("verticalLayout_2") self.verticalLayout = QtWidgets.QVBoxLayout() self.verticalLayout.setSizeConstraint( QtWidgets.QLayout.SetDefaultConstraint) self.verticalLayout.setObjectName("verticalLayout") self.horizontalLayout = QtWidgets.QHBoxLayout() self.horizontalLayout.setObjectName("horizontalLayout") self.btn_refresh = QtWidgets.QPushButton(self.centralwidget) self.btn_refresh.setCursor(QtCore.Qt.ClosedHandCursor) self.btn_refresh.setText("") icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(":/refresh"), QtGui.QIcon.Normal, QtGui.QIcon.Off) self.btn_refresh.setIcon(icon) self.btn_refresh.setIconSize(QtCore.QSize(20, 20)) self.btn_refresh.setAutoDefault(False) self.btn_refresh.setDefault(False) self.btn_refresh.setFlat(True) self.btn_refresh.setObjectName("btn_refresh") self.horizontalLayout.addWidget(self.btn_refresh) self.label = QtWidgets.QLabel(self.centralwidget) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.label.sizePolicy().hasHeightForWidth()) self.label.setSizePolicy(sizePolicy) font = QtGui.QFont() font.setWeight(75) font.setBold(True) self.label.setFont(font) self.label.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter) self.label.setObjectName("label") self.horizontalLayout.addWidget(self.label) self.combo_element_type = QtWidgets.QComboBox(self.centralwidget) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.combo_element_type.sizePolicy().hasHeightForWidth()) self.combo_element_type.setSizePolicy(sizePolicy) self.combo_element_type.setCurrentText("") self.combo_element_type.setSizeAdjustPolicy( QtWidgets.QComboBox.AdjustToContents) self.combo_element_type.setObjectName("combo_element_type") self.horizontalLayout.addWidget(self.combo_element_type) self.line = QtWidgets.QFrame(self.centralwidget) self.line.setFrameShape(QtWidgets.QFrame.VLine) self.line.setFrameShadow(QtWidgets.QFrame.Sunken) self.line.setObjectName("line") self.horizontalLayout.addWidget(self.line) self.label_3 = QtWidgets.QLabel(self.centralwidget) font = QtGui.QFont() font.setWeight(75) font.setBold(True) self.label_3.setFont(font) self.label_3.setObjectName("label_3") self.horizontalLayout.addWidget(self.label_3) self.label_2 = QtWidgets.QLabel(self.centralwidget) self.label_2.setObjectName("label_2") self.horizontalLayout.addWidget(self.label_2) self.lineEdit = QtWidgets.QLineEdit(self.centralwidget) self.lineEdit.setObjectName("lineEdit") self.horizontalLayout.addWidget(self.lineEdit) self.label_4 = 
QtWidgets.QLabel(self.centralwidget) self.label_4.setObjectName("label_4") self.horizontalLayout.addWidget(self.label_4) self.lineEdit_2 = QtWidgets.QLineEdit(self.centralwidget) self.lineEdit_2.setObjectName("lineEdit_2") self.horizontalLayout.addWidget(self.lineEdit_2) self.line_2 = QtWidgets.QFrame(self.centralwidget) self.line_2.setFrameShape(QtWidgets.QFrame.VLine) self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_2.setObjectName("line_2") self.horizontalLayout.addWidget(self.line_2) self.verticalLayout.addLayout(self.horizontalLayout) self.tableElements = QtWidgets.QTableView(self.centralwidget) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth( self.tableElements.sizePolicy().hasHeightForWidth()) self.tableElements.setSizePolicy(sizePolicy) self.tableElements.setProperty("showDropIndicator", False) self.tableElements.setDragDropOverwriteMode(False) self.tableElements.setAlternatingRowColors(True) self.tableElements.setSortingEnabled(False) self.tableElements.setObjectName("tableElements") self.verticalLayout.addWidget(self.tableElements) self.verticalLayout_2.addLayout(self.verticalLayout) ElementsWindow.setCentralWidget(self.centralwidget) self.menubar = QtWidgets.QMenuBar() self.menubar.setGeometry(QtCore.QRect(0, 0, 841, 22)) self.menubar.setObjectName("menubar") ElementsWindow.setMenuBar(self.menubar) self.statusbar = QtWidgets.QStatusBar(ElementsWindow) self.statusbar.setEnabled(True) self.statusbar.setObjectName("statusbar") ElementsWindow.setStatusBar(self.statusbar) self.retranslateUi(ElementsWindow) QtCore.QObject.connect(self.combo_element_type, QtCore.SIGNAL("currentIndexChanged(QString)"), ElementsWindow.combo_element_type) QtCore.QObject.connect(self.btn_refresh, QtCore.SIGNAL("clicked()"), ElementsWindow.force_refresh) QtCore.QMetaObject.connectSlotsByName(ElementsWindow) def retranslateUi(self, ElementsWindow): ElementsWindow.setWindowTitle( QtWidgets.QApplication.translate("ElementsWindow", "MainWindow", None, -1)) self.btn_refresh.setToolTip( QtWidgets.QApplication.translate("ElementsWindow", "Force refresh the table ", None, -1)) self.btn_refresh.setStatusTip( QtWidgets.QApplication.translate("ElementsWindow", "Force refresh the table ", None, -1)) self.btn_refresh.setWhatsThis( QtWidgets.QApplication.translate("ElementsWindow", "Force refresh the table ", None, -1)) self.btn_refresh.setAccessibleDescription( QtWidgets.QApplication.translate("ElementsWindow", "Force refresh the table ", None, -1)) self.label.setText( QtWidgets.QApplication.translate("ElementsWindow", "Element type: ", None, -1)) self.combo_element_type.setToolTip( QtWidgets.QApplication.translate( "ElementsWindow", "<html><head/><body><p>Select the element table you wish to view</p></body></html>", None, -1)) self.label_3.setText( QtWidgets.QApplication.translate("ElementsWindow", " Filter: ", None, -1)) self.label_2.setText( QtWidgets.QApplication.translate("ElementsWindow", "Component: ", None, -1)) self.label_4.setText( QtWidgets.QApplication.translate("ElementsWindow", " Layer: ", None, -1)) from . import main_window_rc_rc
1.40625
1
Python/function.py
manishaverma1012/programs
0
3168
def cube(number): return number*number*number digit = input(" the cube of which digit do you want >") result = cube(int(digit)) print(result)
4.0625
4
tests/test_runner.py
elifesciences/proofreader-python
1
3169
<reponame>elifesciences/proofreader-python<gh_stars>1-10 try: from unittest.mock import patch except ImportError: # pragma: no cover from mock import patch from proofreader.runner import run, _run_command def test_it_will_return_1_exit_code_on_failure(bad_py_file): try: run(targets=[bad_py_file.strpath]) except SystemExit as exception: assert exception.code == 1 def test_it_will_return_zero_exit_code_on_success(good_py_file): try: run(targets=[good_py_file.strpath]) except SystemExit as exception: assert exception.code == 0 def test_it_returns_zero_exit_code_on_builtin_shadowing_fail(builtin_fail_py_file): try: run(targets=[builtin_fail_py_file.strpath]) except SystemExit as exception: assert exception.code == 0 def test_run_command_will_return_a_bool(): with patch('proofreader.runner.Popen') as mock_popen: mock_popen.returncode = 0 result = _run_command('dummy_cmd', [''], ['']) assert isinstance(result, bool) def test_will_return_zero_on_success_with_license_check(good_py_file): try: run(targets=[good_py_file.strpath], check_licenses=True) except SystemExit as exception: assert exception.code == 0
2.671875
3
tanim/core/container/container.py
wofeicaoge/Tanim
0
3170
from tanim.utils.config_ops import digest_config from tanim.utils.iterables import list_update # Currently, this is only used by both Scene and Mobject. # Still, we abstract its functionality here, albeit purely nominally. # All actual implementation has to be handled by derived classes for now. class Container(object): def __init__(self, **kwargs): digest_config(self, kwargs) self.submobjects = [] # Is it really better to name it submobjects? def add(self, *mobjects): if self in mobjects: raise Exception("Mobject cannot contain self") self.submobjects = list_update(self.submobjects, mobjects) return self def add_to_back(self, *mobjects): self.remove(*mobjects) self.submobjects = list(mobjects) + self.submobjects return self def remove(self, *mobjects, ): for mobject in mobjects: for submod in self.submobjects: if isinstance(submod, GroupContainer): submod.remove(mobject) elif mobject == submod: self.submobjects.remove(mobject) return self class GroupContainer(Container): def __init__(self, *containers, **kwargs): self.add(*containers)
2.46875
2
article.py
ZACHSTRIVES/AUCSS-StaffPlatform
3
3171
from config import * def fetch_all_article(): try: cur = db.cursor() sql = "SELECT * FROM article WHERE article_status='N'" db.ping(reconnect=True) cur.execute(sql) result = cur.fetchall() db.commit() cur.close() return result except Exception as e: print(e) def add_article_to_db(title, due): try: cur = db.cursor() sql = "INSERT INTO article(article_title,article_dueday)VALUES ('%s','%s')" % (title, due) db.ping(reconnect=True) cur.execute(sql) db.commit() cur.close() except Exception as e: print(e) def fetch_all_mkt_staff(): try: cur = db.cursor() sql = "SELECT Name,email FROM user WHERE type=5" db.ping(reconnect=True) cur.execute(sql) result = cur.fetchall() db.commit() cur.close() return result except Exception as e: print(e) def get_article_id(title): try: cur = db.cursor() sql = "SELECT article_id FROM article WHERE article_title='%s' AND article_status='N'" % title db.ping(reconnect=True) cur.execute(sql) result = cur.fetchone() db.commit() cur.close() return result except Exception as e: print(e) def add_works_to_db(article_id, type, staff, work_due): try: cur = db.cursor() sql = "INSERT INTO article_works(works_type,works_article,works_dueday,works_staff)VALUES (%s,%s,'%s','%s');" % ( type, article_id, work_due, staff) db.ping(reconnect=True) cur.execute(sql) db.commit() cur.close() except Exception as e: print(e) def get_article_s_work(id): try: cur = db.cursor() sql = "SELECT * FROM article_works WHERE works_article=%s ORDER BY works_type" % id db.ping(reconnect=True) cur.execute(sql) result = cur.fetchall() db.commit() cur.close() return result except Exception as e: print(e) def get_user_name(email): try: cur = db.cursor() sql = "SELECT Name FROM user WHERE email='%s'" % email db.ping(reconnect=True) cur.execute(sql) result = cur.fetchone() db.commit() cur.close() return result except Exception as e: print(e) def get_works_list(articles): res = {} for i in range(0, len(articles)): id = articles[i][0] work = [] works = get_article_s_work(id) for w in works: my_list = [w[0], w[1], w[3], get_user_name(w[5])[0]] work.append(my_list) res[id] = work return res def get_your_task_with_article(email, id): try: cur = db.cursor() sql = "SELECT * FROM article_works WHERE works_staff='%s' AND works_article=%s" % (email, id) db.ping(reconnect=True) cur.execute(sql) result = cur.fetchall() db.commit() cur.close() return result except Exception as e: print(e) def get_task_list(email, articles): res = {} for a in articles: id = a[0] tasks = get_your_task_with_article(email, id) res[id] = tasks return res def update_finish_status(type, id): try: type = int(type) cur = db.cursor() sql = '' if type == 1: sql = "UPDATE article SET banner_status='Y' WHERE article_id=%s" % id elif type == 2: sql = "UPDATE article SET text_status='Y' WHERE article_id=%s" % id elif type == 3: sql = "UPDATE article SET style_status='Y' WHERE article_id=%s" % id db.ping(reconnect=True) cur.execute(sql) db.commit() cur.close() except Exception as e: print(e) def update_task_status(id): try: cur = db.cursor() sql = "UPDATE article_works SET is_finished='Y' WHERE works_num=%s" % id db.ping(reconnect=True) cur.execute(sql) db.commit() cur.close() except Exception as e: print(e) def finish_task_in_db(task, article, type): update_task_status(task) update_finish_status(type, article) def count_person_performance(type, email): try: cur = db.cursor() sql = "SELECT * FROM article_works WHERE works_staff='%s' AND works_type=%s AND is_finished='Y'" % (email, type) db.ping(reconnect=True) cur.execute(sql) res = cur.fetchall() 
db.commit() cur.close() return res except Exception as e: print(e) def count_performance(): all_staff = fetch_all_mkt_staff() performance_list = [] for s in all_staff: email = s[1] banner = count_person_performance(1, email) text = count_person_performance(2, email) style = count_person_performance(3, email) p_list = [s[0], len(banner), len(text), len(style)] performance_list.append(p_list) return performance_list
2.953125
3
12-Querying-Data-II/just_filtering.py
dwang-ischool/w205
23
3172
<reponame>dwang-ischool/w205<gh_stars>10-100 #!/usr/bin/env python """Extract events from kafka and write them to hdfs """ import json from pyspark.sql import SparkSession, Row from pyspark.sql.functions import udf @udf('boolean') def is_purchase(event_as_json): event = json.loads(event_as_json) if event['event_type'] == 'purchase_sword': return True return False def main(): """main """ spark = SparkSession \ .builder \ .appName("ExtractEventsJob") \ .getOrCreate() raw_events = spark \ .read \ .format("kafka") \ .option("kafka.bootstrap.servers", "kafka:29092") \ .option("subscribe", "events") \ .option("startingOffsets", "earliest") \ .option("endingOffsets", "latest") \ .load() purchase_events = raw_events \ .select(raw_events.value.cast('string').alias('raw'), raw_events.timestamp.cast('string')) \ .filter(is_purchase('raw')) extracted_purchase_events = purchase_events \ .rdd \ .map(lambda r: Row(timestamp=r.timestamp, **json.loads(r.raw))) \ .toDF() extracted_purchase_events.printSchema() extracted_purchase_events.show() if __name__ == "__main__": main()
2.65625
3
test.py
navjotk/pysz
3
3173
import numpy as np from pysz import compress, decompress def test_compress_decompress(): a = np.linspace(0, 100, num=1000000).reshape((100, 100, 100)).astype(np.float32) tolerance = 0.0001 compressed = compress(a, tolerance=tolerance) recovered = decompress(compressed, a.shape, a.dtype) assert(a.shape == recovered.shape) assert(np.allclose(a, recovered, atol=tolerance)) test_compress_decompress()
2.46875
2
sparkdq/outliers/params/KSigmaParams.py
PasaLab/SparkDQ
1
3174
import json from sparkdq.outliers.params.OutlierSolverParams import OutlierSolverParams from sparkdq.outliers.OutlierSolver import OutlierSolver class KSigmaParams(OutlierSolverParams): def __init__(self, deviation=1.5): self.deviation = deviation def model(self): return OutlierSolver.kSigma @staticmethod def from_json(json_str): d = json.loads(json_str) return KSigmaParams(d["deviation"])
2.453125
2
webhooks/sentry/alerta_sentry.py
dunzoit/alerta-contrib
0
3175
<reponame>dunzoit/alerta-contrib from alerta.models.alert import Alert from alerta.webhooks import WebhookBase class SentryWebhook(WebhookBase): def incoming(self, query_string, payload): # For Sentry v9 # Defaults to value before Sentry v9 if 'request' in payload.get('event'): key = 'request' else: key = 'sentry.interfaces.Http' if payload.get('event')[key]['env'].get('ENV', 'prod') == 'prod': environment = 'Production' else: environment = 'Development' if payload['level'] == 'error': severity = 'critical' else: severity = 'ok' return Alert( resource=payload['culprit'], event=payload['event']['event_id'], environment=environment, severity=severity, service=[payload['project']], group='Application', value=payload['level'], text='{}\n{}\n{}'.format(payload['message'], payload['event'].get('title', ''), payload['url']), tags=['{}={}'.format(k, v) for k, v in payload['event']['tags']], attributes={'modules': ['{}=={}'.format(k, v) for k, v in payload['event']['modules'].items()]}, origin='sentry.io', raw_data=str(payload) )
2.078125
2
XMLHttpRequest/resources/shift-jis-html.py
watilde/web-platform-tests
4
3176
def main(request, response): headers = [("Content-type", "text/html;charset=shift-jis")] # Shift-JIS bytes for katakana TE SU TO ('test') content = chr(0x83) + chr(0x65) + chr(0x83) + chr(0x58) + chr(0x83) + chr(0x67) return headers, content
2.15625
2
setup.py
dolfim/django-mail-gmailapi
0
3177
import re from setuptools import setup, find_packages import sys if sys.version_info < (3, 5): raise RuntimeError('must use Python version 3.5 or higher') with open('./gmailapi_backend/__init__.py', 'r') as f: MATCH_EXPR = "__version__[^'\"]+(['\"])([^'\"]+)" VERSION = re.search(MATCH_EXPR, f.read()).group(2).strip() setup( name='django-gmailapi-backend', version=VERSION, packages=find_packages(), author="<NAME>", author_email="<EMAIL>", license="Apache License 2.0", entry_points={ 'console_scripts': [ 'gmail_oauth2 = gmailapi_backend.bin.gmail_oauth2:main', ] }, install_requires=[ 'google-api-python-client~=2.0', 'google-auth>=1.16.0,<3.0.0dev', ], url="https://github.com/dolfim/django-gmailapi-backend", long_description_content_type='text/markdown', long_description=open('README.md').read(), description='Email backend for Django which sends email via the Gmail API', classifiers=[ 'Intended Audience :: Developers', 'License :: OSI Approved :: Apache Software License', 'Operating System :: MacOS :: MacOS X', 'Operating System :: Microsoft :: Windows', 'Operating System :: POSIX', 'Programming Language :: Python', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Framework :: Django', 'Topic :: Communications :: Email', 'Development Status :: 4 - Beta' ], )
1.539063
2
openpeerpower/scripts/ensure_config.py
OpenPeerPower/openpeerpower
0
3178
"""Script to ensure a configuration file exists.""" import argparse import os import openpeerpower.config as config_util from openpeerpower.core import OpenPeerPower # mypy: allow-untyped-calls, allow-untyped-defs def run(args): """Handle ensure config commandline script.""" parser = argparse.ArgumentParser( description=( "Ensure a Open Peer Power config exists, creates one if necessary." ) ) parser.add_argument( "-c", "--config", metavar="path_to_config_dir", default=config_util.get_default_config_dir(), help="Directory that contains the Open Peer Power configuration", ) parser.add_argument("--script", choices=["ensure_config"]) args = parser.parse_args() config_dir = os.path.join(os.getcwd(), args.config) # Test if configuration directory exists if not os.path.isdir(config_dir): print("Creating directory", config_dir) os.makedirs(config_dir) opp = OpenPeerPower() opp.config.config_dir = config_dir config_path = opp.loop.run_until_complete(async_run(opp)) print("Configuration file:", config_path) return 0 async def async_run(opp): """Make sure config exists.""" path = await config_util.async_ensure_config_exists(opp) await opp.async_stop(force=True) return path
3
3
atcoder/abc132A_fifty_fifty.py
uninhm/kyopro
31
3179
# Vicfred # https://atcoder.jp/contests/abc132/tasks/abc132_a # implementation S = list(input()) if len(set(S)) == 2: if S.count(S[0]) == 2: print("Yes") quit() print("No")
3.46875
3
dabl/plot/tests/test_supervised.py
nrohan09-cloud/dabl
500
3180
<reponame>nrohan09-cloud/dabl<gh_stars>100-1000 import pytest import numpy as np import pandas as pd import matplotlib.pyplot as plt import itertools from sklearn.datasets import (make_regression, make_blobs, load_digits, fetch_openml, load_diabetes) from sklearn.preprocessing import KBinsDiscretizer from dabl.preprocessing import clean, detect_types, guess_ordinal from dabl.plot.supervised import ( plot, plot_classification_categorical, plot_classification_continuous, plot_regression_categorical, plot_regression_continuous) from dabl.utils import data_df_from_bunch from dabl import set_config # FIXME: check that target is not y but a column name @pytest.mark.filterwarnings('ignore:the matrix subclass') @pytest.mark.parametrize("continuous_features, categorical_features, task", itertools.product([0, 1, 3, 100], [0, 1, 3, 100], ['classification', 'regression'])) def test_plots_smoke(continuous_features, categorical_features, task): # simple smoke test # should be parametrized n_samples = 100 X_cont, y_cont = make_regression( n_samples=n_samples, n_features=continuous_features, n_informative=min(continuous_features, 2)) X_cat, y_cat = make_regression( n_samples=n_samples, n_features=categorical_features, n_informative=min(categorical_features, 2)) if X_cat.shape[1] > 0: X_cat = KBinsDiscretizer(encode='ordinal').fit_transform(X_cat) cont_columns = ["asdf_%d_cont" % i for i in range(continuous_features)] df_cont = pd.DataFrame(X_cont, columns=cont_columns) if categorical_features > 0: cat_columns = ["asdf_%d_cat" % i for i in range(categorical_features)] df_cat = pd.DataFrame(X_cat, columns=cat_columns).astype('int') df_cat = df_cat.astype("category") X_df = pd.concat([df_cont, df_cat], axis=1) else: X_df = df_cont assert(X_df.shape[1] == continuous_features + categorical_features) X_clean = clean(X_df.copy()) y = y_cont + y_cat if X_df.shape[1] == 0: y = np.random.uniform(size=n_samples) if task == "classification": y = np.digitize(y, np.percentile(y, [5, 10, 60, 85])) X_clean['target'] = y if task == "classification": X_clean['target'] = X_clean['target'].astype('category') types = detect_types(X_clean) column_types = types.T.idxmax() assert np.all(column_types[:continuous_features] == 'continuous') assert np.all(column_types[continuous_features:-1] == 'categorical') if task == "classification": assert column_types[-1] == 'categorical' else: assert column_types[-1] == 'continuous' plot(X_clean, target_col='target') plt.close("all") @pytest.mark.parametrize("add, feature_type, target_type", itertools.product([0, .1], ['continuous', 'categorical'], ['continuous', 'categorical'])) def test_type_hints(add, feature_type, target_type): X = pd.DataFrame(np.random.randint(4, size=100)) + add X['target'] = np.random.uniform(size=100) plot(X, type_hints={0: feature_type, 'target': target_type}, target_col='target') # get title of figure text = plt.gcf()._suptitle.get_text() assert feature_type.capitalize() in text ax = plt.gca() # one of the labels is 'target' iif regression labels = ax.get_ylabel() + ax.get_xlabel() assert ('target' in labels) == (target_type == 'continuous') plt.close("all") def test_float_classification_target(): # check we can plot even if we do classification with a float target X, y = make_blobs() data = pd.DataFrame(X) data['target'] = y.astype(np.float) types = detect_types(data) assert types.categorical['target'] plot(data, target_col='target') # same with "actual float" - we need to specify classification for that :-/ data['target'] = y.astype(np.float) + .2 plot(data, 
target_col='target', type_hints={'target': 'categorical'}) plt.close("all") @pytest.mark.filterwarnings('ignore:Discarding near-constant') def test_plot_classification_n_classes(): X, y = make_blobs() X = pd.DataFrame(X) X['target'] = 0 with pytest.raises(ValueError, match="Less than two classes"): plot_classification_categorical(X, 'target') with pytest.raises(ValueError, match="Less than two classes"): plot_classification_continuous(X, 'target') def test_plot_wrong_target_type(): X, y = make_blobs() X = pd.DataFrame(X) X['target'] = y with pytest.raises(ValueError, match="need continuous"): plot_regression_categorical(X, 'target') with pytest.raises(ValueError, match="need continuous"): plot_regression_continuous(X, 'target') X['target'] = X[0] with pytest.raises(ValueError, match="need categorical"): plot_classification_categorical(X, 'target') with pytest.raises(ValueError, match="need categorical"): plot_classification_continuous(X, 'target') def test_plot_target_low_card_int(): data = load_digits() df = data_df_from_bunch(data) plot(df[::10], target_col='target') def test_plot_X_y(): X, y = make_blobs() X = pd.DataFrame(X) plot(X, y) def test_plot_regression_numpy(): X, y = make_regression() plot(X, y) def test_plot_lda_binary(): X, y = make_blobs(centers=2) X = pd.DataFrame(X) plot(X, y, univariate_plot='kde') def test_plot_int_column_name(): X, y = make_blobs() X = pd.DataFrame(X) X[3] = y plot(X, target_col=3) def test_negative_ordinal(): # check that a low card int with negative values is plotted correctly data = pd.DataFrame([np.random.randint(0, 10, size=1000) - 5, np.random.randint(0, 2, size=1000)]).T # ensure first column is low_card_int assert (detect_types(data).T.idxmax() == ['low_card_int', 'categorical']).all() assert guess_ordinal(data[0]) # smoke test plot(data, target_col=1) def test_large_ordinal(): # check that large integers don't bring us down (bincount memory error) # here some random phone numbers assert not guess_ordinal(pd.Series([6786930208, 2142878625, 9106275431])) def test_plot_classification_continuous(): data = fetch_openml('MiceProtein') df = data_df_from_bunch(data) # only univariate plots figures = plot_classification_continuous(df, target_col='target', plot_pairwise=False) assert len(figures) == 1 # top 10 axes assert len(figures[0].get_axes()) == 10 # six is the minimum number of features for histograms # (last column is target) figures = plot_classification_continuous(df.iloc[:, -7:], target_col='target', plot_pairwise=False) assert len(figures) == 1 assert len(figures[0].get_axes()) == 6 # for 5 features, do full pairplot figures = plot_classification_continuous(df.iloc[:, -6:], target_col='target', plot_pairwise=False) assert len(figures) == 1 # diagonal has twin axes assert len(figures[0].get_axes()) == 5 * 5 + 5 # also do pairwise plots figures = plot_classification_continuous(df, target_col='target', random_state=42) # univariate, pairwise, pca, lda assert len(figures) == 4 # univariate axes = figures[0].get_axes() assert len(axes) == 10 # known result assert axes[0].get_xlabel() == "SOD1_N" # bar plot never has ylabel assert axes[0].get_ylabel() == "" # pairwise axes = figures[1].get_axes() assert len(axes) == 4 # known result assert axes[0].get_xlabel() == "SOD1_N" assert axes[0].get_ylabel() == 'S6_N' # PCA axes = figures[2].get_axes() assert len(axes) == 4 # known result assert axes[0].get_xlabel() == "PCA 1" assert axes[0].get_ylabel() == 'PCA 5' # LDA axes = figures[3].get_axes() assert len(axes) == 4 # known result assert 
axes[0].get_xlabel() == "LDA 0" assert axes[0].get_ylabel() == 'LDA 1' def test_plot_string_target(): X, y = make_blobs(n_samples=30) data = pd.DataFrame(X) y = pd.Series(y) y[y == 0] = 'a' y[y == 1] = 'b' y[y == 2] = 'c' data['target'] = y plot(data, target_col='target') def test_na_vals_reg_plot_raise_warning(): X, y = load_diabetes(return_X_y=True) X = pd.DataFrame(X) y[::50] = np.NaN X['target_col'] = y with pytest.warns(UserWarning, match="Missing values in target_col have " "been removed for regression"): plot(X, 'target_col') with pytest.warns(UserWarning, match="Missing values in target_col have " "been removed for regression"): plot_regression_continuous(X, 'target_col') with pytest.warns(UserWarning, match="Missing values in target_col have " "been removed for regression"): plot_regression_categorical(X, 'target_col') def test_plot_regression_continuous_with_target_outliers(): df = pd.DataFrame( data={ "feature": np.random.randint(low=1, high=100, size=200), # target values are bound between 50 and 100 "target": np.random.randint(low=50, high=100, size=200) } ) # append single outlier record with target value 0 df = df.append({"feature": 50, "target": 0}, ignore_index=True) with pytest.warns( UserWarning, match="Dropped 1 outliers in column target." ): plot_regression_continuous(df, 'target') def test_plot_regression_categorical_missing_value(): df = pd.DataFrame({'y': np.random.normal(size=300)}) df.loc[100:200, 'y'] += 1 df.loc[200:300, 'y'] += 2 df['x'] = 'a' df.loc[100:200, 'x'] = 'b' df.loc[200:300, 'x'] = np.NaN res = plot(df, target_col='y') assert len(res[1][0, 0].get_yticklabels()) == 3 assert res[1][0, 0].get_yticklabels()[2].get_text() == 'dabl_mi...' def test_label_truncation(): a = ('a_really_long_name_that_would_mess_up_the_layout_a_lot' '_by_just_being_very_long') b = ('the_target_that_has_an_equally_long_name_which_would_' 'mess_up_everything_as_well_but_in_different_places') df = pd.DataFrame({a: np.random.uniform(0, 1, 1000)}) df[b] = df[a] + np.random.uniform(0, 0.1, 1000) res = plot_regression_continuous(df, target_col=b) assert res[0, 0].get_ylabel() == 'the_target_that_h...' assert res[0, 0].get_xlabel() == 'a_really_long_nam...' set_config(truncate_labels=False) res = plot_regression_continuous(df, target_col=b) assert res[0, 0].get_ylabel() == b assert res[0, 0].get_xlabel() == a set_config(truncate_labels=True)
2.484375
2
scripts/calculate_rank.py
daniel-theis/multicore-test-harness
15
3181
<reponame>daniel-theis/multicore-test-harness ################################################################################ # Copyright (c) 2017 <NAME>, <NAME>, <NAME> # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all #copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. ################################################################################ import sys import json from pprint import pprint class CalculateRank(object): def __init__(self, input_file): self._input_file = input_file def get_rank(self): # Read the configuration in the JSON file with open(self._input_file) as data_file: experiments_object = json.load(data_file) # Sort all the configurations in a list dict_list = list() for experiment in experiments_object: ranked_list = experiments_object[experiment]["it"] od = list(sorted(ranked_list.values(), key=lambda x:x['q_value'], reverse=True)) dict_list.append(od) # for it in dict_list: # print() # print() # for i in range(len(it)): # print(it[i]['mapping']) # print(it[i]['q_value']) # For each environment. 
get the rank in the other experiments and store in 'rank' for it in dict_list[0]: environment = it['mapping'] rank_list = list() # Look it up for each victim(experiment) for it2 in dict_list: # Find its rank there for i in range(len(it2)): env = it2[i]['mapping'] if environment == env: rank_here = i break rank_list.append(rank_here) it['rank'] = rank_list # Identify the ones that are not Pareto optimal rank_list_bad = list() for it1 in dict_list[0]: for it2 in dict_list[0]: if len([i for i, j in zip(it1['rank'], it2['rank']) if i > j]) == len(it1['rank']): rank_list_bad.append(it1) # Put the Pareto Optimal in a list paretto_optimal = list() for it in dict_list[0]: if not (it in rank_list_bad): paretto_optimal.append(it) # If there are ties, try to break them at fewer comparisons if len(paretto_optimal) > 1: rank_list_bad = list() for it1 in paretto_optimal: for it2 in paretto_optimal: if len([i for i, j in zip(it1['rank'], it2['rank']) if i > j]) == len(it1['rank']) - 1: rank_list_bad.append(it1) # Put the tie broken ones in a list paretto_optimal_tie_break = list() for it in paretto_optimal: if not (it in rank_list_bad): paretto_optimal_tie_break.append(it) print("With no tie breaking") for i in range(len(paretto_optimal)): print(paretto_optimal[i]['mapping']) print("With tie breaking") for i in range(len(paretto_optimal_tie_break)): print(paretto_optimal_tie_break[i]['mapping']) else: print(paretto_optimal[0]['mapping']) print("There was no tie breaking") if __name__ == "__main__": if len(sys.argv) != 2: print("usage: " + sys.argv[0] + " <ranked_environments>.json\n") exit(1) rank = CalculateRank(sys.argv[1]) rank.get_rank()
1.53125
2
contrib/antlrqueryparser/src/python/generate_asts.py
marblestation/montysolr
24
3182
import sys import subprocess as sub import os """ Simple utility script to generate HTML charts of how ANTLR parses every query and what is the resulting AST. """ def run(grammar_name, basedir='', cp='.:/dvt/antlr-142/lib/antlr-3.4-complete.jar:/x/dev/antlr-34/lib/antlr-3.4-complete.jar', grammardir='', java_executable='java', dot_executable='dot' ): if not basedir: basedir = os.path.abspath('../../../../../../../../../../bin') old_dir = os.getcwd() thisdir = grammardir if not thisdir: thisdir = os.path.dirname(os.path.abspath(__file__)) os.chdir(thisdir) cp += os.pathsep + basedir #print "We'll generate ANTLR graphs\ngramar: %s\nbasedir: %s\nclasspath: %s\nparserdir: %s" % (grammar_name, basedir, cp, thisdir) grammar_file = os.path.join(thisdir, grammar_name + '.g') if not os.path.exists(grammar_file): raise Exception('Grammar %s does not exist in classpath: %s' % (grammar_file, cp)) tmp_file = os.path.join(basedir, 'ast-tree.dot') index_file = os.path.join(basedir, '%s.html' % grammar_name) gunit_file = os.path.join(thisdir, grammar_name + '.gunit') generate_ast_command = '%s -cp %s org.apache.lucene.queryparser.flexible.aqp.parser.BuildAST %s "%%s"' % (java_executable, cp, grammar_name) generate_svg_command = '%s -Tsvg %s' % (dot_executable, tmp_file) test_cases = load_gunit_file(gunit_file) index_fo = open(index_file, 'w') index_fo.write('<h1>Test cases generated from grammar: %s</h1>\n' % grammar_name) out_lines = [] i = 0 cmds = generate_ast_command.split() cmds_svg = generate_svg_command.split() total = sum(map(lambda x: len(x), test_cases.values())) toc = [] data = [] toc.append('<a name="toc" />') for section,values in test_cases.items(): output = tree = svg = '' toc.append('The rule: <a href="#anchor%s"><pre>%s</pre></a><br/>' % (section, section)) # generate AST tree for query in values: i += 1 cmds[-1] = query #tmp_dot = os.path.join(basedir, 'tmp-%s.dot' % i) tmp_dot = tmp_file if os.path.exists(tmp_dot): os.remove(tmp_dot) toc.append('%s. <a href="#anchor%s"><pre>%s</pre></a><br/>' % (i, i, query)) print '// %s/%s :: %s' % (i, total, query) #generate graph p = sub.Popen(cmds,stdout=sub.PIPE,stderr=sub.PIPE) output, errors = p.communicate() if output: fo = open(tmp_dot, 'w') fo.write(output) fo.close() else: print 'Error generating AST for: ' + query print errors if 'java.lang.ClassNotFoundException' in errors: raise Exception('Please fix your classpath') continue #generate tree cmds.append(section) cmds.append("tree") p = sub.Popen(cmds,stdout=sub.PIPE,stderr=sub.PIPE) tree, errors = p.communicate() if tree: q = query.replace('\\', '\\\\').replace('"', '\\"').replace('\'', '\\\'') t = tree.strip().replace('\\', '\\\\').replace('"', '\\"').replace("'", "\\'") print "\"%s\" -> \"%s\"" % (q, t) else: print 'Error generating AST for: ' + query print errors tree = errors cmds.pop() cmds.pop() cmds_svg[-1] = tmp_dot try: p = sub.Popen(cmds_svg,stdout=sub.PIPE,stderr=sub.PIPE) except Exception, e: print "The following command failed:" print ' '.join(cmds_svg) raise e output, errors = p.communicate() data.append(' <a name="anchor%s"/><h3>%s. 
<pre">%s</pre>&nbsp;&nbsp; <a href="#toc">^</a> </h3>' % (i, i, query)) data.append(output) data.append('<br/><pre>' + tree + '</pre>') data.append('<br/>') index_fo.write(''' <html> <head> <meta http-equiv="Content-Type" content="text/html;charset=utf-8" /> <style type="text/css"> pre {display:inline;} </style> </head> </body> ''') index_fo.write('\n'.join(toc)) index_fo.write('\n'.join(data)) index_fo.write(''' </body> </html> ''') index_fo.close() print 'HTML charts generated into:', index_fo.name os.chdir(old_dir) def load_gunit_file(gunit_file): fi = open(gunit_file, 'r') test_cases = {} section = None for line in fi: l = line.strip() if not l or l[:2] == '//': continue parts = split_line(l) if len(parts) == 1 and parts[0][-1] == ':': section = parts[0][:-1] test_cases.setdefault(section, []) elif len(parts) > 1 and parts[1].lower() != 'fails': query = parts[0] query = query.replace('\\\"', '"').replace('\\\'', '\'').replace('\\\\', '\\') test_cases[section].append(query) fi.close() return test_cases def split_line(line): line = line.replace('->', '') start = 0 last_pos = None parts = [] while line.find('"', start) > -1: p = line.index('"', start) start = p+1 if line[p-1] != '\\': if last_pos is None: last_pos = p else: parts.append(line[last_pos+1:p]) parts.append(line[p+1:].strip()) last_pos = None break if not parts: parts.append(line.strip()) return parts if __name__ == '__main__': if len(sys.argv) == 1: sys.argv.insert(1, "StandardLuceneGrammar") run(*sys.argv[1:])
2.78125
3
visual_perception/Detection/yolov4/__init__.py
SSusantAchary/Visual-Perception
0
3183
<reponame>SSusantAchary/Visual-Perception """ MIT License Copyright (c) 2020 <NAME> <<EMAIL>> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from visual_perception.Detection.yolov4.tf import YOLOv4 as yolo_main import numpy as np import cv2 labels = {0: 'person', 1: 'bicycle', 2: 'car', 3: 'motorcycle', 4: 'airplane', 5: 'bus', 6: 'train', 7: 'truck', 8: 'boat', 9: 'traffic light', 10: 'fire hydrant', 11: 'stop sign', 12: 'parking meter', 13: 'bench', 14: 'bird', 15: 'cat', 16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear', 22: 'zebra', 23: 'giraffe', 24: 'backpack', 25: 'umbrella', 26: 'handbag', 27: 'tie', 28: 'suitcase', 29: 'frisbee', 30: 'skis', 31: 'snowboard', 32: 'sports ball', 33: 'kite', 34: 'baseball bat', 35: 'baseball glove', 36: 'skateboard', 37: 'surfboard', 38: 'tennis racket', 39: 'bottle', 40: 'wine glass', 41: 'cup', 42: 'fork', 43: 'knife', 44: 'spoon', 45: 'bowl', 46: 'banana', 47: 'apple', 48: 'sandwich', 49: 'orange', 50: 'broccoli', 51: 'carrot', 52: 'hot dog', 53: 'pizza', 54: 'donut', 55: 'cake', 56: 'chair', 57: 'couch', 58: 'potted plant', 59: 'bed', 60: 'dining table', 61: 'toilet', 62: 'tv', 63: 'laptop', 64: 'mouse', 65: 'remote', 66: 'keyboard', 67: 'cell phone', 68: 'microwave', 69: 'oven', 70: 'toaster', 71: 'sink', 72: 'refrigerator', 73: 'book', 74: 'clock', 75: 'vase', 76: 'scissors', 77: 'teddy bear', 78: 'hair drier', 79: 'toothbrush'} class YOLOv4: def __init__(self): self.weights_path = "" self.model = None self.yolo_classes = "" self.iou = 0 self.score = 0 self.input_shape = 0 self.output_path = "" def load_model(self, weights_path:str = None, classes_path:str = None, input_shape:int = 608): if (weights_path is None) or (classes_path is None): raise RuntimeError ('weights_path AND classes_path should not be None.') self.yolo_classes = classes_path self.weights_path = weights_path self.input_shape = input_shape self.model = yolo_main(shape = self.input_shape) self.model.classes = self.yolo_classes self.model.make_model() self.model.load_weights(self.weights_path, weights_type = 'yolo') def predict(self, img:np.ndarray, output_path:str, iou = 0.45, score = 0.25, custom_objects:dict = None, debug=True): self.output_path = output_path self.iou = iou self.score = score #img = np.array(Image.open(img))[..., ::-1] pred_bboxes = self.model.predict(img, iou_threshold = self.iou, score_threshold = self.score) boxes = [] if (custom_objects != None): for i in range(len(pred_bboxes)): check_name = labels[pred_bboxes[i][4]] check = custom_objects.get(check_name, 'invalid') if 
check == 'invalid': continue elif check == 'valid': boxes.append(list(pred_bboxes[i])) boxes = np.array(boxes) res = self.model.draw_bboxes(img, boxes) if debug: cv2.imwrite(self.output_path, res) else: res = self.model.draw_bboxes(img, pred_bboxes) if debug: cv2.imwrite(self.output_path, res) return res class TinyYOLOv4: def __init__(self): self.weights_path = "" self.model = None self.yolo_classes = "" self.iou = 0 self.score = 0 self.input_shape = 0 self.output_path = "" def load_model(self, weights_path:str = None, classes_path:str = None, input_shape:int = 0): if (weights_path is None) or (classes_path is None): raise RuntimeError ('weights_path AND classes_path should not be None.') self.yolo_classes = classes_path self.weights_path = weights_path self.input_shape = input_shape self.model = yolo_main(tiny = True, shape = self.input_shape) self.model.classes = self.yolo_classes self.model.make_model() self.model.load_weights(self.weights_path, weights_type = 'yolo') def predict(self, img:np.ndarray, output_path:str, iou = 0.4, score = 0.07, custom_objects:dict = None, debug=True): self.output_path = output_path self.iou = iou self.score = score #img = np.array(Image.open(img))[..., ::-1] pred_bboxes = self.model.predict(img, iou_threshold = self.iou, score_threshold = self.score) boxes = [] if (custom_objects != None): for i in range(len(pred_bboxes)): check_name = labels[pred_bboxes[i][4]] check = custom_objects.get(check_name, 'invalid') if check == 'invalid': continue elif check == 'valid': boxes.append(list(pred_bboxes[i])) boxes = np.array(boxes) res = self.model.draw_bboxes(img, boxes) if debug: cv2.imwrite(self.output_path, res) else: res = self.model.draw_bboxes(img, pred_bboxes) if debug: cv2.imwrite(self.output_path, res) return res
1.296875
1
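A minimal usage sketch for the YOLOv4 wrapper defined in the record above. The weights file, class list, and image path are placeholders (they are not shipped with the package), so treat this as an assumption-laden example rather than documented usage.

import cv2
from visual_perception.Detection.yolov4 import YOLOv4

detector = YOLOv4()
# hypothetical local files -- substitute your own yolov4 weights and COCO class list
detector.load_model(weights_path="yolov4.weights",
                    classes_path="coco.names",
                    input_shape=608)

frame = cv2.imread("street.jpg")  # BGR image as a numpy array
annotated = detector.predict(frame, output_path="street_out.jpg",
                             iou=0.45, score=0.25)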
server/mqtt/handler.py
rishab-rb/MyIOTMap
1
3184
<reponame>rishab-rb/MyIOTMap
import paho.mqtt.client as mqtt

HOST = 'localhost'
PORT = 1883


class MQTTConnector:
    def __init__(self, host, port):
        self.host = host
        self.port = port
        self.client = mqtt.Client()

    def connect(self):
        self.client.connect(self.host, self.port, 60)

    def run(self):
        self.client.loop_forever()


class MQTTSubscriber:
    def __init__(self, *args, **kwargs):
        super(MQTTSubscriber, self).__init__(*args, **kwargs)


class MQTTPublisher:
    def __init__(self, host):
        self.host = host
2.71875
3
scripts/spacy_files/similarity_replacement.py
HighDeFing/thesis_v4
0
3185
#!/bin/env python
from black import main
import spacy
import json
from spacy import displacy
import unidecode
import pandas as pd
import numpy as np
import os

csv_source = "scripts/spacy_files/data/thesis_200_with_school.csv"

df = pd.read_csv(csv_source)
df = df[df['isScan'] == False]
df = df.sort_values('isScan', ascending=False)

text1 = "Escuela de Enfermería"
text2 = "ESCUELA DE ENFERMERIA"

file = open("scripts/spacy_files/data/escuelas.json", "r")
file = json.load(file)

temp_list = []
for facultad in file:
    temp_list.append(facultad['escuela'])
    #print(facultad['escuela'])

escuelas = [item for sublist in temp_list for item in sublist]  # make the list flat
#print(escuelas)

text1_u = unidecode.unidecode(text1)
text1_l_u = text1_u.lower()
text2_l_u = unidecode.unidecode(text2).lower()

print(text1_l_u, "<-->", text2_l_u)
if text1_l_u == text2_l_u:
    print(text1, " is correct.")

def unaccent_list(accent_list):
    unaccented_schools = []
    for sch in accent_list:
        unaccented_schools.append(unidecode.unidecode(sch).lower())
    return unaccented_schools

def set_school_to_unaccent(escuelas):
    escuelas = unaccent_list(escuelas)
    return escuelas

def create_dictionary(schools):
    myDict = dict((e, i) for i, e in enumerate(schools))
    return myDict

def set_schools_accents(row, dict, dict_c):
    # Look up the unaccented school name and map it back to its accented form
    index = dict.get(row.lower())
    key_list = list(dict_c.keys())
    val_list = list(dict_c.values())
    try:
        position = val_list.index(index)
        return key_list[position]
    except ValueError:
        return None

if __name__ == "__main__":
    u_escuelas = set_school_to_unaccent(escuelas)
    u_escuelas_dict = create_dictionary(u_escuelas)
    escuelas_dict = create_dictionary(escuelas)
    print(u_escuelas_dict)
    print(escuelas_dict)
    print(set_schools_accents("No school", u_escuelas_dict, escuelas_dict))
3.09375
3
test/unittest_base.py
dat-boris/tensorforce
0
3186
<filename>test/unittest_base.py # Copyright 2018 Tensorforce Team. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from copy import deepcopy from datetime import datetime import os import sys import warnings from tensorforce import TensorforceError from tensorforce.agents import Agent from tensorforce.core.layers import Layer from tensorforce.environments import Environment from tensorforce.execution import Runner from test.unittest_environment import UnittestEnvironment os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' class UnittestBase(object): """ Unit-test base class. """ # Unittest num_updates = None num_episodes = None num_timesteps = None # Environment min_timesteps = 1 states = dict( bool_state=dict(type='bool', shape=(1,)), int_state=dict(type='int', shape=(2,), num_values=4), float_state=dict(type='float', shape=(1, 1, 2)), bounded_state=dict(type='float', shape=(), min_value=-0.5, max_value=0.5) ) actions = dict( bool_action=dict(type='bool', shape=(1,)), int_action=dict(type='int', shape=(2,), num_values=4), float_action=dict(type='float', shape=(1, 1)), bounded_action=dict(type='float', shape=(2,), min_value=-0.5, max_value=0.5) ) # Exclude action types exclude_bool_action = False exclude_int_action = False exclude_float_action = False exclude_bounded_action = False # Agent agent = dict( update=4, policy=dict(network=dict(type='auto', size=8, depth=1, internal_rnn=2)), objective='policy_gradient', reward_estimation=dict(horizon=3) ) # Tensorforce config require_observe = False require_all = False def setUp(self): warnings.filterwarnings( action='ignore', message='Converting sparse IndexedSlices to a dense Tensor of unknown shape' ) def start_tests(self, name=None): """ Start unit-test method. """ if name is None: sys.stdout.write('\n{} {}: '.format( datetime.now().strftime('%H:%M:%S'), self.__class__.__name__[4:] )) else: sys.stdout.write('\n{} {} ({}): '.format( datetime.now().strftime('%H:%M:%S'), self.__class__.__name__[4:], name )) sys.stdout.flush() def finished_test(self, assertion=None): """ Finished unit-test. """ if assertion is None: assertion = True else: self.assertTrue(expr=assertion) if assertion: sys.stdout.write('.') sys.stdout.flush() def prepare( self, environment=None, min_timesteps=None, states=None, actions=None, exclude_bool_action=False, exclude_int_action=False, exclude_float_action=False, exclude_bounded_action=False, require_observe=False, require_all=False, **agent ): """ Generic unit-test preparation. 
""" Layer.layers = None if environment is None: if states is None: states = deepcopy(self.__class__.states) if actions is None: actions = deepcopy(self.__class__.actions) if exclude_bool_action or self.__class__.exclude_bool_action: actions.pop('bool_action') if exclude_int_action or self.__class__.exclude_int_action: actions.pop('int_action') if exclude_float_action or self.__class__.exclude_float_action: actions.pop('float_action') if exclude_bounded_action or self.__class__.exclude_bounded_action: actions.pop('bounded_action') if min_timesteps is None: min_timesteps = self.__class__.min_timesteps environment = UnittestEnvironment( states=states, actions=actions, min_timesteps=min_timesteps ) elif min_timesteps is not None: raise TensorforceError.unexpected() environment = Environment.create(environment=environment, max_episode_timesteps=5) for key, value in self.__class__.agent.items(): if key not in agent: agent[key] = value if self.__class__.require_all or require_all: config = None elif self.__class__.require_observe or require_observe: config = dict(api_functions=['reset', 'act', 'observe']) else: config = dict(api_functions=['reset', 'act']) agent = Agent.create(agent=agent, environment=environment, config=config) return agent, environment def unittest( self, num_updates=None, num_episodes=None, num_timesteps=None, environment=None, min_timesteps=None, states=None, actions=None, exclude_bool_action=False, exclude_int_action=False, exclude_float_action=False, exclude_bounded_action=False, require_observe=False, require_all=False, **agent ): """ Generic unit-test. """ agent, environment = self.prepare( environment=environment, min_timesteps=min_timesteps, states=states, actions=actions, exclude_bool_action=exclude_bool_action, exclude_int_action=exclude_int_action, exclude_float_action=exclude_float_action, exclude_bounded_action=exclude_bounded_action, require_observe=require_observe, require_all=require_all, **agent ) self.runner = Runner(agent=agent, environment=environment) assert (num_updates is not None) + (num_episodes is not None) + \ (num_timesteps is not None) <= 1 if num_updates is None and num_episodes is None and num_timesteps is None: num_updates = self.__class__.num_updates num_episodes = self.__class__.num_episodes num_timesteps = self.__class__.num_timesteps if num_updates is None and num_episodes is None and num_timesteps is None: num_updates = 2 assert (num_updates is not None) + (num_episodes is not None) + \ (num_timesteps is not None) == 1 evaluation = not any([ require_all, require_observe, self.__class__.require_all, self.__class__.require_observe ]) self.runner.run( num_episodes=num_episodes, num_timesteps=num_timesteps, num_updates=num_updates, use_tqdm=False, evaluation=evaluation ) self.runner.close() agent.close() environment.close() self.finished_test()
2.1875
2
mspray/apps/reveal/__init__.py
onaio/mspray
0
3187
"""init module for reveal app""" # pylint: disable=invalid-name default_app_config = "mspray.apps.reveal.apps.RevealConfig" # noqa
1.210938
1
guifw/models/port.py
luizerico/PyGuiFW
1
3188
from django.db import models
from django import forms
from audit_log.models.managers import AuditLog

# Create your models here.


class Port(models.Model):
    name = models.CharField(max_length=250)
    port = models.CharField(max_length=250)
    description = models.TextField(blank=True)
    audit_log = AuditLog()
    #icon = models.ImageField(upload_to='images', blank=True)

    def __str__(self):
        return self.name


class FormPort(forms.ModelForm):
    pass

    class Meta:
        model = Port
2.078125
2
app/backend/arm/migrations/0002_auto_20190924_1712.py
karstenv/nmp-arm
2
3189
<reponame>karstenv/nmp-arm
# Generated by Django 2.2.5 on 2019-09-25 00:12

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('arm', '0001_initial'),
    ]

    operations = [
        migrations.DeleteModel(
            name='CautionMessage',
        ),
        migrations.DeleteModel(
            name='RiskRatingValue',
        ),
    ]
1.367188
1
webcam_demo.py
taranek/tennis-stats-provider
0
3190
import tensorflow as tf import json import math import cv2 import time import argparse import concurrent.futures import posenet import keyboard import sys import numpy as np from threading import Thread from slugify import slugify parser = argparse.ArgumentParser() parser.add_argument('--model', type=int, default=101) parser.add_argument('--cam_id', type=int, default=0) parser.add_argument('--cam_width', type=int, default=1280) parser.add_argument('--cam_height', type=int, default=720) parser.add_argument('--scale_factor', type=float, default=0.7125) parser.add_argument('--file', type=str, default=None, help="Optionally use a video file instead of a live camera") args = parser.parse_args() def main(): # tf.config.threading.set_inter_op_parallelism_threads(0) # tf.config.threading.set_intra_op_parallelism_threads(0) # print(tf.config.threading.get_inter_op_parallelism_threads()) # print(tf.config.threading.get_intra_op_parallelism_threads()) with tf.compat.v1.Session() as sess: model_cfg, model_outputs = posenet.load_model(args.model, sess) output_stride = model_cfg['output_stride'] if args.file is not None: cap = cv2.VideoCapture(args.file) else: cap = cv2.VideoCapture(args.cam_id) cap.set(3, args.cam_width) cap.set(4, args.cam_height) start = time.time() frame_count = 0 recording = True # ret,frame1 = cap.read() # ret,frame2 = cap.read() file_content = [] while True: # diff = cv2.absdiff(frame1,frame2) # gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY) # blur = cv2.GaussianBlur(gray,(15,15),0) # _, thresh = cv2.threshold(blur,20,255,cv2.THRESH_BINARY) # dilated = cv2.dilate(thresh,None, iterations=3) # contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) # # if(len(contours)>0): # # print("One:") # # print(dir(contours[0])) # # print("One it is.") # for contour in contours: # (x,y,w,h) = cv2.boundingRect(contour) # if(cv2.contourArea(contour)>400): # continue # cv2.rectangle(frame1,(x,y),(x+w,y+h),(0,255,0),2) # # cv2.drawContours(frame1,contours, -1,(0,255,0),2) # cv2.imshow("feed",frame1) # frame1 = frame2 # ret, frame2 = cap.read() input_image, display_image, output_scale = posenet.read_cap(cap, scale_factor=args.scale_factor, output_stride=output_stride) heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result = sess.run( model_outputs, feed_dict={'image:0': input_image} ) pose_scores, keypoint_scores, keypoint_coords = posenet.decode_multi.decode_multiple_poses( heatmaps_result.squeeze(axis=0), offsets_result.squeeze(axis=0), displacement_fwd_result.squeeze(axis=0), displacement_bwd_result.squeeze(axis=0), output_stride=output_stride, max_pose_detections=1, min_pose_score=0.15) keypoint_coords *= output_scale # TODO this isn't particularly fast, use GL for drawing and display someday... 
# print("\n ===================================== \n") img = posenet.draw_skel_and_kp( display_image, pose_scores, keypoint_scores, keypoint_coords, min_pose_score=0.15, min_part_score=0.15) cv2.imshow('posenet', img) frame_count += 1 if(recording): normalize_poses(keypoint_coords) results = json.dumps({ "timestamp":time.time() - start, "pose_scores":pose_scores.tolist(), "keypoint_scores":keypoint_scores.tolist(), "scores": keypoint_scores.size, "keypoint_coords":normalize_poses(keypoint_coords), "coords": keypoint_coords.size }) file_content.append(results) file_content = file_content[-30:] if cv2.waitKey(1) & keyboard.is_pressed('w'): print('you pressed w - service it was!') time.sleep(0.5) path = "collected/serves/" filename = str(slugify("s-"+str(time.time()))+".txt") x = Thread(target=save_to_file, args=(str(path+filename),str(file_content))) x.start() x.join() file_content = [] if cv2.waitKey(1) & keyboard.is_pressed('d'): print('you pressed d - forehand it was!') time.sleep(0.5) path = "collected/forehand/" filename = str(slugify("f-"+str(time.time()))+".txt") x = Thread(target=save_to_file, args=(str(path+filename),str(file_content))) x.start() x.join() file_content = [] if cv2.waitKey(1) & keyboard.is_pressed('a'): print('you pressed a - backhand it was!') time.sleep(0.5) path = "collected/backhand/" filename = str(slugify("b-"+str(time.time()))+".txt") x = Thread(target=save_to_file, args=(str(path+filename),str(file_content))) x.start() x.join() file_content = [] if cv2.waitKey(1) & keyboard.is_pressed('q'): print('you pressed q - quitting!') cv2.destroyAllWindows() break print('Average FPS: ', frame_count / (time.time() - start)) return 0 def my_function(toPrint): print(toPrint) def save_to_file(filename,data): file = open(filename,'w') file.write(data) file.close() def find_middle(left,right): x = (left[0]+right[0])/2.0 y = (left[1]+right[1])/2.0 return [x,y] def find_distance(pointA,pointB): dist = math.sqrt((pointB[0] - pointA[0])**2 + (pointB[1] - pointA[1])**2) return dist def normalize_poses(poses): leftShoulderCords = poses[0][5] rightShoulderCords = poses[0][6] middleShoulderPoint = find_middle(leftShoulderCords,rightShoulderCords) leftHipCords = poses[0][11] rightHipCords = poses[0][12] middleHipPoint = find_middle(leftHipCords,rightHipCords) armHipDistance = find_distance(middleHipPoint,middleShoulderPoint); normalized = [] for pose in poses[0]: normalized.append( [(pose[0]-middleHipPoint[0])/armHipDistance, (pose[1]-middleHipPoint[1])/armHipDistance] ) return normalized if __name__ == "__main__": main()
2.234375
2
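The normalize_poses step in the record above is what makes the recorded strokes comparable across players and camera distances: every keypoint is re-centred on the hip midpoint and divided by the shoulder-to-hip distance. A self-contained numpy sketch of the same idea, assuming PoseNet's keypoint ordering (5/6 = shoulders, 11/12 = hips):

import numpy as np

def normalize_pose(keypoints):
    """keypoints: (17, 2) array of (y, x) PoseNet coordinates for one person."""
    shoulder_mid = (keypoints[5] + keypoints[6]) / 2.0
    hip_mid = (keypoints[11] + keypoints[12]) / 2.0
    torso_len = np.linalg.norm(shoulder_mid - hip_mid)
    # translate to the hip midpoint, then scale by torso length
    return (keypoints - hip_mid) / torso_len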
otcextensions/tests/unit/osclient/dcs/v1/fakes.py
zsoltn/python-otcextensions
0
3191
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import datetime import random import uuid import mock from openstackclient.tests.unit import utils from otcextensions.tests.unit.osclient import test_base from otcextensions.sdk.dcs.v1 import backup from otcextensions.sdk.dcs.v1 import config from otcextensions.sdk.dcs.v1 import instance from otcextensions.sdk.dcs.v1 import restore from otcextensions.sdk.dcs.v1 import statistic class TestDCS(utils.TestCommand): def setUp(self): super(TestDCS, self).setUp() self.app.client_manager.dcs = mock.Mock() self.client = self.app.client_manager.dcs self.client.get_instance = mock.Mock() self.client.find_instance = mock.Mock() self.client.instances = mock.Mock() self.client.delete_instance = mock.Mock() self.client.update_instance = mock.Mock() self.client.create_instance = mock.Mock() self.client.extend_instance = mock.Mock() class FakeInstance(test_base.Fake): """Fake one or more Instance""" @classmethod def generate(cls): object_info = { 'name': 'group-' + uuid.uuid4().hex, 'id': 'id-' + uuid.uuid4().hex, 'description': 'SOME description', 'status': random.choice(['CREATING', 'CREATEFILED', 'RUNNING', 'ERROR', 'STARTING', 'RESTARTING', 'CLOSING', 'CLOSED', 'EXTENDING']), 'engine': uuid.uuid4().hex, 'capacity': random.randint(1, 100), 'ip': uuid.uuid4().hex, 'port': random.randint(1, 65535), 'resource_spec_code': random.choice(['dcs.single_node', 'dcs.master_standby', 'dcs.cluster' ]), 'engine_version': uuid.uuid4().hex, 'internal_version': uuid.uuid4().hex, 'charging_mode': random.randint(0, 10), 'vpc_id': uuid.uuid4().hex, 'vpc_name': uuid.uuid4().hex, 'subnet_id': uuid.uuid4().hex, 'subnet_name': uuid.uuid4().hex, 'subnet_cidr': uuid.uuid4().hex, 'security_group_id': uuid.uuid4().hex, 'security_group_name': uuid.uuid4().hex, 'created_at': uuid.uuid4().hex, 'error_code': uuid.uuid4().hex, 'product_id': random.choice(['OTC_DCS_SINGLE', 'OTC_DCS_MS', 'OTC_DCS_CL']), 'available_zones': uuid.uuid4().hex, 'max_memory': random.randint(0, 10), 'used_memory': random.randint(0, 10), 'user_id': uuid.uuid4().hex, 'user_name': uuid.uuid4().hex, 'order_id': uuid.uuid4().hex, 'maintain_begin': uuid.uuid4().hex, 'maintain_end': uuid.uuid4().hex, } obj = instance.Instance.existing(**object_info) return obj class FakeStatistic(test_base.Fake): """Fake one or more Statistic""" @classmethod def generate(cls): object_info = { 'instance_id': 'instance_id-' + uuid.uuid4().hex, 'max_memory': random.randint(1, 65535), 'used_memory': random.randint(1, 65535), 'cmd_get_count': random.randint(1, 65535), 'cmd_set_count': random.randint(1, 65535), 'used_cpu': 'cpu-' + uuid.uuid4().hex, 'input_kbps': 'input-' + uuid.uuid4().hex, 'output_kbps': 'output-' + uuid.uuid4().hex, } obj = statistic.Statistic.existing(**object_info) return obj class FakeBackup(test_base.Fake): """Fake one or more Backup""" @classmethod def generate(cls): object_info = { 'instance_id': 'instance_id-' + uuid.uuid4().hex, 'id': 'id-' + uuid.uuid4().hex, 'size': random.randint(1, 65535), 'period': uuid.uuid4().hex, 
'description': uuid.uuid4().hex, 'progress': uuid.uuid4().hex, 'created_at': uuid.uuid4().hex, 'updated_at': uuid.uuid4().hex, 'type': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'error_code': uuid.uuid4().hex, 'is_restorable': True, } obj = backup.Backup.existing(**object_info) return obj class FakeRestore(test_base.Fake): """Fake one or more Restore""" @classmethod def generate(cls): object_info = { 'instance_id': 'instance_id-' + uuid.uuid4().hex, 'max_memory': random.randint(1, 65535), 'used_memory': random.randint(1, 65535), 'cmd_get_count': random.randint(1, 65535), 'cmd_set_count': random.randint(1, 65535), 'used_cpu': 'cpu-' + uuid.uuid4().hex, 'input_kbps': 'input-' + uuid.uuid4().hex, 'output_kbps': 'output-' + uuid.uuid4().hex } obj = restore.Restore.existing(**object_info) return obj class FakeConfig(test_base.Fake): """Fake one or more Config""" @classmethod def generate(cls): object_info = { 'instance_id': 'instance_id-' + uuid.uuid4().hex, 'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'value': uuid.uuid4().hex, 'value_type': uuid.uuid4().hex, 'value_range': uuid.uuid4().hex, 'default_value': uuid.uuid4().hex, 'description': uuid.uuid4().hex } obj = config.Config.existing(**object_info) return obj
1.898438
2
tests/dummy_repo/tvm/python/tvm/api.py
csullivan/ffi-navigator
148
3192
from ._ffi.base import string_types
from ._ffi.object import register_object, Object
from ._ffi.node import register_node, NodeBase
from ._ffi.node import convert_to_node as _convert_to_node
from ._ffi.node_generic import _scalar_type_inference
from ._ffi.function import Function
from ._ffi.function import _init_api, register_func, get_global_func, extract_ext_funcs
from ._ffi.function import convert_to_tvm_func as _convert_tvm_func
from ._ffi.runtime_ctypes import TVMType

from . import _api_internal
from . import make as _make
from . import expr as _expr
from . import tensor as _tensor
from . import schedule as _schedule
from . import container as _container
from . import tag as _tag

int8 = "int8"
int32 = "int32"
float32 = "float32"
handle = "handle"


def min_value(dtype):
    return _api_internal._min_value(dtype)
1.679688
2
torchattacks/attacks/multiattack.py
Harry24k/adversarial-attacks-pytorch
782
3193
<filename>torchattacks/attacks/multiattack.py import copy import torch from ..attack import Attack class MultiAttack(Attack): r""" MultiAttack is a class to attack a model with various attacks agains same images and labels. Arguments: model (nn.Module): model to attack. attacks (list): list of attacks. Examples:: >>> atk1 = torchattacks.PGD(model, eps=8/255, alpha=2/255, iters=40, random_start=True) >>> atk2 = torchattacks.PGD(model, eps=8/255, alpha=2/255, iters=40, random_start=True) >>> atk = torchattacks.MultiAttack([atk1, atk2]) >>> adv_images = attack(images, labels) """ def __init__(self, attacks, verbose=False): # Check validity ids = [] for attack in attacks: ids.append(id(attack.model)) if len(set(ids)) != 1: raise ValueError("At least one of attacks is referencing a different model.") super().__init__("MultiAttack", attack.model) self.attacks = attacks self.verbose = verbose self._accumulate_multi_atk_records = False self._multi_atk_records = [0.0] self._supported_mode = ['default'] def forward(self, images, labels): r""" Overridden. """ batch_size = images.shape[0] fails = torch.arange(batch_size).to(self.device) final_images = images.clone().detach().to(self.device) labels = labels.clone().detach().to(self.device) multi_atk_records = [batch_size] for _, attack in enumerate(self.attacks): adv_images = attack(images[fails], labels[fails]) outputs = self.model(adv_images) _, pre = torch.max(outputs.data, 1) corrects = (pre == labels[fails]) wrongs = ~corrects succeeds = torch.masked_select(fails, wrongs) succeeds_of_fails = torch.masked_select(torch.arange(fails.shape[0]).to(self.device), wrongs) final_images[succeeds] = adv_images[succeeds_of_fails] fails = torch.masked_select(fails, corrects) multi_atk_records.append(len(fails)) if len(fails) == 0: break if self.verbose: print(self._return_sr_record(multi_atk_records)) if self._accumulate_multi_atk_records: self._update_multi_atk_records(multi_atk_records) return final_images def _clear_multi_atk_records(self): self._multi_atk_records = [0.0] def _covert_to_success_rates(self, multi_atk_records): sr = [((1-multi_atk_records[i]/multi_atk_records[0])*100) for i in range(1, len(multi_atk_records))] return sr def _return_sr_record(self, multi_atk_records): sr = self._covert_to_success_rates(multi_atk_records) return "Attack success rate: "+" | ".join(["%2.2f %%"%item for item in sr]) def _update_multi_atk_records(self, multi_atk_records): for i, item in enumerate(multi_atk_records): self._multi_atk_records[i] += item def save(self, data_loader, save_path=None, verbose=True, return_verbose=False): r""" Overridden. """ self._clear_multi_atk_records() verbose = self.verbose self.verbose = False self._accumulate_multi_atk_records = True for i, attack in enumerate(self.attacks): self._multi_atk_records.append(0.0) rob_acc, l2, elapsed_time = super().save(data_loader, save_path, verbose, return_verbose) sr = self._covert_to_success_rates(self._multi_atk_records) self._clear_multi_atk_records() self._accumulate_multi_atk_records = False self.verbose = verbose if return_verbose: return rob_acc, sr, l2, elapsed_time def _save_print(self, progress, rob_acc, l2, elapsed_time, end): r""" Overridden. """ print("- Save progress: %2.2f %% / Robust accuracy: %2.2f %%"%(progress, rob_acc)+\ " / "+self._return_sr_record(self._multi_atk_records)+\ ' / L2: %1.5f (%2.3f it/s) \t'%(l2, elapsed_time), end=end)
3.171875
3
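The heart of MultiAttack.forward in the record above is the index bookkeeping that re-attacks only the samples every previous attack failed on. A stripped-down sketch of that masking pattern, with a placeholder attack callable standing in for the real attack objects (inputs are assumed to share one device):

import torch

def cascade_attacks(model, images, labels, attacks):
    # indices of samples the model still classifies correctly
    fails = torch.arange(images.shape[0], device=images.device)
    adv = images.clone()
    for attack in attacks:                       # each attack: (imgs, labels) -> adv imgs
        candidate = attack(images[fails], labels[fails])
        pred = model(candidate).argmax(dim=1)
        wrong = pred != labels[fails]            # the attack succeeded on these samples
        adv[fails[wrong]] = candidate[wrong]
        fails = fails[~wrong]                    # keep attacking only the survivors
        if fails.numel() == 0:
            break
    return adv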
src/manager/om/script/gspylib/inspection/items/os/CheckPortConflict.py
wotchin/openGauss-server
1
3194
# -*- coding:utf-8 -*-
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
#
# openGauss is licensed under Mulan PSL v2.
# You can use this software according to the terms
# and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
#          http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
# ----------------------------------------------------------------------------
import subprocess
from gspylib.inspection.common.CheckItem import BaseItem
from gspylib.inspection.common.CheckResult import ResultStatus


class CheckPortConflict(BaseItem):
    def __init__(self):
        super(CheckPortConflict, self).__init__(self.__class__.__name__)

    def doCheck(self):
        # List listening TCP sockets whose local port falls in 25000-25999
        cmd = "netstat -apn | grep 'tcp' " \
              "| grep 'LISTEN'| awk -F ' ' '$4 ~ /25[0-9][0-9][0-9]/'"
        (status, output) = subprocess.getstatusoutput(cmd)
        if (status != 0):
            self.result.rst = ResultStatus.NG
            self.result.val = "Failed to execute command: %s\noutput:%s " % (
                cmd, output)
        else:
            if (output.strip() == ""):
                self.result.rst = ResultStatus.OK
                self.result.val = "ports is normal"
            else:
                self.result.rst = ResultStatus.NG
                self.result.val = output
            self.result.raw = "checked ports: (25000-26000)\n" + output

    def doSet(self):
        pidList = []
        # Collect the PIDs of processes listening on the conflicting ports
        cmd = "netstat -apn| grep 'tcp'" \
              "| grep 'LISTEN'| awk -F ' ' '$4 ~ /25[0-9][0-9][0-9]/'" \
              "| awk '{print $NF}'"
        (status, output) = subprocess.getstatusoutput(cmd)
        if (status == 0 and output != ""):
            for line in output.split('\n'):
                if (line.find('/') > 0):
                    pid = line.split('/')[0].strip()
                    if (pid.isdigit()):
                        pidList.append(pid)
        if (pidList):
            cmd = "kill -9"
            for pid in pidList:
                cmd += " %s" % pid
            (status, output) = subprocess.getstatusoutput(cmd)
            if (status != 0):
                self.result.val = "Failed to kill process.Error:%s\n" % output
                self.result.val += "The cmd is %s " % cmd
            else:
                self.result.val = \
                    "Successfully killed the process that occupies the port.\n"
2.046875
2
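The check above shells out to netstat and greps for listeners in the 25000-25999 range. A rough, dependency-free approximation of the same probe using only the standard socket module; it only sees listeners reachable via 127.0.0.1, so this is a sanity sketch rather than a drop-in replacement for the parsed netstat output:

import socket

def listening_ports(start=25000, end=26000, host="127.0.0.1"):
    busy = []
    for port in range(start, end):
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.settimeout(0.05)
            if s.connect_ex((host, port)) == 0:  # 0 means something accepted the connection
                busy.append(port)
    return busy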
_scripts/_build.py
dfreeman06/wxyz
1
3195
import subprocess
import sys

from . import ROOT, PY_SRC, _run, PY, DIST

CONDA_ORDER = [
    "core",
    "html",
    "lab",
    "datagrid",
    "svg",
    "tpl-jjinja",
    "yaml"
]

CONDA_BUILD_ARGS = [
    "conda-build",
    "-c",
    "conda-forge",
    "--output-folder",
    DIST / "conda-bld",
]

if __name__ == "__main__":
    for pkg in PY_SRC.glob("wxyz_*"):
        _run([PY, "setup.py", "sdist", "--dist-dir", DIST / "sdist"], cwd=str(pkg))
    try:
        _run([*CONDA_BUILD_ARGS, "--skip-existing", "."], cwd=ROOT / "recipes")
    except Exception:
        # fall back to building the recipes one-by-one in dependency order
        for pkg in CONDA_ORDER:
            _run([*CONDA_BUILD_ARGS, f"wxyz-{pkg}"], cwd=ROOT / "recipes")
1.726563
2
scripts/C189/C189Checkin.py
xiaopowanyi/py_scripts
2
3196
<gh_stars>1-10 import requests, time, re, rsa, json, base64 from urllib import parse s = requests.Session() username = "" password = "" if(username == "" or password == ""): username = input("账号:") password = input("密码:") def main(): login(username, password) rand = str(round(time.time()*1000)) surl = f'https://api.cloud.189.cn/mkt/userSign.action?rand={rand}&clientType=TELEANDROID&version=8.6.3&model=SM-G930K' url = f'https://m.cloud.189.cn/v2/drawPrizeMarketDetails.action?taskId=TASK_SIGNIN&activityId=ACT_SIGNIN' url2 = f'https://m.cloud.189.cn/v2/drawPrizeMarketDetails.action?taskId=TASK_SIGNIN_PHOTOS&activityId=ACT_SIGNIN' headers = { 'User-Agent':'Mozilla/5.0 (Linux; Android 5.1.1; SM-G930K Build/NRD90M; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/74.0.3729.136 Mobile Safari/537.36 Ecloud/8.6.3 Android/22 clientId/355325117317828 clientModel/SM-G930K imsi/460071114317824 clientChannelId/qq proVersion/1.0.6', "Referer" : "https://m.cloud.189.cn/zhuanti/2016/sign/index.jsp?albumBackupOpened=1", "Host" : "m.cloud.189.cn", "Accept-Encoding" : "gzip, deflate", } response = s.get(surl,headers=headers) netdiskBonus = response.json()['netdiskBonus'] if(response.json()['isSign'] == "false"): print(f"未签到,签到获得{netdiskBonus}M空间") else: print(f"已经签到过了,签到获得{netdiskBonus}M空间") headers = { 'User-Agent':'Mozilla/5.0 (Linux; Android 5.1.1; SM-G930K Build/NRD90M; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/74.0.3729.136 Mobile Safari/537.36 Ecloud/8.6.3 Android/22 clientId/355325117317828 clientModel/SM-G930K imsi/460071114317824 clientChannelId/qq proVersion/1.0.6', "Referer" : "https://m.cloud.189.cn/zhuanti/2016/sign/index.jsp?albumBackupOpened=1", "Host" : "m.cloud.189.cn", "Accept-Encoding" : "gzip, deflate", } response = s.get(url,headers=headers) try: if ("errorCode" in response.text): print(response.json()['errorCode']) elif (response.json().has_key('description')): description = response.json()['description'] print(f"抽奖获得{description}") except: print(f"抽奖1完成,解析时失败") try: response2 = s.get(url2,headers=headers) if ("errorCode" in response2.text): print(response.json()['errorCode']) elif (response2.json().has_key('description')): description = response2.json()['description'] print(f"抽奖2获得{description}") except: print(f"抽奖2完成,解析时失败") BI_RM = list("0123456789abcdefghijklmnopqrstuvwxyz") def int2char(a): return BI_RM[a] b64map = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" def b64tohex(a): d = "" e = 0 c = 0 for i in range(len(a)): if list(a)[i] != "=": v = b64map.index(list(a)[i]) if 0 == e: e = 1 d += int2char(v >> 2) c = 3 & v elif 1 == e: e = 2 d += int2char(c << 2 | v >> 4) c = 15 & v elif 2 == e: e = 3 d += int2char(c) d += int2char(v >> 2) c = 3 & v else: e = 0 d += int2char(c << 2 | v >> 4) d += int2char(15 & v) if e == 1: d += int2char(c << 2) return d def rsa_encode(j_rsakey, string): rsa_key = f"-----BEGIN PUBLIC KEY-----\n{j_rsakey}\n-----END PUBLIC KEY-----" pubkey = rsa.PublicKey.load_pkcs1_openssl_pem(rsa_key.encode()) result = b64tohex((base64.b64encode(rsa.encrypt(f'{string}'.encode(), pubkey))).decode()) return result def calculate_md5_sign(params): return hashlib.md5('&'.join(sorted(params.split('&'))).encode('utf-8')).hexdigest() def login(username, password): url = "https://cloud.189.cn/udb/udb_login.jsp?pageId=1&redirectURL=/main.action" r = s.get(url) captchaToken = re.findall(r"captchaToken' value='(.+?)'", r.text)[0] lt = re.findall(r'lt = "(.+?)"', r.text)[0] returnUrl = re.findall(r"returnUrl = '(.+?)'", r.text)[0] 
paramId = re.findall(r'paramId = "(.+?)"', r.text)[0] j_rsakey = re.findall(r'j_rsaKey" value="(\S+)"', r.text, re.M)[0] s.headers.update({"lt": lt}) username = rsa_encode(j_rsakey, username) password = rsa_encode(j_rsakey, password) url = "https://open.e.189.cn/api/logbox/oauth2/loginSubmit.do" headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:74.0) Gecko/20100101 Firefox/76.0', 'Referer': 'https://open.e.189.cn/', } data = { "appKey": "cloud", "accountType": '01', "userName": f"{{RSA}}{username}", "password": f"{{<PASSWORD>}", "validateCode": "", "captchaToken": captchaToken, "returnUrl": returnUrl, "mailSuffix": "@189.cn", "paramId": paramId } r = s.post(url, data=data, headers=headers, timeout=5) if(r.json()['result'] == 0): print(r.json()['msg']) else: print(r.json()['msg']) redirect_url = r.json()['toUrl'] r = s.get(redirect_url) return s if __name__ == "__main__": main()
2.515625
3
Mmint/CGratio.py
lijiacd985/Mplot
5
3197
import subprocess from .Genome_fasta import get_fasta import matplotlib matplotlib.use('Agg') from matplotlib import pyplot as plt import numpy as np import pysam def run(parser): args = parser.parse_args() bases,chrs = get_fasta(args.genome) l={} for c in chrs: l[c]=len(bases[c]) chrs = set(chrs) #p = subprocess.Popen('bamToBed -i '+args.bamfile,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE) reads_num=0 reads_cg_num=[0,0,0] #CG,cg,Cg cgnum_per_read=[] with pysam.AlignmentFile(args.bamfile) as f: for line in f: #t = line.decode('utf-8').strip().split() chr = line.reference_name#t[0] start= line.reference_start end= line.reference_end strand= not line.is_reverse # True +strand; False -strand if not chr in chrs: continue end=min(end+1,l[chr]) reads_num+=1 if strand:#=='+': cg=[bases[chr].count('CG',start,end)+bases[chr].count('Cg',start,end),bases[chr].count('cG',start,end)+bases[chr].count('cg',start,end)] else: cg=[bases[chr].count('GC',start,end)+bases[chr].count('gC',start,end),bases[chr].count('Gc',start,end)+bases[chr].count('gc',start,end)] #We need to consider strand specific situation. #'+' strand we have CG but '-' we should count 'GC'. #print cg # for i in range(1,ls): # r2=read[i] # r1=read[i-1] # if 'G'==r2 or 'g'==r2: # if 'C'==r1: cg[0]+=1 # if 'c'==r1: cg[1]+=1 #count = int(cg[0]>0)+int(cg[1]>0) if cg[0]+cg[1]==0: continue #print cg cgnum_per_read.append(sum(cg)) if cg[0]>0 and cg[1]>0: reads_cg_num[2]+=1 continue if cg[0]>0: reads_cg_num[0]+=1 else: reads_cg_num[1]+=1 #print reads_cg_num #print reads_num plt.figure() plt.subplot(211) labels = ['noCG','NonRepeat CG','Repeat cg','CGcg mix'] colors = ['r','b','g','y'] explode=(0.05,0,0,0) sizes=[reads_num-sum(reads_cg_num)]+reads_cg_num patches,l_text,p_text = plt.pie(sizes,explode=explode,labels=labels,colors=colors, labeldistance = 1.1,autopct = '%3.1f%%',shadow = False, startangle = 90,pctdistance = 0.6) plt.axis('equal') #plt.legend(loc=2,bbox_to_anchor=(0, 0)) ax=plt.subplot(212) t=np.zeros(20) for num in cgnum_per_read: t[min(num-1,19)]+=1 labels = list(map(str,np.arange(1,20)))+['20+'] #print(t) t = (np.array(t).astype(float)/sum(reads_cg_num))*100 plt.bar(np.arange(20),t) ax.set_xticks(np.arange(20)) ax.set_xticklabels(labels) ax.set_ylabel('Percentage of reads including CG') ax.set_xlabel('CG number per read') plt.text(4,max(t)+4,'All reads including CG site: '+str(sum(reads_cg_num))) #print args.output+'.pdf' plt.savefig(args.output+'.pdf') if __name__=="__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument('-b','--bamfile',help="bam file name", metavar="FILE") parser.add_argument('-g','--genome',help="Genome fasta file path") parser.add_argument('-o','--output',help="pie figure's filename") run(parser)
2.328125
2
src/furo/__init__.py
sethmlarson/furo
0
3198
<reponame>sethmlarson/furo<filename>src/furo/__init__.py
"""A clean customisable Sphinx documentation theme."""

__version__ = "2020.9.8.beta2"

from pathlib import Path

from .body import wrap_tables
from .code import get_pygments_style_colors
from .navigation import get_navigation_tree
from .toc import should_hide_toc


def _html_page_context(app, pagename, templatename, context, doctree):
    if app.config.html_theme != "furo":
        return

    # Custom Navigation Tree (adds checkboxes and labels)
    toctree = context.get("toctree", lambda **kwargs: "")
    toctree_html = toctree(
        collapse=False, titles_only=True, maxdepth=-1, includehidden=True
    )
    context["furo_navigation_tree"] = get_navigation_tree(toctree_html)

    # Custom "should hide ToC" logic
    context["furo_hide_toc"] = should_hide_toc(context.get("toc", ""))

    # Allow for hiding toc via ToC in page-wide metadata.
    if "hide-toc" in (context.get("meta", None) or {}):
        context["furo_hide_toc"] = True

    # Inject information about styles
    colors = get_pygments_style_colors(
        app.builder.highlighter.formatter_args["style"],
        fallbacks={"foreground": "#000000", "background": "#FFFFFF"},
    )
    context["furo_pygments"] = colors

    # Patch the content
    if "body" in context:
        context["body"] = wrap_tables(context["body"])


def setup(app):
    """Entry point for sphinx theming."""
    theme_path = (Path(__file__).parent / "theme").resolve()
    app.add_html_theme("furo", str(theme_path))
    app.connect("html-page-context", _html_page_context)
2.015625
2
experiments/mix_down.py
fretboardfreak/potty_oh
0
3199
#!/usr/bin/env python3
# Copyright 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""A test for what happens when two waveforms are averaged together."""

from potty_oh import common
from potty_oh.wav_file import wav_file_context
from potty_oh.waveform import mix_down
from potty_oh.signal_generator import Generator
from potty_oh.music.pitch import Key
from potty_oh.music.interval import Interval


def main():
    parser = common.get_cmd_line_parser(description=__doc__)
    common.ParserArguments.filename(parser)
    common.ParserArguments.length(parser)
    common.ParserArguments.framerate(parser)
    common.ParserArguments.set_defaults(parser, type='constant', length=2.0)
    args = parser.parse_args()
    common.defaults.framerate = args.framerate

    sg = Generator(length=args.length, verbose=args.debug)

    key = Key()
    unison = sg.sin_constant(key.interval(Interval.unison))
    maj_third = sg.sin_constant(key.interval(Interval.major_third))
    min_third = sg.sin_constant(key.interval(Interval.minor_third))
    fifth = sg.sin_constant(key.interval(Interval.fifth))

    powerchord = unison.mix_down(fifth)

    maj_triad = powerchord.mix_down(maj_third)
    min_triad = mix_down(powerchord, min_third)

    with wav_file_context(args.filename) as fout:
        fout.write_frames(powerchord.frames)
        fout.write_frames(maj_triad.frames)
        fout.write_frames(min_triad.frames)

    return 0


if __name__ == "__main__":
    common.call_main(main)
2.78125
3
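The experiment above exercises potty_oh's mix_down, which the module docstring describes as averaging two waveforms together. The underlying arithmetic, independent of the library, is a short numpy exercise; the sample rate and test frequencies below are arbitrary assumptions, not values taken from the project:

import numpy as np

framerate = 44100
t = np.arange(int(framerate * 2.0)) / framerate   # two seconds of samples
root = np.sin(2 * np.pi * 440.0 * t)              # A4
fifth = np.sin(2 * np.pi * 659.25 * t)            # E5, a fifth above

powerchord = (root + fifth) / 2.0                 # averaging keeps samples in [-1, 1]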